code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
#!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys, os, cmd, threading, code, re
from optparse import OptionParser
from androguard.core import *
from androguard.core.androgen import *
from androguard.core.androconf import *
from androguard.core.bytecode import *
from androguard.core.bytecodes.jvm import *
from androguard.core.bytecodes.dvm import *
from androguard.core.bytecodes.apk import *
from androguard.core.analysis.analysis import *
from androguard.core.analysis.ganalysis import *
from androguard.core.analysis.risk import *
from androguard.decompiler.decompiler import *
from androguard.core import androconf
from IPython.frontend.terminal.embed import InteractiveShellEmbed
from IPython.config.loader import Config
from cPickle import dumps, loads
# Command-line option specifications, consumed by the OptionParser loop in
# the __main__ block below.  Each dict's 'name' entry holds the flag
# strings; the remaining keys become add_option keyword arguments.
option_0 = { 'name' : ('-i', '--input'), 'help' : 'file : use this filename', 'nargs' : 1 }
option_1 = { 'name' : ('-d', '--display'), 'help' : 'display the file in human readable format', 'action' : 'count' }
option_2 = { 'name' : ('-m', '--method'), 'help' : 'display method(s) respect with a regexp', 'nargs' : 1 }
option_3 = { 'name' : ('-f', '--field'), 'help' : 'display field(s) respect with a regexp', 'nargs' : 1 }
option_4 = { 'name' : ('-s', '--shell'), 'help' : 'open an interactive shell to play more easily with objects', 'action' : 'count' }
option_5 = { 'name' : ('-v', '--version'), 'help' : 'version of the API', 'action' : 'count' }
option_6 = { 'name' : ('-p', '--pretty'), 'help' : 'pretty print !', 'action' : 'count' }
# NOTE(review): the numbering skips option_7 — presumably a removed option.
option_8 = { 'name' : ('-x', '--xpermissions'), 'help' : 'show paths of permissions', 'action' : 'count' }
options = [option_0, option_1, option_2, option_3, option_4, option_5, option_6, option_8]
def init_print_colors():
    """Route androguard's print function through IPython's colorized stdout."""
    # Imported lazily so non-pretty runs don't need these IPython modules here.
    from IPython.utils import coloransi, io
    # default_colors and CONF come in via the star imports above
    # (presumably androconf) — TODO confirm.
    default_colors(coloransi.TermColors)
    CONF["PRINT_FCT"] = io.stdout.write
def interact():
    """Drop into an embedded IPython shell for interactive analysis."""
    cfg = Config()
    ipshell = InteractiveShellEmbed(config=cfg, banner1="Androlyze version %s" % androconf.ANDROGUARD_VERSION)
    # Enable colorized output before handing control to the shell.
    init_print_colors()
    ipshell()
def save_session(l, filename):
    """
    save your session !

    :param l: a list of objects
    :type: a list of object

    :param filename: output filename to save the session
    :type filename: string

    :Example:
        save_session([a, vm, vmx], "msession.json")

    .. note:: despite the ".json" name in the example, the session is
       stored as a binary pickle stream.
    """
    import pickle
    # Pickle protocol -1 output is binary: the file must be opened in
    # "wb" (the previous "w" text mode could corrupt the stream on
    # platforms with newline translation).  The context manager also
    # guarantees the handle is closed even if dumps() raises.
    with open(filename, "wb") as fd:
        fd.write(pickle.dumps(l, -1))
def load_session(filename):
    """
    load your session !

    :param filename: the filename where the session has been saved
    :type filename: string

    :rtype: the elements of your session :)

    :Example:
        a, vm, vmx = load_session("mysession.json")
    """
    import pickle
    # Open in binary mode to match the pickle stream written by
    # save_session, and close the handle deterministically (the previous
    # version leaked the open file object).
    with open(filename, "rb") as fd:
        return pickle.loads(fd.read())
def AnalyzeAPK(filename, raw=False, decompiler=None):
    """
    Analyze an android application and setup all stuff for a more quickly analysis !

    :param filename: the filename of the android application or a buffer which represents the application
    :type filename: string
    :param raw: True if you would like to use a buffer (optional)
    :type raw: boolean
    :param decompiler: ded, dex2jad, dad (optional)
    :type decompiler: string

    :rtype: return the :class:`APK`, :class:`DalvikVMFormat`, and :class:`VMAnalysis` objects
    """
    androconf.debug("APK ...")
    a = APK(filename, raw)
    # The embedded classes.dex is always handed on as an in-memory buffer,
    # hence raw=True regardless of how the APK itself was supplied.
    d, dx = AnalyzeDex(a.get_dex(), raw=True, decompiler=decompiler)
    return a, d, dx
def AnalyzeDex(filename, raw=False, decompiler=None):
    """
    Analyze an android dex file and setup all stuff for a more quickly analysis !

    :param filename: the filename of the android dex file or a buffer which represents the dex file
    :type filename: string
    :param raw: True if you would like to use a buffer (optional)
    :type raw: boolean

    :rtype: return the :class:`DalvikVMFormat`, and :class:`VMAnalysis` objects
    """
    androconf.debug("DalvikVMFormat ...")
    d = None
    if raw == False:
        d = DalvikVMFormat(open(filename, "rb").read())
    else:
        d = DalvikVMFormat(filename)
    androconf.debug("Export VM to python namespace")
    d.create_python_export()
    androconf.debug("VMAnalysis ...")
    dx = uVMAnalysis(d)
    androconf.debug("GVMAnalysis ...")
    gx = GVMAnalysis(dx, None)
    # Cross-link the format object with both analyses before decompiling,
    # then build cross-references (XREF) and data references (DREF).
    d.set_vmanalysis(dx)
    d.set_gvmanalysis(gx)
    RunDecompiler(d, dx, decompiler)
    androconf.debug("XREF ...")
    d.create_xref()
    androconf.debug("DREF ...")
    d.create_dref()
    return d, dx
def AnalyzeODex(filename, raw=False, decompiler=None):
    """
    Analyze an android odex file and setup all stuff for a more quickly analysis !

    :param filename: the filename of the android dex file or a buffer which represents the dex file
    :type filename: string
    :param raw: True if you would like to use a buffer (optional)
    :type raw: boolean

    :rtype: return the :class:`DalvikOdexVMFormat`, and :class:`VMAnalysis` objects
    """
    # NOTE(review): this mirrors AnalyzeDex step for step, with
    # DalvikOdexVMFormat substituted for DalvikVMFormat.
    androconf.debug("DalvikOdexVMFormat ...")
    d = None
    if raw == False:
        d = DalvikOdexVMFormat(open(filename, "rb").read())
    else:
        d = DalvikOdexVMFormat(filename)
    androconf.debug("Export VM to python namespace")
    d.create_python_export()
    androconf.debug("VMAnalysis ...")
    dx = uVMAnalysis(d)
    androconf.debug("GVMAnalysis ...")
    gx = GVMAnalysis(dx, None)
    d.set_vmanalysis(dx)
    d.set_gvmanalysis(gx)
    RunDecompiler(d, dx, decompiler)
    androconf.debug("XREF ...")
    d.create_xref()
    androconf.debug("DREF ...")
    d.create_dref()
    return d, dx
def RunDecompiler(d, dx, decompiler):
    """
    Run the decompiler on a specific analysis

    :param d: the DalvikVMFormat object
    :type d: :class:`DalvikVMFormat` object
    :param dx: the analysis of the format
    :type dx: :class:`VMAnalysis` object
    :param decompiler: the type of decompiler to use ("dad", "dex2jad", "ded")
    :type decompiler: string
    """
    # A None decompiler leaves d without one; any unrecognized string
    # (including "dad") falls through to the built-in DAD decompiler.
    if decompiler != None:
        androconf.debug("Decompiler ...")
        decompiler = decompiler.lower()
        if decompiler == "dex2jad":
            d.set_decompiler(DecompilerDex2Jad(d,
                                               androconf.CONF["PATH_DEX2JAR"],
                                               androconf.CONF["BIN_DEX2JAR"],
                                               androconf.CONF["PATH_JAD"],
                                               androconf.CONF["BIN_JAD"],
                                               androconf.CONF["TMP_DIRECTORY"]))
        elif decompiler == "dex2fernflower":
            d.set_decompiler(DecompilerDex2Fernflower(d,
                                                      androconf.CONF["PATH_DEX2JAR"],
                                                      androconf.CONF["BIN_DEX2JAR"],
                                                      androconf.CONF["PATH_FERNFLOWER"],
                                                      androconf.CONF["BIN_FERNFLOWER"],
                                                      androconf.CONF["OPTIONS_FERNFLOWER"],
                                                      androconf.CONF["TMP_DIRECTORY"]))
        elif decompiler == "ded":
            d.set_decompiler(DecompilerDed(d,
                                           androconf.CONF["PATH_DED"],
                                           androconf.CONF["BIN_DED"],
                                           androconf.CONF["TMP_DIRECTORY"]))
        else:
            d.set_decompiler(DecompilerDAD(d, dx))
def AnalyzeElf(filename, raw=False):
    """Parse an ELF binary and export its functions as Python attributes.

    :param filename: path to the ELF file, or its contents when raw is True
    :param raw: True if filename is already a buffer
    :rtype: :class:`ELF` object
    """
    # avoid to install smiasm for everybody
    from androguard.core.binaries.elf import ELF
    e = None
    if raw == False:
        e = ELF(open(filename, "rb").read())
    else:
        e = ELF(filename)
    ExportElfToPython(e)
    return e
def ExportElfToPython(e):
    """Attach every ELF function to *e* as a FUNCTION_<name> attribute.

    This makes functions tab-completable from the interactive shell.
    """
    for func in e.get_functions():
        setattr(e, "FUNCTION_" + func.name, func)
def AnalyzeJAR(filename, raw=False):
    """Open a JAR and parse every class it contains.

    :param filename: path to the JAR, or its contents when raw is True
    :param raw: True if filename is already a buffer
    :rtype: tuple of the :class:`JAR` object and a dict of parsed classes
    """
    androconf.debug("JAR ...")
    a = JAR(filename, raw)
    d = AnalyzeClasses(a.get_classes())
    return a, d
def AnalyzeClasses(classes):
    """Map each (name, bytecode) entry to its parsed :class:`JVMFormat`."""
    return {entry[0]: JVMFormat(entry[1]) for entry in classes}
def main(options, arguments):
    """Dispatch on the parsed command-line options (Python 2 script entry)."""
    # --shell wins over everything; otherwise an input file is required
    # for the display/method/field/xpermissions actions.
    if options.shell != None:
        interact()
    elif options.input != None :
        _a = AndroguardS( options.input )
        if options.pretty != None :
            init_print_colors()
        if options.display != None :
            if options.pretty != None :
                _a.ianalyze()
                _a.pretty_show()
            else :
                _a.show()
        elif options.method != None :
            for method in _a.get("method", options.method) :
                if options.pretty != None :
                    # NOTE(review): ianalyze() is re-run for every matching
                    # method here — possibly redundant work.
                    _a.ianalyze()
                    method.pretty_show()
                else :
                    method.show()
        elif options.field != None :
            for field in _a.get("field", options.field) :
                field.show()
        elif options.xpermissions != None :
            _a.ianalyze()
            perms_access = _a.get_analysis().get_permissions( [] )
            for perm in perms_access :
                print "PERM : ", perm
                for path in perms_access[ perm ] :
                    show_Path( _a.get_vm(), path )
    elif options.version != None :
        print "Androlyze version %s" % androconf.ANDROGUARD_VERSION
if __name__ == "__main__" :
    parser = OptionParser()
    for option in options :
        # 'name' holds the flag strings; the remaining dict entries are
        # passed through as add_option keyword arguments.
        param = option['name']
        del option['name']
        parser.add_option(*param, **option)
    options, arguments = parser.parse_args()
    # Leftover positional arguments are exposed to the interactive shell
    # via sys.argv.
    sys.argv[:] = arguments
main(options, arguments) | unknown | codeparrot/codeparrot-clean | ||
//// [tests/cases/conformance/ambient/ambientEnumDeclaration2.ts] ////
//// [ambientEnumDeclaration2.ts]
// In ambient enum declarations that specify no const modifier, enum member declarations
// that omit a value are considered computed members (as opposed to having auto- incremented values assigned).
declare enum E {
a, // E.a
b, // E.b
}
declare const enum E1 {
a, // E.a = 0
b, // E.b = 1
}
//// [ambientEnumDeclaration2.js]
"use strict";
// In ambient enum declarations that specify no const modifier, enum member declarations
// that omit a value are considered computed members (as opposed to having auto- incremented values assigned). | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/ambientEnumDeclaration2.js |
"""
Process validation data retrieved using fetch_validation_data.py. Two types
of data are expected. A file at `validations/unlisted-addons.txt` that contains
the guid of each unlisted addon and input on STDIN which has the validation
JSON data for each validation to check. See fetch_validation_data.py for how
this data is retrieved. Results are returned on STDOUT.
The following reports are supported:
* count - Return signing errors ordered by addon unique frequency in the
format: `error.id.dot.separated total_count unique_addon_count`.
* context - Return the context for 5 most common signing errors in the JSON
format: `{"context": ["", ...], "error": "error.id"}`.
Usage:
cat my-test-data-*.txt | python validations.py <report> > results.txt
"""
import itertools
import json
import sys
ACTION_CONTEXT = 'context'
ACTION_COUNT = 'count'
ACTIONS = (ACTION_CONTEXT, ACTION_COUNT)
def parse_validations(results):
    """Lazily decode each raw JSON line into a Python object."""
    return (json.loads(line) for line in results)
def unlisted_validations(results, unlisted_addons=None):
    """Filter to results belonging to unlisted add-ons.

    A result qualifies when its metadata carries an id and either the
    result is flagged unlisted or the id appears in *unlisted_addons*
    (loaded from disk at call time when not supplied).
    """
    if unlisted_addons is None:
        unlisted_addons = get_unlisted_addons()
    return (r for r in results
            if 'id' in r['metadata']
            and (not r['metadata'].get('listed', True)
                 or r['metadata']['id'] in unlisted_addons))
def severe_validations(results):
    """Keep only results whose signing summary reports any severity at all."""
    return (result for result in results
            if any(result['signing_summary'][level] > 0
                   for level in ('high', 'medium', 'low')))
def error_messages(results):
    """Flatten results into one record per signing-severity message."""
    for result in results:
        for message in result['messages']:
            if 'signing_severity' not in message:
                continue
            yield {'addon': result['metadata']['id'],
                   'message_id': '.'.join(message['id']),
                   'context': message['context']}
def sort_by_message(results):
    """Return the records ordered by message_id (prerequisite for groupby)."""
    return sorted(results, key=lambda record: record['message_id'])
def group_by_message(results):
    """Group consecutive records sharing a message_id (input must be sorted)."""
    return itertools.groupby(results, key=lambda record: record['message_id'])
def extract_error_results(results):
    """Summarize each (error, messages) group into totals and contexts."""
    for error, messages in results:
        batch = list(messages)
        affected = set(msg['addon'] for msg in batch)
        yield {
            'error': error,
            'total': len(batch),
            'unique': len(affected),
            'contexts': [msg['context'] for msg in batch],
        }
def sort_results_by_unique(results):
    """Order error summaries by unique add-on count, most affected first."""
    return sorted(results, key=lambda summary: summary['unique'], reverse=True)
def format_error_count(results):
    """Render each summary as 'error total unique' for the count report."""
    return ('{error} {total} {unique}'.format(**summary)
            for summary in results)
def format_contexts(results):
    """Emit one JSON line per context of every error summary."""
    for summary in results:
        error = summary['error']
        for context in summary['contexts']:
            yield json.dumps({
                'error': error,
                'context': context,
            })
def get_unlisted_addons():
    """Return the set of unlisted add-on guids, one per line, from
    validations/unlisted-addons.txt (see module docstring)."""
    with open('validations/unlisted-addons.txt') as f:
        return set(guid.strip() for guid in f)
def main(action):
    """Assemble and run the processing pipeline for *action*.

    :param action: one of ACTIONS ('context' or 'count')
    :raises ValueError: if action is not recognized
    """
    # Shared stages: parse STDIN JSON, narrow to severe results from
    # unlisted add-ons, explode into per-message records, then group and
    # summarize per error id, sorted by unique add-on count.
    pipeline = [
        parse_validations,
        unlisted_validations,
        severe_validations,
        error_messages,
        sort_by_message,
        group_by_message,
        extract_error_results,
        sort_results_by_unique,
    ]
    if action == ACTION_CONTEXT:
        # Only get context for the top 5 errors (they're already sorted by
        # unique occurrences so we can just take the first 5).
        pipeline.append(lambda results: itertools.islice(results, 5))
        pipeline.append(format_contexts)
    elif action == ACTION_COUNT:
        pipeline.append(format_error_count)
    else:
        raise ValueError('{0} is not a valid action'.format(action))
    process_pipeline(pipeline)
def process_pipeline(pipeline):
    """Thread STDIN through each pipeline stage and print the final lines.

    The stages are lazy generators, so nothing is evaluated until the
    final loop drains the chain.  (Python 2 print statement.)
    """
    # Read from STDIN.
    val = sys.stdin
    # Process through the pipeline.
    for fn in pipeline:
        val = fn(val)
    # Print the results.
    for line in val:
        print line
if __name__ == '__main__':
    # Require exactly one argument, and it must be a known action.
    if len(sys.argv) != 2 or sys.argv[1] not in ACTIONS:
        print """Usage: python {name} <action>
action: {actions}
values are read from STDIN""".format(
            name=sys.argv[0], actions='|'.join(ACTIONS))
        sys.exit(1)
else:
main(sys.argv[1]) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import hs_core.models
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
('pages', '__first__'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
('hs_core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='NetcdfMetaData',
fields=[
('coremetadata_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='hs_core.CoreMetaData')),
],
options={
},
bases=('hs_core.coremetadata',),
),
migrations.CreateModel(
name='NetcdfResource',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page')),
('comments_count', models.IntegerField(default=0, editable=False)),
('rating_count', models.IntegerField(default=0, editable=False)),
('rating_sum', models.IntegerField(default=0, editable=False)),
('rating_average', models.FloatField(default=0, editable=False)),
('public', models.BooleanField(default=True, help_text=b'If this is true, the resource is viewable and downloadable by anyone')),
('frozen', models.BooleanField(default=False, help_text=b'If this is true, the resource should not be modified')),
('do_not_distribute', models.BooleanField(default=False, help_text=b'If this is true, the resource owner has to designate viewers')),
('discoverable', models.BooleanField(default=True, help_text=b'If this is true, it will turn up in searches.')),
('published_and_frozen', models.BooleanField(default=False, help_text=b'Once this is true, no changes can be made to the resource')),
('content', models.TextField()),
('short_id', models.CharField(default=hs_core.models.short_id, max_length=32, db_index=True)),
('doi', models.CharField(help_text=b"Permanent identifier. Never changes once it's been set.", max_length=1024, null=True, db_index=True, blank=True)),
('object_id', models.PositiveIntegerField(null=True, blank=True)),
('content_type', models.ForeignKey(blank=True, to='contenttypes.ContentType', null=True)),
('creator', models.ForeignKey(related_name='creator_of_hs_app_netcdf_netcdfresource', to=settings.AUTH_USER_MODEL, help_text=b'This is the person who first uploaded the resource')),
('edit_groups', models.ManyToManyField(help_text=b'This is the set of Hydroshare Groups who can edit the resource', related_name='group_editable_hs_app_netcdf_netcdfresource', null=True, to='auth.Group', blank=True)),
('edit_users', models.ManyToManyField(help_text=b'This is the set of Hydroshare Users who can edit the resource', related_name='user_editable_hs_app_netcdf_netcdfresource', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
('last_changed_by', models.ForeignKey(related_name='last_changed_hs_app_netcdf_netcdfresource', to=settings.AUTH_USER_MODEL, help_text=b'The person who last changed the resource', null=True)),
('owners', models.ManyToManyField(help_text=b'The person who has total ownership of the resource', related_name='owns_hs_app_netcdf_netcdfresource', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(related_name='netcdfresources', verbose_name='Author', to=settings.AUTH_USER_MODEL)),
('view_groups', models.ManyToManyField(help_text=b'This is the set of Hydroshare Groups who can view the resource', related_name='group_viewable_hs_app_netcdf_netcdfresource', null=True, to='auth.Group', blank=True)),
('view_users', models.ManyToManyField(help_text=b'This is the set of Hydroshare Users who can view the resource', related_name='user_viewable_hs_app_netcdf_netcdfresource', null=True, to=settings.AUTH_USER_MODEL, blank=True)),
],
options={
'ordering': ('_order',),
'verbose_name': 'Multidimensional (NetCDF)',
},
bases=('pages.page', models.Model),
),
migrations.CreateModel(
name='OriginalCoverage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('_value', models.CharField(max_length=1024, null=True)),
('projection_string_type', models.CharField(max_length=20, null=True, choices=[(b'', b'---------'), (b'EPSG Code', b'EPSG Code'), (b'OGC WKT Projection', b'OGC WKT Projection'), (b'Proj4 String', b'Proj4 String')])),
('projection_string_text', models.TextField(null=True, blank=True)),
('content_type', models.ForeignKey(related_name='hs_app_netcdf_originalcoverage_related', to='contenttypes.ContentType')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Variable',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('name', models.CharField(max_length=100)),
('unit', models.CharField(max_length=100)),
('type', models.CharField(max_length=100, choices=[(b'Char', b'Char'), (b'Byte', b'Byte'), (b'Short', b'Short'), (b'Int', b'Int'), (b'Float', b'Float'), (b'Double', b'Double'), (b'Int64', b'Int64'), (b'Unsigned Byte', b'Unsigned Byte'), (b'Unsigned Short', b'Unsigned Short'), (b'Unsigned Int', b'Unsigned Int'), (b'Unsigned Int64', b'Unsigned Int64'), (b'String', b'String'), (b'User Defined Type', b'User Defined Type'), (b'Unknown', b'Unknown')])),
('shape', models.CharField(max_length=100)),
('descriptive_name', models.CharField(max_length=100, null=True, verbose_name=b'long name', blank=True)),
('method', models.TextField(null=True, verbose_name=b'comment', blank=True)),
('missing_value', models.CharField(max_length=100, null=True, blank=True)),
('content_type', models.ForeignKey(related_name='hs_app_netcdf_variable_related', to='contenttypes.ContentType')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='originalcoverage',
unique_together=set([('content_type', 'object_id')]),
),
] | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python3
# Copyright (c) 2013, 2014 Austin Clements
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import os
import errno
import argparse
import shlex
import json
import subprocess
import re
import collections
import hashlib
import shutil
import curses
import filecmp
import io
import traceback
import time
try:
import fcntl
except ImportError:
# Non-UNIX platform
fcntl = None
def debug(string, *args):
    """Write a str.format-style message to stderr, only when debugging is on."""
    if not debug.enabled:
        return
    print(string.format(*args), file=sys.stderr)


# Debug output is off unless enabled (e.g. via the --debug flag).
debug.enabled = False
def debug_exc():
    """Dump the active exception's traceback when debug output is enabled."""
    if not debug.enabled:
        return
    traceback.print_exc()
def main():
    """latexrun entry point: parse arguments, clean and/or build, report."""
    # Parse command-line
    arg_parser = argparse.ArgumentParser(
        description="""A 21st century LaTeX wrapper,
        %(prog)s runs latex (and bibtex) the right number of times so you
        don't have to,
        strips the log spew to make errors visible,
        and plays well with standard build tools."""
    )
    arg_parser.add_argument(
        "-o",
        metavar="FILE",
        dest="output",
        default=None,
        help="Output file name (default: derived from input file)",
    )
    arg_parser.add_argument(
        "--latex-cmd",
        metavar="CMD",
        default="pdflatex",
        help="Latex command (default: %(default)s)",
    )
    arg_parser.add_argument(
        "--latex-args",
        metavar="ARGS",
        type=arg_parser_shlex,
        help="Additional command-line arguments for latex."
        " This will be parsed and split using POSIX shell rules.",
    )
    arg_parser.add_argument(
        "--bibtex-cmd",
        metavar="CMD",
        default="bibtex",
        help="Bibtex command (default: %(default)s)",
    )
    arg_parser.add_argument(
        "--bibtex-args",
        metavar="ARGS",
        type=arg_parser_shlex,
        help="Additional command-line arguments for bibtex",
    )
    arg_parser.add_argument(
        "--max-iterations",
        metavar="N",
        type=int,
        default=10,
        help="Max number of times to run latex before giving up"
        " (default: %(default)s)",
    )
    arg_parser.add_argument(
        "-W",
        metavar="(no-)CLASS",
        action=ArgParserWarnAction,
        dest="nowarns",
        default=set(["underfull"]),
        help="Enable/disable warning from CLASS, which can be any package name, "
        "LaTeX warning class (e.g., font), bad box type "
        '(underfull, overfull, loose, tight), or "all"',
    )
    arg_parser.add_argument(
        "-O",
        metavar="DIR",
        dest="obj_dir",
        default="latex.out",
        help="Directory for intermediate files and control database "
        "(default: %(default)s)",
    )
    arg_parser.add_argument(
        "--color",
        choices=("auto", "always", "never"),
        default="auto",
        help="When to colorize messages",
    )
    arg_parser.add_argument(
        "--verbose-cmds",
        action="store_true",
        default=False,
        help="Print commands as they are executed",
    )
    arg_parser.add_argument(
        "--debug", action="store_true", help="Enable detailed debug output"
    )
    actions = arg_parser.add_argument_group("actions")
    actions.add_argument("--clean-all", action="store_true", help="Delete output files")
    actions.add_argument("file", nargs="?", help=".tex file to compile")
    args = arg_parser.parse_args()
    if not any([args.clean_all, args.file]):
        arg_parser.error("at least one action is required")
    # Unparsed --latex-args/--bibtex-args default to None; normalize to [].
    args.latex_args = args.latex_args or []
    args.bibtex_args = args.bibtex_args or []
    verbose_cmd.enabled = args.verbose_cmds
    debug.enabled = args.debug

    # A note about encodings: POSIX encoding is a mess; TeX encoding
    # is a disaster. Our goal is to make things no worse, so we want
    # byte-accurate round-tripping of TeX messages. Since TeX
    # messages are *basically* text, we use strings and
    # surrogateescape'ing for both input and output. I'm not fond of
    # setting surrogateescape globally, but it's far easier than
    # dealing with every place we pass TeX output through.
    # Conveniently, JSON can round-trip surrogateescape'd strings, so
    # our control database doesn't need special handling.
    sys.stdout = io.TextIOWrapper(
        sys.stdout.buffer,
        encoding=sys.stdout.encoding,
        errors="surrogateescape",
        line_buffering=sys.stdout.line_buffering,
    )
    sys.stderr = io.TextIOWrapper(
        sys.stderr.buffer,
        encoding=sys.stderr.encoding,
        errors="surrogateescape",
        line_buffering=sys.stderr.line_buffering,
    )
    Message.setup_color(args.color)

    # Open control database.
    dbpath = os.path.join(args.obj_dir, ".latexrun.db")
    if not os.path.exists(dbpath) and os.path.exists(".latexrun.db"):
        # The control database used to live in the source directory.
        # Support this for backwards compatibility.
        dbpath = ".latexrun.db"
    try:
        db = DB(dbpath)
    except (ValueError, OSError) as e:
        print(
            "error opening {}: {}".format(
                e.filename if hasattr(e, "filename") else dbpath, e
            ),
            file=sys.stderr,
        )
        debug_exc()
        sys.exit(1)

    # Clean
    if args.clean_all:
        try:
            db.do_clean(args.obj_dir)
        except OSError as e:
            print(e, file=sys.stderr)
            debug_exc()
            sys.exit(1)

    # Build
    if not args.file:
        return
    task_commit = None
    try:
        # LaTeX, LaTeXCommit, BibTeX, TaskError and run_tasks are defined
        # later in this file (outside this excerpt).
        task_latex = LaTeX(
            db, args.file, args.latex_cmd, args.latex_args, args.obj_dir, args.nowarns
        )
        task_commit = LaTeXCommit(db, task_latex, args.output)
        task_bibtex = BibTeX(
            db,
            task_latex,
            args.bibtex_cmd,
            args.bibtex_args,
            args.nowarns,
            args.obj_dir,
        )
        tasks = [task_latex, task_commit, task_bibtex]
        stable = run_tasks(tasks, args.max_iterations)

        # Print final task output and gather exit status
        status = 0
        for task in tasks:
            status = max(task.report(), status)
        if not stable:
            print(
                "error: files are still changing after {} iterations; giving up".format(
                    args.max_iterations
                ),
                file=sys.stderr,
            )
            status = max(status, 1)
    except TaskError as e:
        print(str(e), file=sys.stderr)
        debug_exc()
        status = 1

    # Report final status, if interesting
    fstatus = "There were errors" if task_commit is None else task_commit.status
    if fstatus:
        output = args.output
        if output is None:
            if task_latex.get_outname() is not None:
                output = os.path.basename(task_latex.get_outname())
            else:
                output = "output"
        if Message._color:
            terminfo.send("bold", ("setaf", 1))
        print("{}; {} not updated".format(fstatus, output))
        if Message._color:
            terminfo.send("sgr0")
    sys.exit(status)
def arg_parser_shlex(string):
    """argparse type: split *string* into tokens using POSIX shell rules."""
    try:
        tokens = shlex.split(string)
    except ValueError as e:
        # Re-raise as the type argparse expects, dropping the context.
        raise argparse.ArgumentTypeError(str(e)) from None
    return tokens
class ArgParserWarnAction(argparse.Action):
    """argparse action for -W [no-]CLASS warning toggles.

    The destination holds the set of *disabled* warning classes: "all"
    clears it, "no-CLASS" adds CLASS, and a bare CLASS removes it.
    """

    def __call__(self, parser, namespace, value, option_string=None):
        disabled = getattr(namespace, self.dest)
        if value == "all":
            disabled.clear()
        elif value.startswith("no-"):
            disabled.add(value[3:])
        else:
            disabled.discard(value)
        setattr(namespace, self.dest, disabled)
def verbose_cmd(args, cwd=None, env=None):
    """Echo a command to stderr in shell-quoted form, when enabled.

    cwd is rendered as a (cd ... && ...) wrapper; env entries that differ
    from the current environment are shown as VAR=value prefixes.
    """
    if not verbose_cmd.enabled:
        return
    cmd = " ".join(shlex.quote(arg) for arg in args)
    if cwd is not None:
        cmd = "(cd {} && {})".format(shlex.quote(cwd), cmd)
    if env is not None:
        for k, v in env.items():
            if os.environ.get(k) != v:
                cmd = "{}={} {}".format(k, shlex.quote(v), cmd)
    print(cmd, file=sys.stderr)


# Command echoing is off unless enabled (e.g. via --verbose-cmds).
verbose_cmd.enabled = False
def mkdir_p(path):
    """Create *path* like `mkdir -p`: no error if it already is a directory.

    :raises OSError: if creation fails or *path* exists as a non-directory
    """
    # exist_ok=True swallows EEXIST only when the existing path really is
    # a directory, which is exactly what the original errno/isdir dance
    # checked by hand.
    os.makedirs(path, exist_ok=True)
class DB:
    """A latexrun control database."""

    # Bump this when the on-disk JSON schema changes.
    _VERSION = "latexrun-db-v2"

    def __init__(self, filename):
        """Open (or create) the JSON database at *filename*.

        :raises ValueError: if the file exists but is not a latexrun
            database or has an unknown schema version
        """
        self.__filename = filename

        # Make sure database directory exists
        if os.path.dirname(self.__filename):
            os.makedirs(os.path.dirname(self.__filename), exist_ok=True)

        # Lock the database if possible. We don't release this lock
        # until the process exits.
        lockpath = self.__filename + ".lock"
        if fcntl is not None:
            lockfd = os.open(lockpath, os.O_CREAT | os.O_WRONLY | os.O_CLOEXEC, 0o666)
            # Note that this is actually an fcntl lock, not a lockf
            # lock. Don't be fooled.
            fcntl.lockf(lockfd, fcntl.LOCK_EX, 1)

        try:
            fp = open(filename, "r")
        except FileNotFoundError:
            debug("creating new database")
            self.__val = {"version": DB._VERSION}
        else:
            debug("loading database")
            self.__val = json.load(fp)
            if "version" not in self.__val:
                # NOTE(review): this message has no {} placeholder, so the
                # .format(filename) call is a no-op — the filename is never
                # interpolated into the error.
                raise ValueError(
                    "file exists, but does not appear to be a latexrun database".format(
                        filename
                    )
                )
            if self.__val["version"] != DB._VERSION:
                raise ValueError(
                    "unknown database version {!r}".format(self.__val["version"])
                )

    def commit(self):
        """Atomically write the database to disk (tmp file + rename)."""
        debug("committing database")
        # Atomically commit database
        tmp_filename = self.__filename + ".tmp"
        with open(tmp_filename, "w") as fp:
            json.dump(self.__val, fp, indent=2, separators=(",", ": "))
            fp.flush()
            os.fsync(fp.fileno())
        os.rename(tmp_filename, self.__filename)

    def get_summary(self, task_id):
        """Return the recorded summary for the given task or None."""
        return self.__val.get("tasks", {}).get(task_id)

    def set_summary(self, task_id, summary):
        """Set the summary for the given task."""
        self.__val.setdefault("tasks", {})[task_id] = summary

    def add_clean(self, filename):
        """Add an output file to be cleaned.

        Unlike the output files recorded in the task summaries,
        cleanable files strictly accumulate until a clean is
        performed.
        """
        # Record the file's current hash so do_clean can detect changes.
        self.__val.setdefault("clean", {})[filename] = hash_cache.get(filename)

    def do_clean(self, obj_dir=None):
        """Remove output files and delete database.

        If obj_dir is not None and it is empty after all files are
        removed, it will also be removed.
        """
        for f, want_hash in self.__val.get("clean", {}).items():
            have_hash = hash_cache.get(f)
            if have_hash is not None:
                # Only delete files whose contents we produced; anything
                # modified since is preserved with a warning.
                if want_hash == have_hash:
                    debug("unlinking {}", f)
                    hash_cache.invalidate(f)
                    os.unlink(f)
                else:
                    print(
                        "warning: {} has changed; not removing".format(f),
                        file=sys.stderr,
                    )
        self.__val = {"version": DB._VERSION}
        try:
            os.unlink(self.__filename)
        except FileNotFoundError:
            pass
        if obj_dir is not None:
            # Best effort: rmdir fails (and is ignored) if non-empty.
            try:
                os.rmdir(obj_dir)
            except OSError:
                pass
class HashCache:
    """Cache of file hashes.

    As latexrun reaches fixed-point, it hashes the same files over and
    over, many of which never change.  Since hashing is somewhat
    expensive, we keep a simple cache of these hashes.
    """

    def __init__(self):
        # Maps (st_dev, st_ino) -> hex SHA-256 digest, or "*" for a
        # clobbered (unknown-content) file.
        self.__cache = {}

    def get(self, filename):
        """Return the hash of filename, or * if it was clobbered."""
        try:
            with open(filename, "rb") as fp:
                # Key on device/inode, so a rename keeps its cache entry.
                st = os.fstat(fp.fileno())
                key = (st.st_dev, st.st_ino)
                if key in self.__cache:
                    return self.__cache[key]
                debug("hashing {}", filename)
                h = hashlib.sha256()
                while True:
                    block = fp.read(256 * 1024)
                    if not len(block):
                        break
                    h.update(block)
                self.__cache[key] = h.hexdigest()
                return self.__cache[key]
        except (FileNotFoundError, IsADirectoryError):
            # Missing files (and directories) have no hash.
            return None

    def clobber(self, filename):
        """If filename's hash is not known, record an invalid hash.

        This can be used when filename was overwritten before we were
        necessarily able to obtain its hash.  filename must exist.
        """
        st = os.stat(filename)
        key = (st.st_dev, st.st_ino)
        if key not in self.__cache:
            self.__cache[key] = "*"

    def invalidate(self, filename):
        """Drop filename's cache entry (wipe everything if stat fails)."""
        try:
            st = os.stat(filename)
        except OSError as e:
            # Pessimistically wipe the whole cache
            debug("wiping hash cache ({})", e)
            self.__cache.clear()
        else:
            key = (st.st_dev, st.st_ino)
            if key in self.__cache:
                del self.__cache[key]
hash_cache = HashCache()
class _Terminfo:
    """Minimal terminfo wrapper: lazily looks up capabilities and writes
    them straight to the underlying binary stdout."""

    def __init__(self):
        self.__tty = os.isatty(sys.stdout.fileno())
        if self.__tty:
            curses.setupterm()
        # Capability-name -> escape bytes (or None if unsupported).
        self.__ti = {}

    def __ensure(self, cap):
        # Resolve and memoize a capability string; None means unusable.
        if cap not in self.__ti:
            if not self.__tty:
                string = None
            else:
                string = curses.tigetstr(cap)
            if string is None or b"$<" in string:
                # Don't have this capability or it has a pause
                string = None
            self.__ti[cap] = string
        return self.__ti[cap]

    def has(self, *caps):
        """Return True if every named capability is available."""
        return all(self.__ensure(cap) is not None for cap in caps)

    def send(self, *caps):
        """Emit each capability; a tuple means (cap, *tparm arguments)."""
        # Flush TextIOWrapper to the binary IO buffer
        sys.stdout.flush()
        for cap in caps:
            # We should use curses.putp here, but it's broken in
            # Python3 because it writes directly to C's buffered
            # stdout and there's no way to flush that.
            if isinstance(cap, tuple):
                s = curses.tparm(self.__ensure(cap[0]), *cap[1:])
            else:
                s = self.__ensure(cap)
            sys.stdout.buffer.write(s)
class Progress:
    """Single-line progress display, used as a context manager.

    Enabled only when debugging is off and the terminal supports the
    needed capabilities; decided once, on first instantiation.
    """

    # Tri-state: None = undecided, then True/False for the process lifetime.
    _enabled = None

    def __init__(self, prefix):
        self.__prefix = prefix
        if Progress._enabled is None:
            Progress._enabled = (not debug.enabled) and terminfo.has(
                "cr", "el", "rmam", "smam"
            )

    def __enter__(self):
        self.last = ""
        self.update("")
        return self

    def __exit__(self, typ, value, traceback):
        if Progress._enabled:
            # Beginning of line and clear
            terminfo.send("cr", "el")
            sys.stdout.flush()

    def update(self, msg):
        """Redraw the progress line as '[prefix] msg' if it changed."""
        if not Progress._enabled:
            return
        out = "[" + self.__prefix + "]"
        if msg:
            out += " " + msg
        if out != self.last:
            # Beginning of line, clear line, disable wrap
            terminfo.send("cr", "el", "rmam")
            sys.stdout.write(out)
            # Enable wrap
            terminfo.send("smam")
            self.last = out
            sys.stdout.flush()
class Message(collections.namedtuple("Message", "typ filename lineno msg")):
    """One diagnostic extracted from a LaTeX log.

    Fields:
      typ      -- "error", "warning", or "info"
      filename -- file the message refers to, or None/empty for none
      lineno   -- line number, or None if unknown
      msg      -- message text

    setup_color must be called before emit so the class attribute
    _color exists.
    """

    def emit(self):
        """Print this message to stdout, colorized when enabled."""
        if self.filename:
            # Strip a leading "./" for tidier output.
            if self.filename.startswith("./"):
                finfo = self.filename[2:]
            else:
                finfo = self.filename
        else:
            finfo = "<no file>"
        if self.lineno is not None:
            finfo += ":" + str(self.lineno)
        finfo += ": "
        if self._color:
            terminfo.send("bold")
        sys.stdout.write(finfo)
        if self.typ != "info":
            if self._color:
                # setaf 5 for warnings, 1 (red) for errors.
                terminfo.send(("setaf", 5 if self.typ == "warning" else 1))
            sys.stdout.write(self.typ + ": ")
        if self._color:
            terminfo.send("sgr0")
        sys.stdout.write(self.msg + "\n")

    @classmethod
    def setup_color(cls, state):
        """Set color mode from "never", "always", or "auto".

        Raises ValueError for any other state.
        """
        if state == "never":
            cls._color = False
        elif state == "always":
            cls._color = True
        elif state == "auto":
            cls._color = terminfo.has("setaf", "bold", "sgr0")
        else:
            # Bug fix: "{:r}" is an invalid format spec and raised
            # ValueError("Unknown format code 'r' ...") instead of the
            # intended message; "{!r}" is the repr conversion.
            raise ValueError("Illegal color state {!r}".format(state))
##################################################################
# Task framework
#
# Set by tasks to make run_tasks stop after the current pass (used
# after unrecoverable LaTeX errors that re-running cannot fix).
terminate_task_loop = False
# Wall-clock time at program start; used by Task._input_unknown_input
# as a value that is stable within this invocation but differs on the
# next one, forcing a conservative re-run then.
start_time = time.time()
def run_tasks(tasks, max_iterations):
    """Run tasks round-robin until every task is stable.

    The loop also stops early if terminate_task_loop becomes true;
    tasks set it after a fatal error that re-running cannot eliminate
    (even though that state is not necessarily "stable").

    Returns True if a fixed point was reached (or terminate_task_loop
    was set) within max_iterations passes, else False.
    """
    global terminate_task_loop
    terminate_task_loop = False
    consecutive_stable = 0
    for _ in range(max_iterations):
        for task in tasks:
            if not task.stable():
                # Task had work to do; the stability streak restarts.
                task.run()
                consecutive_stable = 0
            else:
                consecutive_stable += 1
                if consecutive_stable == len(tasks):
                    debug("fixed-point reached")
                    return True
            if terminate_task_loop:
                debug("terminate_task_loop set")
                return True
    debug("fixed-point not reached")
    return False
class TaskError(Exception):
    """Raised when a task cannot run or cannot record its results."""
class Task:
    """A deterministic computation whose inputs and outputs can be captured."""
    def __init__(self, db, task_id):
        # db is the persistent control database that stores run
        # summaries; task_id must be unique and stable across runs.
        self.__db = db
        self.__task_id = task_id
    def __debug(self, string, *args):
        # Debug-log helper that prefixes messages with this task's id.
        if debug.enabled:
            debug("task {}: {}", self.__task_id, string.format(*args))
    def stable(self):
        """Return True if running this task will not affect system state.
        Functionally, let f be the task, and s be the system state.
        Then s' = f(s). If it must be that s' == s (that is, f has
        reached a fixed point), then this function must return True.
        """
        last_summary = self.__db.get_summary(self.__task_id)
        if last_summary is None:
            # Task has never run, so running it will modify system
            # state
            changed = "never run"
        else:
            # If any of the inputs have changed since the last run of
            # this task, the result may change, so re-run the task.
            # Also, it's possible something else changed an output
            # file, in which case we also want to re-run the task, so
            # check the outputs, too.
            changed = self.__summary_changed(last_summary)
        if changed:
            self.__debug("unstable (changed: {})", changed)
            return False
        else:
            self.__debug("stable")
            return True
    def __summary_changed(self, summary):
        """Test if any inputs changed from summary.
        Returns a string describing the changed input, or None.
        """
        for dep in summary["deps"]:
            fn, args, val = dep
            method = getattr(self, "_input_" + fn, None)
            if method is None:
                return "unknown dependency method {}".format(fn)
            # An "unstable" dependency always forces a re-run; any other
            # input is re-queried and compared with its recorded value.
            if method == self._input_unstable or method(*args) != val:
                return "{}{}".format(fn, tuple(args))
        return None
    def _input(self, name, *args):
        """Register an input for this run.
        This calls self._input_<name>(*args) to get the value of this
        input. This function should run quickly and return some
        projection of system state that affects the result of this
        computation.
        Both args and the return value must be JSON serializable.
        """
        method = getattr(self, "_input_" + name)
        val = method(*args)
        # Dedup: the same (name, args, value) triple is recorded once.
        if [name, args, val] not in self.__deps:
            self.__deps.append([name, args, val])
        return val
    def run(self):
        """Execute the task once and persist its summary to the database."""
        # Before we run the task, pre-hash any files that were output
        # files in the last run. These may be input by this run and
        # then clobbered, at which point it will be too late to get an
        # input hash. Ideally we would only hash files that were
        # *both* input and output files, but latex doesn't tell us
        # about input files that didn't exist, so if we start from a
        # clean slate, we often require an extra run because we don't
        # know a file is input/output until after the second run.
        last_summary = self.__db.get_summary(self.__task_id)
        if last_summary is not None:
            for io_filename in last_summary["output_files"]:
                self.__debug("pre-hashing {}", io_filename)
                hash_cache.get(io_filename)
        # Run the task
        self.__debug("running")
        # __deps exists only while the task runs; _input appends to it.
        self.__deps = []
        result = self._execute()
        # Clear cached output file hashes
        for filename in result.output_filenames:
            hash_cache.invalidate(filename)
        # If the output files change, then the computation needs to be
        # re-run, so record them as inputs
        for filename in result.output_filenames:
            self._input("file", filename)
        # Update task summary in database
        self.__db.set_summary(self.__task_id, self.__make_summary(self.__deps, result))
        del self.__deps
        # Add output files to be cleaned
        for f in result.output_filenames:
            self.__db.add_clean(f)
        try:
            self.__db.commit()
        except OSError as e:
            raise TaskError(
                "error committing control database {}: {}".format(
                    getattr(e, "filename", "<unknown path>"), e
                )
            ) from e
    def __make_summary(self, deps, run_result):
        """Construct a new task summary."""
        return {
            "deps": deps,
            "output_files": {f: hash_cache.get(f) for f in run_result.output_filenames},
            "extra": run_result.extra,
        }
    def _execute(self):
        """Abstract: Execute this task.
        Subclasses should implement this method to execute this task.
        This method must return a RunResult giving the inputs that
        were used by the task and the outputs it produced.
        """
        raise NotImplementedError("Task._execute is abstract")
    def _get_result_extra(self):
        """Return the 'extra' result from the previous run, or None."""
        summary = self.__db.get_summary(self.__task_id)
        if summary is None:
            return None
        return summary["extra"]
    def report(self):
        """Report the task's results to stdout and return exit status.
        This may be called when the task has never executed.
        Subclasses should override this. The default implementation
        reports nothing and returns 0.
        """
        return 0
    # Standard input functions
    def _input_env(self, var):
        # Input function: value of environment variable var (or None).
        return os.environ.get(var)
    def _input_file(self, path):
        # Input function: content hash of path (None if unreadable).
        return hash_cache.get(path)
    def _input_unstable(self):
        """Mark this run as unstable, regardless of other inputs."""
        return None
    def _input_unknown_input(self):
        """An unknown input that may change after latexrun exits.
        This conservatively marks some unknown input that definitely
        won't change while latexrun is running, but may change before
        the user next runs latexrun. This allows the task to
        stabilize during this invocation, but will cause the task to
        re-run on the next invocation.
        """
        return start_time
class RunResult(collections.namedtuple("RunResult", "output_filenames extra")):
    """Outcome of one task execution.

    output_filenames -- every file the task wrote.
    extra            -- task-specific state to persist between runs
                        (e.g., so results can be reported later).
    """
##################################################################
# LaTeX task
#
def normalize_input_path(path):
    """Return a canonical relative path for an input file.

    The directory component is fully resolved (following symlinks) so
    the same file always produces the same path, but the file
    component is left alone because the literal name given to TeX
    affects its behavior.
    """
    head, tail = os.path.split(path)
    npath = os.path.join(os.path.realpath(head), tail)
    # Bug fix: this previously returned os.path.relpath(path),
    # discarding the resolved npath computed above (npath was unused).
    return os.path.relpath(npath)
class LaTeX(Task):
    """Task that runs one latex pass over a single .tex file.

    Each _execute runs the configured latex command once, streams its
    output to a progress line, and registers the files latex read and
    wrote (from the -recorder .fls file) as task inputs/outputs.
    """
    def __init__(self, db, tex_filename, cmd, cmd_args, obj_dir, nowarns):
        # The task id derives from the normalized .tex path so the same
        # document maps to the same task across invocations.
        super().__init__(db, "latex::" + normalize_input_path(tex_filename))
        self.__tex_filename = tex_filename
        self.__cmd = cmd
        self.__cmd_args = cmd_args
        self.__obj_dir = obj_dir
        self.__nowarns = nowarns
        # Number of _execute calls so far; used to label the progress line.
        self.__pass = 0
    def _input_args(self):
        # Input function: the full latex command line (a change re-runs).
        # If filename starts with a character the tex command-line
        # treats specially, then tweak it so it doesn't.
        filename = self.__tex_filename
        if filename.startswith(("-", "&", "\\")):
            filename = "./" + filename
        # XXX Put these at the beginning in case the provided
        # arguments are malformed. Might want to do a best-effort
        # check for incompatible user-provided arguments (note:
        # arguments can be given with one or two dashes and those with
        # values can use an equals or a space).
        return (
            [self.__cmd]
            + self.__cmd_args
            + [
                "-interaction",
                "nonstopmode",
                "-recorder",
                "-output-directory",
                self.__obj_dir,
                filename,
            ]
        )
    def _execute(self):
        """Run latex once; return a RunResult with its outputs and status."""
        # Run latex
        self.__pass += 1
        args = self._input("args")
        debug("running {}", args)
        try:
            os.makedirs(self.__obj_dir, exist_ok=True)
        except OSError as e:
            raise TaskError("failed to create %s: " % self.__obj_dir + str(e)) from e
        try:
            verbose_cmd(args)
            p = subprocess.Popen(
                args,
                stdin=subprocess.DEVNULL,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )
            stdout, has_errors, missing_includes = self.__feed_terminal(p.stdout)
            status = p.wait()
        except OSError as e:
            raise TaskError("failed to execute latex task: " + str(e)) from e
        # Register environment variable inputs
        for env_var in [
            "TEXMFOUTPUT",
            "TEXINPUTS",
            "TEXFORMATS",
            "TEXPOOL",
            "TFMFONTS",
            "PATH",
        ]:
            self._input("env", env_var)
        jobname, outname = self.__parse_jobname(stdout)
        inputs, outputs = self.__parse_recorder(jobname)
        # LaTeX overwrites its own inputs. Mark its output files as
        # clobbered before we hash its input files.
        for path in outputs:
            # In some abort cases (e.g., >=100 errors), LaTeX claims
            # output files that don't actually exist.
            if os.path.exists(path):
                hash_cache.clobber(path)
        # Depend on input files. Task.run pre-hashed outputs from the
        # previous run, so if this isn't the first run and as long as
        # the set of outputs didn't change, we'll be able to get the
        # input hashes, even if they were clobbered.
        for path in inputs:
            self._input("file", path)
        if missing_includes:
            # Missing \includes are tricky. Ideally we'd depend on
            # the absence of some file, but in fact we'd have to
            # depend on the failure of a whole kpathsea lookup.
            # Rather than try to be clever, just mark this as an
            # unknown input so we'll run at least once on the next
            # invocation.
            self._input("unknown_input")
        if not self.__create_outdirs(stdout) and has_errors:
            # LaTeX reported unrecoverable errors (other than output
            # directory errors, which we just fixed). We could
            # continue to stabilize the document, which may change
            # some of the other problems reported (but not the
            # unrecoverable errors), or we can just abort now and get
            # back to the user quickly with the major errors. We opt
            # for the latter.
            global terminate_task_loop
            terminate_task_loop = True
            # This error could depend on something we failed to track.
            # It would be really confusing if we continued to report
            # the error after the user fixed it, so be conservative
            # and force a re-run next time.
            self._input("unknown_input")
        return RunResult(
            outputs, {"jobname": jobname, "outname": outname, "status": status}
        )
    def __feed_terminal(self, stdout):
        """Stream latex's output to the progress display as it arrives.
        Returns (captured output text, whether unrecoverable errors
        were seen, whether \\include'd files were missing).
        """
        prefix = "latex"
        if self.__pass > 1:
            prefix += " ({})".format(self.__pass)
        with Progress(prefix) as progress:
            buf = []
            filt = LaTeXFilter()
            while True:
                # Use os.read to read only what's available on the pipe,
                # without waiting to fill a buffer
                data = os.read(stdout.fileno(), 4096)
                if not data:
                    break
                # See "A note about encoding" above
                data = data.decode("ascii", errors="surrogateescape")
                buf.append(data)
                filt.feed(data)
                file_stack = filt.get_file_stack()
                if file_stack:
                    tos = file_stack[-1]
                    if tos.startswith("./"):
                        tos = tos[2:]
                    progress.update(">" * len(file_stack) + " " + tos)
                else:
                    progress.update("")
            # Were there unrecoverable errors?
            has_errors = any(msg.typ == "error" for msg in filt.get_messages())
        return "".join(buf), has_errors, filt.has_missing_includes()
    def __parse_jobname(self, stdout):
        """Extract the job name and output name from latex's output.
        We get these from latex because they depend on complicated
        file name parsing rules, are affected by arguments like
        -output-directory, and may be just "texput" if things fail
        really early. The output name may be None if there were no
        pages of output.
        """
        jobname = outname = None
        for m in re.finditer(
            r'^Transcript written on "?(.*)\.log"?\.$', stdout, re.MULTILINE | re.DOTALL
        ):
            jobname = m.group(1).replace("\n", "")
        if jobname is None:
            print(stdout, file=sys.stderr)
            raise TaskError("failed to extract job name from latex log")
        for m in re.finditer(
            r'^Output written on "?(.*\.[^ ."]+)"? \([0-9]+ page',
            stdout,
            re.MULTILINE | re.DOTALL,
        ):
            outname = m.group(1).replace("\n", "")
        if outname is None and not re.search(
            r"^No pages of output\.$|^! Emergency stop\.$"
            r"|^! ==> Fatal error occurred, no output PDF file produced!$",
            stdout,
            re.MULTILINE,
        ):
            print(stdout, file=sys.stderr)
            raise TaskError("failed to extract output name from latex log")
        # LuaTeX (0.76.0) doesn't include the output directory in the
        # logged transcript or output file name.
        if os.path.basename(jobname) == jobname and os.path.exists(
            os.path.join(self.__obj_dir, jobname + ".log")
        ):
            jobname = os.path.join(self.__obj_dir, jobname)
            if outname is not None:
                outname = os.path.join(self.__obj_dir, outname)
        return jobname, outname
    def __parse_recorder(self, jobname):
        """Parse file recorder output."""
        # XXX If latex fails because a file isn't found, that doesn't
        # go into the .fls file, but creating that file will affect
        # the computation, so it should be included as an input.
        # Though it's generally true that files can be added earlier
        # in search paths and will affect the output without us knowing.
        #
        # XXX This is a serious problem for bibtex, since the first
        # run won't depend on the .bbl file! But maybe the .aux file
        # will always cause a re-run, at which point the .bbl will
        # exist?
        filename = jobname + ".fls"
        try:
            recorder = open(filename)
        except OSError as e:
            raise TaskError("failed to open file recorder output: " + str(e)) from e
        pwd, inputs, outputs = "", set(), set()
        for linenum, line in enumerate(recorder):
            parts = line.rstrip("\n").split(" ", 1)
            if parts[0] == "PWD":
                pwd = parts[1]
            elif parts[0] in ("INPUT", "OUTPUT"):
                if parts[1].startswith("/"):
                    path = parts[1]
                else:
                    # Try to make "nice" paths, especially for clean
                    path = os.path.relpath(os.path.join(pwd, parts[1]))
                if parts[0] == "INPUT":
                    inputs.add(path)
                else:
                    outputs.add(path)
            else:
                raise TaskError(
                    "syntax error on line {} of {}".format(linenum, filename)
                )
        # Ironically, latex omits the .fls file itself
        outputs.add(filename)
        return inputs, outputs
    def __create_outdirs(self, stdout):
        """Create a missing output subdirectory implied by the log.
        Returns True (and marks the task unstable so latex re-runs)
        only when a directory was created; otherwise returns None.
        """
        # In some cases, such as \include'ing a file from a
        # subdirectory, TeX will attempt to create files in
        # subdirectories of the output directory that don't exist.
        # Detect this, create the output directory, and re-run.
        m = re.search("^! I can't write on file `(.*)'\\.$", stdout, re.M)
        if m and m.group(1).find("/") > 0 and "../" not in m.group(1):
            debug("considering creating output sub-directory for {}".format(m.group(1)))
            subdir = os.path.dirname(m.group(1))
            newdir = os.path.join(self.__obj_dir, subdir)
            if os.path.isdir(subdir) and not os.path.isdir(newdir):
                debug("creating output subdirectory {}".format(newdir))
                try:
                    mkdir_p(newdir)
                except OSError as e:
                    raise TaskError(
                        "failed to create output subdirectory: " + str(e)
                    ) from e
                self._input("unstable")
                return True
    def report(self):
        """Print filtered log messages; return latex's exit status."""
        extra = self._get_result_extra()
        if extra is None:
            return 0
        # Parse the log
        logfile = open(extra["jobname"] + ".log", "rt", errors="surrogateescape")
        for msg in self.__clean_messages(
            LaTeXFilter(self.__nowarns).feed(logfile.read(), True).get_messages()
        ):
            msg.emit()
        # Return LaTeX's exit status
        return extra["status"]
    def __clean_messages(self, msgs):
        """Make some standard log messages more user-friendly."""
        have_undefined_reference = False
        for msg in msgs:
            if msg.msg == "==> Fatal error occurred, no output PDF file produced!":
                msg = msg._replace(
                    typ="info", msg="Fatal error (no output file produced)"
                )
            if msg.msg.startswith("[LaTeX] "):
                # Strip unnecessary package name
                msg = msg._replace(msg=msg.msg.split(" ", 1)[1])
            if re.match(r"Reference .* undefined", msg.msg):
                have_undefined_reference = True
            if have_undefined_reference and re.match(
                r"There were undefined references", msg.msg
            ):
                # LaTeX prints this at the end so the user knows it's
                # worthwhile looking back at the log. Since latexrun
                # makes the earlier messages obvious, this is
                # redundant.
                continue
            yield msg
    def get_tex_filename(self):
        """Return the main .tex file this task compiles."""
        return self.__tex_filename
    def get_jobname(self):
        """Return latex's job name from the last run, or None if never run."""
        extra = self._get_result_extra()
        if extra is None:
            return None
        return extra["jobname"]
    def get_outname(self):
        """Return latex's output file from the last run, or None."""
        extra = self._get_result_extra()
        if extra is None:
            return None
        return extra["outname"]
    def get_status(self):
        """Return latex's exit status from the last run, or None."""
        extra = self._get_result_extra()
        if extra is None:
            return None
        return extra["status"]
class LaTeXCommit(Task):
    """Task that publishes latex's output file to its final name.

    The copy happens only when the latex task exited successfully and
    produced output, so a broken build never overwrites a good
    document under the final name.
    """
    def __init__(self, db, latex_task, output_path):
        # output_path, if non-empty, overrides the default destination
        # (the basename of latex's output file).
        super().__init__(
            db, "latex_commit::" + normalize_input_path(latex_task.get_tex_filename())
        )
        self.__latex_task = latex_task
        self.__output_path = output_path
        # Human-readable failure reason; None after a successful or
        # unnecessary commit.
        self.status = "There were errors"
    def _input_latex(self):
        # Input function: the latex task's (exit status, output name).
        return self.__latex_task.get_status(), self.__latex_task.get_outname()
    def _execute(self):
        """Commit the output file if latex produced one; set self.status."""
        self.status = "There were errors"
        # If latex succeeded with output, atomically commit the output
        status, outname = self._input("latex")
        if status != 0 or outname is None:
            debug("not committing (status {}, outname {})", status, outname)
            if outname is None:
                self.status = "No pages of output"
            return RunResult([], None)
        commit = self.__output_path or os.path.basename(outname)
        if os.path.abspath(commit) == os.path.abspath(outname):
            debug("skipping commit (outname is commit name)")
            self.status = None
            return RunResult([], None)
        try:
            if os.path.exists(commit) and filecmp.cmp(outname, commit):
                debug("skipping commit ({} and {} are identical)", outname, commit)
                # To avoid confusion, touch the output file
                open(outname, "r+b").close()
            else:
                debug("commiting {} to {}", outname, commit)
                # Copy to a temporary then rename, so the commit file
                # is replaced atomically.
                shutil.copy(outname, outname + "~")
                os.rename(outname + "~", commit)
        except OSError as e:
            raise TaskError("error committing latex output: {}".format(e)) from e
        self._input("file", outname)
        self.status = None
        return RunResult([commit], None)
class LaTeXFilter:
TRACE = False # Set to enable detailed parse tracing
def __init__(self, nowarns=[]):
self.__data = ""
self.__restart_pos = 0
self.__restart_file_stack = []
self.__restart_messages_len = 0
self.__messages = []
self.__first_file = None
self.__fatal_error = False
self.__missing_includes = False
self.__pageno = 1
self.__restart_pageno = 1
self.__suppress = {cls: 0 for cls in nowarns}
def feed(self, data, eof=False):
"""Feed LaTeX log data to the parser.
The log data can be from LaTeX's standard output, or from the
log file. If there will be no more data, set eof to True.
"""
self.__data += data
self.__data_complete = eof
# Reset to last known-good restart point
self.__pos = self.__restart_pos
self.__file_stack = self.__restart_file_stack.copy()
self.__messages = self.__messages[: self.__restart_messages_len]
self.__lstart = self.__lend = -1
self.__pageno = self.__restart_pageno
# Parse forward
while self.__pos < len(self.__data):
self.__noise()
# Handle suppressed warnings
if eof:
msgs = [
"%d %s warning%s" % (count, cls, "s" if count > 1 else "")
for cls, count in self.__suppress.items()
if count
]
if msgs:
self.__message(
"info",
None,
"%s not shown (use -Wall to show them)" % ", ".join(msgs),
filename=self.__first_file,
)
if eof and len(self.__file_stack) and not self.__fatal_error:
# Fatal errors generally cause TeX to "succumb" without
# closing the file stack, so don't complain in that case.
self.__message(
"warning", None, "unbalanced `(' in log; file names may be wrong"
)
return self
def get_messages(self):
"""Return a list of warning and error Messages."""
return self.__messages
def get_file_stack(self):
"""Return the file stack for the data that has been parsed.
This results a list from outermost file to innermost file.
The list may be empty.
"""
return self.__file_stack
def has_missing_includes(self):
"""Return True if the log reported missing \\include files."""
return self.__missing_includes
def __save_restart_point(self):
"""Save the current state as a known-good restart point.
On the next call to feed, the parser will reset to this point.
"""
self.__restart_pos = self.__pos
self.__restart_file_stack = self.__file_stack.copy()
self.__restart_messages_len = len(self.__messages)
self.__restart_pageno = self.__pageno
def __message(self, typ, lineno, msg, cls=None, filename=None):
if cls is not None and cls in self.__suppress:
self.__suppress[cls] += 1
return
filename = filename or (
self.__file_stack[-1] if self.__file_stack else self.__first_file
)
self.__messages.append(Message(typ, filename, lineno, msg))
def __ensure_line(self):
"""Update lstart and lend."""
if self.__lstart <= self.__pos < self.__lend:
return
self.__lstart = self.__data.rfind("\n", 0, self.__pos) + 1
self.__lend = self.__data.find("\n", self.__pos) + 1
if self.__lend == 0:
self.__lend = len(self.__data)
@property
def __col(self):
"""The 0-based column number of __pos."""
self.__ensure_line()
return self.__pos - self.__lstart
@property
def __avail(self):
return self.__pos < len(self.__data)
def __lookingat(self, needle):
return self.__data.startswith(needle, self.__pos)
def __lookingatre(self, regexp, flags=0):
return re.compile(regexp, flags=flags).match(self.__data, self.__pos)
def __skip_line(self):
self.__ensure_line()
self.__pos = self.__lend
def __consume_line(self, unwrap=False):
self.__ensure_line()
data = self.__data[self.__pos : self.__lend]
self.__pos = self.__lend
if unwrap:
# TeX helpfully wraps all terminal output at 79 columns
# (max_print_line). If requested, unwrap it. There's
# simply no way to do this perfectly, since there could be
# a line that happens to be 79 columns.
#
# We check for >=80 because a bug in LuaTeX causes it to
# wrap at 80 columns instead of 79 (LuaTeX #900).
while self.__lend - self.__lstart >= 80:
if self.TRACE:
print("<{}> wrapping".format(self.__pos))
self.__ensure_line()
data = data[:-1] + self.__data[self.__pos : self.__lend]
self.__pos = self.__lend
return data
# Parser productions
def __noise(self):
# Most of TeX's output is line noise that combines error
# messages, warnings, file names, user errors and warnings,
# and echos of token lists and other input. This attempts to
# tease these apart, paying particular attention to all of the
# places where TeX echos input so that parens in the input do
# not confuse the file name scanner. There are three
# functions in TeX that echo input: show_token_list (used by
# runaway and show_context, which is used by print_err),
# short_display (used by overfull/etc h/vbox), and show_print
# (used in issue_message and the same places as
# show_token_list).
lookingat, lookingatre = self.__lookingat, self.__lookingatre
if self.__col == 0:
# The following messages are always preceded by a newline
if lookingat("! "):
return self.__errmessage()
if lookingat("!pdfTeX error: "):
return self.__pdftex_fail()
if lookingat("Runaway "):
return self.__runaway()
if lookingatre(r"(Overfull|Underfull|Loose|Tight) \\[hv]box \("):
return self.__bad_box()
if lookingatre("(Package |Class |LaTeX |pdfTeX )?(\w+ )?warning: ", re.I):
return self.__generic_warning()
if lookingatre("No file .*\\.tex\\.$", re.M):
# This happens with \includes of missing files. For
# whatever reason, LaTeX doesn't consider this even
# worth a warning, but I do!
self.__message(
"warning",
None,
self.__simplify_message(self.__consume_line(unwrap=True).strip()),
)
self.__missing_includes = True
return
# Other things that are common and irrelevant
if lookingatre(r"(Package|Class|LaTeX) (\w+ )?info: ", re.I):
return self.__generic_info()
if lookingatre(r"(Document Class|File|Package): "):
# Output from "\ProvidesX"
return self.__consume_line(unwrap=True)
if lookingatre(r"\\\w+=\\[a-z]+\d+\n"):
# Output from "\new{count,dimen,skip,...}"
return self.__consume_line(unwrap=True)
# print(self.__data[self.__lstart:self.__lend].rstrip())
# self.__pos = self.__lend
# return
# Now that we've substantially reduced the spew and hopefully
# eliminated all input echoing, we're left with the file name
# stack, page outs, and random other messages from both TeX
# and various packages. We'll assume at this point that all
# parentheses belong to the file name stack or, if they're in
# random other messages, they're at least balanced and nothing
# interesting happens between them. For page outs, ship_out
# prints a space if not at the beginning of a line, then a
# "[", then the page number being shipped out (this is
# usually, but not always, followed by "]").
m = re.compile(r"[(){}\n]|(?<=[\n ])\[\d+", re.M).search(
self.__data, self.__pos
)
if m is None:
self.__pos = len(self.__data)
return
self.__pos = m.start() + 1
ch = self.__data[m.start()]
if ch == "\n":
# Save this as a known-good restart point for incremental
# parsing, since we definitely didn't match any of the
# known message types above.
self.__save_restart_point()
elif ch == "[":
# This is printed at the end of a page, so we're beginning
# page n+1.
self.__pageno = int(self.__lookingatre(r"\d+").group(0)) + 1
elif (
self.__data.startswith("`", m.start() - 1)
or self.__data.startswith("`\\", m.start() - 2)
) and self.__data.startswith("'", m.start() + 1):
# (, ), {, and } sometimes appear in TeX's error
# descriptions, but they're always in `'s (and sometimes
# backslashed)
return
elif ch == "(":
# XXX Check that the stack doesn't drop to empty and then re-grow
first = self.__first_file is None and self.__col == 1
filename = self.__filename()
self.__file_stack.append(filename)
if first:
self.__first_file = filename
if self.TRACE:
print(
"<{}>{}enter {}".format(
m.start(), " " * len(self.__file_stack), filename
)
)
elif ch == ")":
if len(self.__file_stack):
if self.TRACE:
print(
"<{}>{}exit {}".format(
m.start(),
" " * len(self.__file_stack),
self.__file_stack[-1],
)
)
self.__file_stack.pop()
else:
self.__message(
"warning", None, "extra `)' in log; file names may be wrong "
)
elif ch == "{":
# TeX uses this for various things we want to ignore, like
# file names and print_mark. Consume up to the '}'
epos = self.__data.find("}", self.__pos)
if epos != -1:
self.__pos = epos + 1
else:
self.__message(
"warning", None, "unbalanced `{' in log; file names may be wrong"
)
elif ch == "}":
self.__message("warning", None, "extra `}' in log; file names may be wrong")
def __filename(self):
initcol = self.__col
first = True
name = ""
# File names may wrap, but if they do, TeX will always print a
# newline before the open paren
while first or (initcol == 1 and self.__lookingat("\n") and self.__col >= 79):
if not first:
self.__pos += 1
m = self.__lookingatre(r"[^(){} \n]*")
name += m.group()
self.__pos = m.end()
first = False
return name
def __simplify_message(self, msg):
msg = re.sub(
r"^(?:Package |Class |LaTeX |pdfTeX )?([^ ]+) (?:Error|Warning): ",
r"[\1] ",
msg,
flags=re.I,
)
msg = re.sub(r"\.$", "", msg)
msg = re.sub(r"has occurred (while \\output is active)", r"\1", msg)
return msg
def __errmessage(self):
# Procedure print_err (including \errmessage, itself used by
# LaTeX's \GenericError and all of its callers), as well as
# fatal_error. Prints "\n! " followed by error text
# ("Emergency stop" in the case of fatal_error). print_err is
# always followed by a call to error, which prints a period,
# and a newline...
msg = self.__consume_line(unwrap=True)[1:].strip()
is_fatal_error = msg == "Emergency stop."
msg = self.__simplify_message(msg)
# ... and then calls show_context, which prints the input
# stack as pairs of lines giving the context. These context
# lines are truncated so they never wrap. Each pair of lines
# will start with either "<something> " if the context is a
# token list, "<*> " for terminal input (or command line),
# "<read ...>" for stream reads, something like "\macroname
# #1->" for macros (though everything after \macroname is
# subject to being elided as "..."), or "l.[0-9]+ " if it's a
# file. This is followed by the errant input with a line
# break where the error occurred.
lineno = None
found_context = False
stack = []
while self.__avail:
m1 = self.__lookingatre(r"<([a-z ]+|\*|read [^ >]*)> |\\.*(->|...)")
m2 = self.__lookingatre("l\.[0-9]+ ")
if m1:
found_context = True
pre = self.__consume_line().rstrip("\n")
stack.append(pre)
elif m2:
found_context = True
pre = self.__consume_line().rstrip("\n")
info, rest = pre.split(" ", 1)
lineno = int(info[2:])
stack.append(rest)
elif found_context:
# Done with context
break
if found_context:
# Consume the second context line
post = self.__consume_line().rstrip("\n")
# Clean up goofy trailing ^^M TeX sometimes includes
post = re.sub(r"\^\^M$", "", post)
if post[: len(pre)].isspace() and not post.isspace():
stack.append(len(stack[-1]))
stack[-2] += post[len(pre) :]
else:
# If we haven't found the context, skip the line.
self.__skip_line()
stack_msg = ""
for i, trace in enumerate(stack):
stack_msg += (
"\n " + (" " * trace) + "^"
if isinstance(trace, int)
else "\n at " + trace.rstrip()
if i == 0
else "\n from " + trace.rstrip()
)
if is_fatal_error:
# fatal_error always prints one additional line of message
info = self.__consume_line().strip()
if info.startswith("*** "):
info = info[4:]
msg += ": " + info.lstrip("(").rstrip(")")
self.__message("error", lineno, msg + stack_msg)
self.__fatal_error = True
def __pdftex_fail(self):
# Procedure pdftex_fail. Prints "\n!pdfTeX error: ", the
# message, and a newline. Unlike print_err, there's never
# context.
msg = self.__consume_line(unwrap=True)[1:].strip()
msg = self.__simplify_message(msg)
self.__message("error", None, msg)
def __runaway(self):
# Procedure runaway. Prints "\nRunaway ...\n" possibly
# followed by token list (user text). Always followed by a
# call to print_err, so skip lines until we see the print_err.
self.__skip_line() # Skip "Runaway ...\n"
if not self.__lookingat("! ") and self.__avail:
# Skip token list, which is limited to one line
self.__skip_line()
def __bad_box(self):
# Function hpack and vpack. hpack prints a warning, a
# newline, then a short_display of the offending text.
# Unfortunately, there's nothing indicating the end of the
# offending text, but it should be on one (possible wrapped)
# line. vpack prints a warning and then, *unless output is
# active*, a newline. The missing newline is probably a bug,
# but it sure makes our lives harder.
origpos = self.__pos
msg = self.__consume_line()
m = re.search(
r" in (?:paragraph|alignment) at lines ([0-9]+)--([0-9]+)", msg
) or re.search(r" detected at line ([0-9]+)", msg)
if m:
# Sometimes TeX prints crazy line ranges like "at lines
# 8500--250". The lower number seems roughly sane, so use
# that. I'm not sure what causes this, but it may be
# related to shipout routines messing up line registers.
lineno = min(int(m.group(1)), int(m.groups()[-1]))
msg = msg[: m.start()]
else:
m = re.search(r" while \\output is active", msg)
if m:
lineno = None
msg = msg[: m.end()]
else:
self.__message("warning", None, "malformed bad box message in log")
return
# Back up to the end of the known message text
self.__pos = origpos + m.end()
if self.__lookingat("\n"):
# We have a newline, so consume it and look for the
# offending text.
self.__pos += 1
# If there is offending text, it will start with a font
# name, which will start with a \.
if "hbox" in msg and self.__lookingat("\\"):
self.__consume_line(unwrap=True)
msg = self.__simplify_message(msg) + " (page {})".format(self.__pageno)
cls = msg.split(None, 1)[0].lower()
self.__message("warning", lineno, msg, cls=cls)
def __generic_warning(self):
# Warnings produced by LaTeX's \GenericWarning (which is
# called by \{Package,Class}Warning and \@latex@warning),
# warnings produced by pdftex_warn, and other random warnings.
msg, cls = self.__generic_info()
# Most warnings include an input line emitted by \on@line
m = re.search(" on input line ([0-9]+)", msg)
if m:
lineno = int(m.group(1))
msg = msg[: m.start()]
else:
lineno = None
msg = self.__simplify_message(msg)
self.__message("warning", lineno, msg, cls=cls)
    def __generic_info(self):
        """Consume a \\Generic{Error,Warning,Info}-style message.

        Returns a tuple of (unwrapped message text, lower-cased
        package/class name extracted from the message).
        """
        # Messages produced by LaTeX's \Generic{Error,Warning,Info}
        # and things that look like them
        msg = self.__consume_line(unwrap=True).strip()
        # Package and class messages are continued with lines
        # containing '(package name) '
        # NOTE(review): this assumes the message has at least two
        # space-separated words (e.g. "Package foo Warning: ..."); a
        # single-word message would raise IndexError here — confirm
        # callers only reach this for such messages.
        pkg_name = msg.split(" ", 2)[1]
        prefix = "(" + pkg_name + ") "
        while self.__lookingat(prefix):
            # Collect extra lines. It's important that we keep these
            # because they may contain context information like line
            # numbers.
            extra = self.__consume_line(unwrap=True)
            msg += " " + extra[len(prefix) :].strip()
        return msg, pkg_name.lower()
##################################################################
# BibTeX task
#
class BibTeX(Task):
    """Task that runs bibtex (or biber) for a LaTeX document.

    The task is keyed on the LaTeX source path and reads the .aux
    file(s) produced by the companion LaTeX task to decide whether a
    bibliography run is needed at all.
    """

    def __init__(self, db, latex_task, cmd, cmd_args, nowarns, obj_dir):
        """Create a BibTeX task.

        db -- dependency database shared with other tasks
        latex_task -- the LaTeX task whose output this consumes
        cmd -- bibtex/biber executable name
        cmd_args -- extra command-line arguments
        nowarns -- warning classes to suppress (currently unused here)
        obj_dir -- output/object directory
        """
        super().__init__(
            db, "bibtex::" + normalize_input_path(latex_task.get_tex_filename())
        )
        self.__latex_task = latex_task
        self.__cmd = cmd
        self.__cmd_args = cmd_args
        self.__obj_dir = obj_dir

    def stable(self):
        """Return True if running bibtex would not change system state."""
        # If bibtex doesn't have its inputs, then it's stable because
        # it has no effect on system state.
        jobname = self.__latex_task.get_jobname()
        if jobname is None:
            # We don't know where the .aux file is until latex has run
            return True
        if not os.path.exists(jobname + ".aux"):
            # Input isn't ready, so bibtex will simply fail without
            # affecting system state. Hence, this task is trivially
            # stable.
            return True
        if not self.__find_bib_cmds(os.path.dirname(jobname), jobname + ".aux"):
            # The tex file doesn't refer to any bibliographic data, so
            # don't run bibtex.
            return True
        return super().stable()

    def __find_bib_cmds(self, basedir, auxname, stack=()):
        """Return True if the .aux tree rooted at auxname uses a bibliography.

        Recurses through \\@input'd aux files; `stack` tracks the chain
        of files being visited to detect include loops.
        """
        debug("scanning for bib commands in {}".format(auxname))
        if auxname in stack:
            raise TaskError(".aux file loop")
        stack = stack + (auxname,)
        try:
            aux_data = open(auxname, errors="surrogateescape").read()
        except FileNotFoundError:
            # The aux file may not exist if latex aborted
            return False
        if re.search(r"^\\bibstyle\{", aux_data, flags=re.M) or re.search(
            r"^\\bibdata\{", aux_data, flags=re.M
        ):
            return True
        if re.search(r"^\\abx@aux@cite\{", aux_data, flags=re.M):
            # biber citation
            return True
        # Recurse into included aux files (see aux_input_command), in
        # case \bibliography appears in an \included file.
        for m in re.finditer(r"^\\@input\{([^}]*)\}", aux_data, flags=re.M):
            if self.__find_bib_cmds(basedir, os.path.join(basedir, m.group(1)), stack):
                return True
        return False

    def _input_args(self):
        """Return the command line to execute (biber takes the bare jobname)."""
        if self.__is_biber():
            aux_name = os.path.basename(self.__latex_task.get_jobname())
        else:
            aux_name = os.path.basename(self.__latex_task.get_jobname()) + ".aux"
        return [self.__cmd] + self.__cmd_args + [aux_name]

    def _input_cwd(self):
        """Return the directory to run bibtex in (see _execute for why)."""
        return os.path.dirname(self.__latex_task.get_jobname())

    def _input_auxfile(self, auxname):
        """Return a digest of only the bibtex-relevant lines of an aux file.

        We don't consider the .aux files regular inputs. Instead, we
        extract just the bit that BibTeX cares about and depend on
        that. See get_aux_command_and_process in bibtex.web.
        """
        debug("hashing filtered aux file {}", auxname)
        try:
            with open(auxname, "rb") as aux:
                h = hashlib.sha256()
                for line in aux:
                    # Only these commands affect BibTeX's behavior.
                    if line.startswith(
                        (
                            b"\\citation{",
                            b"\\bibdata{",
                            b"\\bibstyle{",
                            b"\\@input{",
                            b"\\abx@aux@cite{",
                        )
                    ):
                        h.update(line)
                return h.hexdigest()
        except FileNotFoundError:
            debug("{} does not exist", auxname)
            return None

    def __path_join(self, first, rest):
        """Prepend `first` to the search path `rest` (kpathsea-style)."""
        if rest is None:
            # Append ':' to keep the default search path
            return first + ":"
        return first + ":" + rest

    def __is_biber(self):
        """Return True if the configured command is biber, not bibtex."""
        return "biber" in self.__cmd

    def _execute(self):
        """Run bibtex/biber and register its inputs with the dependency DB."""
        # This gets complicated when \include is involved. \include
        # switches to a different aux file and records its path in the
        # main aux file. However, BibTeX does not consider this path
        # to be relative to the location of the main aux file, so we
        # have to run BibTeX *in the output directory* for it to
        # follow these includes (there's no way to tell BibTeX other
        # locations to search). Unfortunately, this means BibTeX will
        # no longer be able to find local bib or bst files, so we
        # tell it where to look by setting BIBINPUTS and BSTINPUTS
        # (luckily we can control this search). We have to pass this
        # same environment down to Kpathsea when we resolve the paths
        # in BibTeX's log.
        args, cwd = self._input("args"), self._input("cwd")
        debug("running {} in {}", args, cwd)
        env = os.environ.copy()
        env["BIBINPUTS"] = self.__path_join(os.getcwd(), env.get("BIBINPUTS"))
        env["BSTINPUTS"] = self.__path_join(os.getcwd(), env.get("BSTINPUTS"))
        try:
            verbose_cmd(args, cwd, env)
            p = subprocess.Popen(
                args,
                cwd=cwd,
                env=env,
                stdin=subprocess.DEVNULL,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )
            stdout = self.__feed_terminal(p.stdout)
            status = p.wait()
        except OSError as e:
            raise TaskError("failed to execute bibtex task: " + str(e)) from e
        inputs, auxnames, outbase = self.__parse_inputs(stdout, cwd, env)
        if not inputs and not auxnames:
            # BibTeX failed catastrophically.
            print(stdout, file=sys.stderr)
            raise TaskError("failed to execute bibtex task")
        # Register environment variable inputs
        for env_var in ["TEXMFOUTPUT", "BSTINPUTS", "BIBINPUTS", "PATH"]:
            self._input("env", env_var)
        # Register file inputs
        for path in auxnames:
            self._input("auxfile", path)
        for path in inputs:
            self._input("file", path)
        if self.__is_biber():
            outbase = os.path.join(cwd, outbase)
        outputs = [outbase + ".bbl", outbase + ".blg"]
        return RunResult(
            outputs, {"outbase": outbase, "status": status, "inputs": inputs}
        )

    def __feed_terminal(self, stdout):
        """Stream bibtex's output, updating the progress display.

        Returns the full captured output as one string.
        """
        with Progress("bibtex") as progress:
            buf, linebuf = [], ""
            while True:
                data = os.read(stdout.fileno(), 4096)
                if not data:
                    break
                # See "A note about encoding" above
                data = data.decode("ascii", errors="surrogateescape")
                buf.append(data)
                linebuf += data
                while "\n" in linebuf:
                    line, _, linebuf = linebuf.partition("\n")
                    # Show which database file is being processed.
                    if line.startswith("Database file"):
                        progress.update(line.split(": ", 1)[1])
            return "".join(buf)

    def __parse_inputs(self, log, cwd, env):
        """Extract (inputs, auxnames, outbase) from bibtex/biber's log.

        BibTeX conveniently logs every file that it opens, and its
        log is actually sensible (see calls to a_open_in in
        bibtex.web.) The only trick is that these file names are
        pre-kpathsea lookup and may be relative to the directory we
        ran BibTeX in.

        Because BibTeX actually depends on very little in the .aux
        file (and it's likely other things will change in the .aux
        file), we don't count the whole .aux file as an input, but
        instead depend only on the lines that matter to BibTeX.
        """
        kpathsea = Kpathsea("bibtex")
        inputs = []
        auxnames = []
        outbase = None
        for line in log.splitlines():
            m = re.match(
                "(?:The top-level auxiliary file:"
                "|A level-[0-9]+ auxiliary file:) (.*)",
                line,
            )
            if m:
                auxnames.append(os.path.join(cwd, m.group(1)))
                continue
            m = re.match("(?:(The style file:)|(Database file #[0-9]+:)) (.*)", line)
            if m:
                filename = m.group(3)
                # Resolve the pre-kpathsea name the way BibTeX would.
                if m.group(1):
                    filename = kpathsea.find_file(filename, "bst", cwd, env)
                elif m.group(2):
                    filename = kpathsea.find_file(filename, "bib", cwd, env)
                # If this path is relative to the source directory,
                # clean it up for error reporting and portability of
                # the dependency DB
                if filename.startswith("/"):
                    relname = os.path.relpath(filename)
                    if "../" not in relname:
                        filename = relname
                inputs.append(filename)
            # biber output
            m = re.search("Found BibTeX data source '(.*?)'", line)
            if m:
                filename = m.group(1)
                inputs.append(filename)
            m = re.search("Logfile is '(.*?)'", line)
            if m:
                # Strip the ".blg" extension to get the output base.
                outbase = m.group(1)[:-4]
        if outbase is None:
            # Fall back to the top-level aux file minus ".aux".
            outbase = auxnames[0][:-4]
        return inputs, auxnames, outbase

    def report(self):
        """Pretty-print the .blg log and return a UNIX-style exit status."""
        extra = self._get_result_extra()
        if extra is None:
            return 0
        # Parse and pretty-print the log
        log = open(extra["outbase"] + ".blg", "rt").read()
        inputs = extra["inputs"]
        for msg in BibTeXFilter(log, inputs).get_messages():
            msg.emit()
        # BibTeX exits with 1 if there are warnings, 2 if there are
        # errors, and 3 if there are fatal errors (sysdep.h).
        # Translate to a normal UNIX exit status.
        if extra["status"] >= 2:
            return 1
        return 0
class BibTeXFilter:
    """Parse a BibTeX (or biber) log and collect diagnostic Messages.

    BibTeX has many error paths, but luckily the set is closed, so we
    can match all of them explicitly in __process_line.
    """

    def __init__(self, data, inputs):
        """Parse log text `data`.

        `inputs` is the list of .bib/.bst files BibTeX read; it is used
        to attribute key-level warnings to a file and line.
        """
        self.__inputs = inputs
        self.__key_locs = None  # lazily-built {key: (filename, lineno)}
        self.__messages = []
        prev_line = ""
        for line in data.splitlines():
            msg = self.__process_line(prev_line, line)
            if msg is not None:
                self.__messages.append(Message(*msg))
            prev_line = line

    def get_messages(self):
        """Return a list of warning and error Messages."""
        # BibTeX reports most errors in no particular order. Sort by
        # file and line.
        return sorted(
            self.__messages, key=lambda msg: (msg.filename or "", msg.lineno or 0)
        )

    def __process_line(self, prev_line, line):
        """Return (type, filename, lineno, text) for a diagnostic line.

        Returns None if `line` (with context `prev_line`) is not a
        recognized diagnostic.
        """
        m = None

        def match(regexp):
            nonlocal m
            m = re.match(regexp, line)
            return m

        # BibTeX has many error paths, but luckily the set is closed,
        # so we can find all of them.  This first case is the
        # workhorse format.
        #
        # AUX errors: aux_err/aux_err_return/aux_err_print
        #
        # BST errors: bst_ln_num_print/bst_err/
        #   bst_err_print_and_look_for_blank_line_return/
        #   bst_warn_print/bst_warn/
        #   skip_token/skip_token_print/
        #   bst_ext_warn/bst_ext_warn_print/
        #   bst_ex_warn/bst_ex_warn_print/
        #   bst_mild_ex_warn/bst_mild_ex_warn_print/
        #   bst_string_size_exceeded
        #
        # BIB errors: bib_ln_num_print/
        #   bib_err_print/bib_err/
        #   bib_warn_print/bib_warn/
        #   bib_one_of_two_expected_err/macro_name_warning/
        if match("(.*?)---?line ([0-9]+) of file (.*)"):
            # Sometimes the real error is printed on the previous line
            if m.group(1) == "while executing":
                # bst_ex_warn. The real message is on the previous line
                text = prev_line
            else:
                text = m.group(1) or prev_line
            typ, msg = self.__canonicalize(text)
            return (typ, m.group(3), int(m.group(2)), msg)
        # overflow/print_overflow
        if match("Sorry---you've exceeded BibTeX's (.*)"):
            return ("error", None, None, "capacity exceeded: " + m.group(1))
        # confusion/print_confusion
        if match("(.*)---this can't happen$"):
            return ("error", None, None, "internal error: " + m.group(1))
        # aux_end_err
        if match("I found (no .*)---while reading file (.*)"):
            return ("error", m.group(2), None, m.group(1))
        # bad_cross_reference_print/
        # nonexistent_cross_reference_error/
        # @<Complain about a nested cross reference@>
        #
        # This is split across two lines. Match the second.
        if match('^refers to entry "'):
            typ, msg = self.__canonicalize(prev_line + " " + line)
            msg = re.sub("^a (bad cross reference)", "\\1", msg)
            # Try to give this key a location
            filename = lineno = None
            # BUG FIX: the original pattern '--entry "[^"]"' had no
            # capturing group (so m2.group(1) raised an error) and
            # only matched single-character keys.
            m2 = re.search(r'--entry "([^"]*)"', prev_line)
            if m2:
                filename, lineno = self.__find_key(m2.group(1))
            return (typ, filename, lineno, msg)
        # print_missing_entry
        if match('Warning--I didn\'t find a database entry for (".*")'):
            return ("warning", None, None, "no database entry for " + m.group(1))
        # x_warning
        if match("Warning--(.*)"):
            # Most formats give warnings about "something in <key>".
            # Try to match it up.
            filename = lineno = None
            for m2 in reversed(list(re.finditer(r" in ([^, \t\n]+)\b", line))):
                if m2:
                    filename, lineno = self.__find_key(m2.group(1))
                    if filename:
                        break
            return ("warning", filename, lineno, m.group(1))
        # @<Clean up and leave@>
        if match("Aborted at line ([0-9]+) of file (.*)"):
            return ("info", m.group(2), int(m.group(1)), "aborted")
        # biber warnings
        if match("^.*> WARN - (.*)$"):
            # BUG FIX: removed a stray debug print() here, and return
            # the line number as an int (was the string "0"), which
            # broke get_messages' mixed int/str sort key.
            m2 = re.match("(.*) in file '(.*?)', skipping ...", m.group(1))
            if m2:
                return ("warning", m2.group(2), 0, m2.group(1))
            return ("warning", None, None, m.group(1))
        # biber errors
        if match("^.*> ERROR - (.*)$"):
            # BUG FIX: raw-string pattern (avoids invalid-escape
            # warnings for \d) and int line number for sorting.
            m2 = re.match(r"BibTeX subsystem: (.*?), line (\d+), (.*)$", m.group(1))
            if m2:
                return ("error", m2.group(1), int(m2.group(2)), m2.group(3))
            return ("error", None, None, m.group(1))

    def __canonicalize(self, msg):
        """Split a raw message into (type, normalized text)."""
        if msg.startswith("Warning"):
            msg = re.sub("^Warning-*", "", msg)
            typ = "warning"
        else:
            typ = "error"
        # Strip BibTeX's first-person prefixes and lower-case the start.
        msg = re.sub("^I('m| was)? ", "", msg)
        msg = msg[:1].lower() + msg[1:]
        return typ, msg

    def __find_key(self, key):
        """Return (filename, lineno) where `key` is defined, else (None, None)."""
        if self.__key_locs is None:
            # Build the key -> location map lazily from all inputs.
            p = BibTeXKeyParser()
            self.__key_locs = {}
            for filename in self.__inputs:
                data = open(filename, "rt", errors="surrogateescape").read()
                for pkey, lineno in p.parse(data):
                    # First definition wins.
                    self.__key_locs.setdefault(pkey, (filename, lineno))
        return self.__key_locs.get(key, (None, None))
class BibTeXKeyParser:
    """Just enough of a BibTeX parser to locate entry keys.

    parse() scans .bib text and yields ``(key, lineno)`` for each
    regular entry, skipping @comment, @preamble, and @string.
    """

    # An entry-type/identifier token: must not start with a digit and
    # may not contain BibTeX's reserved or control characters.
    _IDENT_RE = "(?![0-9])([^\x00-\x20\x80-\xff \t\"#%'(),={}]+)"

    def parse(self, data):
        """Yield (key, 1-based line number) for every keyed entry in *data*."""
        self.__data = data
        self.__pos = 0
        # Advance to each '@' that introduces an entry.
        while self.__advance("[^@]*@[ \t\n]*"):
            # Read the entry type (article, string, comment, ...).
            if not self.__advance(self._IDENT_RE + "[ \t\n]*"):
                continue
            entry_type = self.__match.group(1)
            if entry_type == "comment":
                # @comment has no delimited body to skip.
                continue
            body_start = self.__pos
            if not self.__advance("([{(])[ \t\n]*"):
                continue
            # The opening delimiter determines the closer and which
            # characters may appear in a key.
            if self.__match.group(1) == "{":
                closer, key_re = "}", "([^, \t\n}]*)"
            else:
                closer, key_re = ")", "([^, \t\n]*)"
            if entry_type not in ("preamble", "string"):
                # Regular entry; the key follows the opening delimiter.
                if self.__advance(key_re):
                    yield self.__match.group(1), self.__line_of_pos()
            # Skip the (brace-balanced) body of the entry.
            self.__pos = body_start
            self.__skip_body(closer)

    def __advance(self, regexp):
        # Anchored match at the cursor; on success, move past it.
        self.__match = re.compile(regexp).match(self.__data, self.__pos)
        if self.__match:
            self.__pos = self.__match.end()
        return self.__match

    def __line_of_pos(self):
        # 1-based line number of the current cursor position.
        return self.__data.count("\n", 0, self.__pos) + 1

    def __skip_body(self, closer):
        # Called with the cursor on the opening delimiter; consume up
        # to and including the matching closer, honoring nested braces.
        self.__pos += 1
        depth = 0
        delims = re.compile("[{}" + closer + "]")
        while True:
            hit = delims.search(self.__data, self.__pos)
            if hit is None:
                break
            self.__pos = hit.end()
            ch = hit.group(0)
            if depth == 0 and ch == closer:
                break
            if ch == "{":
                depth += 1
            elif ch == "}":
                depth -= 1
class Kpathsea:
    """Thin wrapper around kpsewhich for TeX path resolution."""

    def __init__(self, program_name):
        # Passed to kpsewhich via -progname; selects which search
        # paths apply (e.g. "bibtex").
        self.__progname = program_name

    def find_file(self, name, format, cwd=None, env=None):
        """Return the resolved path of 'name' or None."""
        cmd = [
            "kpsewhich",
            "-progname", self.__progname,
            "-format", format,
            name,
        ]
        try:
            verbose_cmd(cmd, cwd, env)
            output = subprocess.check_output(
                cmd, cwd=cwd, env=env, universal_newlines=True
            )
        except subprocess.CalledProcessError as err:
            # kpsewhich exits 1 when the file simply isn't found;
            # anything else is a real failure.
            if err.returncode == 1:
                return None
            raise
        path = output.strip()
        # kpsewhich resolves relative to cwd, so qualify the result.
        return path if cwd is None else os.path.join(cwd, path)
# Entry point when invoked as a script; main() is defined earlier in
# this file.
if __name__ == "__main__":
    main()
#
# alimaster/gui/simple_window.py
#
"""
Defines a 'simple_window' which should be subclassed to a specific window.
"""
from tkinter import * # noqa
from tkinter.ttk import * # noqa
class SimpleWindow():
    """
    A simple window which should be subclassed. This class is not tied to
    alimaster.app like the alimaster.gui.Window class is.

    The class automatically generates any ImageTk's upon creation. To use
    this, simply create a class-level dict 'imgs' which maps strings to
    images (recommended created via FontAwesome.generate_icon function).

    If this window is specific to AliMaster's main window, your class
    should subclass alimaster.gui.Window, which is a SimpleWindow that is
    automatically bound to the masterwindow.
    """

    # Icon name -> image mapping; subclasses override this and
    # _generate_icons() converts the values to PhotoImages in place.
    imgs = dict()

    def __init__(self,
                 root,
                 title,
                 minsize=(400, 200),
                 auto_close_window=True
                 ):
        """
        Construct a SimpleWindow wrapping the given Tk/Toplevel *root*.

        :param root: the Tk or Toplevel widget this window manages
        :param title: window title string
        :param minsize: (width, height) minimum size of the window
        :param auto_close_window: if True, closing the window stops the
            Tk mainloop via stop_tk_root
        """
        self._generate_icons()
        self.root = root
        self.root.title(title)
        if auto_close_window:
            self.root.protocol("WM_DELETE_WINDOW", self.stop_tk_root)
        self.root.minsize(*minsize)
        self.frame = Frame(self.root)
        self.frame.pack(fill=BOTH, expand=1)
        # Subclasses may replace this callback; it receives FocusIn
        # events that pass the _on_focus filter.
        self.on_focus = lambda event: True
        self.root.bind("<FocusIn>", self._on_focus)

    def stop_tk_root(self):
        """Stop the Tk mainloop on the next idle cycle."""
        self.root.after(0, self.root.quit)

    def _on_focus(self, event):
        """
        A 'filter' function which propagates a focus event only if the
        event's widget is this window's toplevel.
        """
        # BUG FIX: the original compared against self.window, which is
        # never assigned by this class and raised AttributeError on the
        # first <FocusIn>. Fall back to self.root when a subclass has
        # not defined a 'window' attribute.
        if event.widget is not getattr(self, "window", self.root):
            return
        self.on_focus(event)

    @classmethod
    def _generate_icons(cls):
        """
        Generate the icons found in the class's imgs dict. Effectively
        runs only once: if the dict is empty, or any value has already
        been converted to a PhotoImage, there is nothing (more) to do.
        """
        # NOTE(review): ImageTk is not imported by this module; it is
        # presumably provided by PIL (Pillow) at runtime — confirm.
        if len(cls.imgs) == 0 or any(map(
                lambda obj: isinstance(obj, ImageTk.PhotoImage),
                cls.imgs.values())):
            return
        for key, val in cls.imgs.items():
            cls.imgs[key] = ImageTk.PhotoImage(val)
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.serialization;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.header.Headers;
import java.nio.ByteBuffer;
public class BooleanDeserializer implements Deserializer<Boolean> {
private static final byte TRUE = 0x01;
private static final byte FALSE = 0x00;
@Override
public Boolean deserialize(final String topic, final byte[] data) {
if (data == null) {
return null;
}
if (data.length != 1) {
throw new SerializationException("Size of data received by BooleanDeserializer is not 1");
}
if (data[0] == TRUE) {
return true;
} else if (data[0] == FALSE) {
return false;
} else {
throw new SerializationException("Unexpected byte received by BooleanDeserializer: " + data[0]);
}
}
@Override
public Boolean deserialize(String topic, Headers headers, ByteBuffer data) {
if (data == null) {
return null;
}
if (data.remaining() != 1) {
throw new SerializationException("Size of data received by BooleanDeserializer is not 1");
}
final byte b = data.get(data.position());
if (b == TRUE) {
return true;
} else if (b == FALSE) {
return false;
} else {
throw new SerializationException("Unexpected byte received by BooleanDeserializer: " + b);
}
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/common/serialization/BooleanDeserializer.java |
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
def get_manager(cls):
    """
    Returns the appropriate Manager class for a given Model class. It
    does this by looking in the boto config for a section like this::

        [DB]
        db_type = SimpleDB
        db_user = <aws access key id>
        db_passwd = <aws secret access key>
        db_name = my_domain
        [DB_TestBasic]
        db_type = SimpleDB
        db_user = <another aws access key id>
        db_passwd = <another aws secret access key>
        db_name = basic_domain
        db_port = 1111

    The values in the DB section are "generic values" that will be used
    if nothing more specific is found. You can also create a section for
    a specific Model class that gives the db info for that class.
    In the example above, TestBasic is a Model subclass.

    :raises ValueError: if the configured db_type is not recognized
    """
    # Generic defaults from the [DB] section.
    db_user = boto.config.get('DB', 'db_user', None)
    db_passwd = boto.config.get('DB', 'db_passwd', None)
    db_type = boto.config.get('DB', 'db_type', 'SimpleDB')
    db_name = boto.config.get('DB', 'db_name', None)
    db_table = boto.config.get('DB', 'db_table', None)
    db_host = boto.config.get('DB', 'db_host', "sdb.amazonaws.com")
    db_port = boto.config.getint('DB', 'db_port', 443)
    enable_ssl = boto.config.getbool('DB', 'enable_ssl', True)
    sql_dir = boto.config.get('DB', 'sql_dir', None)
    debug = boto.config.getint('DB', 'debug', 0)
    # first see if there is a fully qualified section name in the Boto config
    module_name = cls.__module__.replace('.', '_')
    db_section = 'DB_' + module_name + '_' + cls.__name__
    if not boto.config.has_section(db_section):
        db_section = 'DB_' + cls.__name__
    if boto.config.has_section(db_section):
        db_user = boto.config.get(db_section, 'db_user', db_user)
        db_passwd = boto.config.get(db_section, 'db_passwd', db_passwd)
        db_type = boto.config.get(db_section, 'db_type', db_type)
        db_name = boto.config.get(db_section, 'db_name', db_name)
        db_table = boto.config.get(db_section, 'db_table', db_table)
        db_host = boto.config.get(db_section, 'db_host', db_host)
        db_port = boto.config.getint(db_section, 'db_port', db_port)
        # BUG FIX: enable_ssl was read with getint(), which breaks on
        # boolean strings like "true"/"false" and is inconsistent with
        # the getbool() used for the [DB] default above.
        enable_ssl = boto.config.getbool(db_section, 'enable_ssl', enable_ssl)
        debug = boto.config.getint(db_section, 'debug', debug)
    elif hasattr(cls, "_db_name") and cls._db_name is not None:
        # More specific then the generic DB config is any _db_name class property
        db_name = cls._db_name
    elif hasattr(cls.__bases__[0], "_manager"):
        # Inherit the manager already built for the parent Model class.
        return cls.__bases__[0]._manager
    if db_type == 'SimpleDB':
        from boto.sdb.db.manager.sdbmanager import SDBManager
        return SDBManager(cls, db_name, db_user, db_passwd,
                          db_host, db_port, db_table, sql_dir, enable_ssl)
    elif db_type == 'XML':
        from boto.sdb.db.manager.xmlmanager import XMLManager
        return XMLManager(cls, db_name, db_user, db_passwd,
                          db_host, db_port, db_table, sql_dir, enable_ssl)
    else:
        raise ValueError('Unknown db_type: %s' % db_type)
#!/usr/bin/env python3
# _ _ _ _____ _ _ _____ _ _ ___ ___ _ __
# /_\ | | |_ _| |_ (_)_ _ __ _ __|_ _|_ _| | |__ / __| \| |/ /
# / _ \| | | | | | ' \| | ' \/ _` (_-< | |/ _` | | / / \__ \ |) | ' <
# /_/ \_\_|_| |_| |_||_|_|_||_\__, /__/ |_|\__,_|_|_\_\ |___/___/|_|\_\
# |___/
#
# Copyright 2017 AllThingsTalk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# AllThingsTalk LED Actuator experiment
#
# Before running this experiment, make certain that grovepi3 and allthingstalk
# libraries are installed and globally accessible.
#
import time
import RPi.GPIO as GPIO
from allthingstalk import Device, BooleanAsset, Client, Asset
# Parameters used to authorize and identify your device
# Get them on maker.allthingstalk.com
# Parameters used to authorize and identify your device
# Get them on maker.allthingstalk.com
DEVICE_TOKEN = '<DEVICE_TOKEN>'
DEVICE_ID = '<DEVICE_ID>'


class LedActuator(Device):
    # A single boolean actuator asset named "led" on the AllThingsTalk cloud.
    led = BooleanAsset(kind=Asset.ACTUATOR)


# Authorize and connect your device with the Cloud
client = Client(DEVICE_TOKEN)
device = LedActuator(client=client, id=DEVICE_ID)

# LED is connected to GPIO4
led_pin = 4

# Led's pin needs to be in OUTPUT mode
GPIO.setmode(GPIO.BCM)
GPIO.setup(led_pin, GPIO.OUT)


@LedActuator.command.led
def on_led(device, value, at):
    """Handle an actuation command for the 'led' asset.

    :param device: the LedActuator instance the command targets
    :param value: requested boolean state for the LED
    :param at: timestamp of the command
    """
    # Turn led On or Off depending on the received Command
    GPIO.output(led_pin, value)
    # Send value to the cloud to reflect physical state of the led
    device.led = value
    print('Led state updated to %s.' % value)


# Keep the process alive so actuation callbacks can fire.
while True:
    print('Waiting for actuation...')
    time.sleep(5)
"""
Semi-Markov Spam Filter of Doom
Filters spam, but not as well as other filters. Mostly intended as an experiment in the use of Markov chain-like objects for natural language analysis.
Copyright (c) 2009 Matthew Croop
Copyright (c) 2009 Albert Sun
This program is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License version 3 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
A copy the GNU General Public Licence version 3 is provide along
with this program in the file name LICENCE. If not, see
<http://www.gnu.org/licenses/>.
"""
import os
def OPENIter(filelist):
"""Returns an open file for each path in the given list"""
for filename in filelist:
try:
if type(filename) == str:
theFile = open(filename)
yield theFile
theFile.close()
elif type(filename) == file:
yield filename
else:
print "Not a file or filename:", filename
except IOError,e:
print "[IOError]",e
def directoryIter(directory):
    """Yield the full path of each entry directly inside *directory* (string)."""
    for entry in os.listdir(directory):
        yield os.path.join(directory, entry)
def directoryIterRecurse(directory):
    """Yield the full path of every file under *directory* and its subdirectories."""
    for parent, _subdirs, names in os.walk(directory):
        for name in names:
            yield os.path.join(parent, name)
def fileIndexIter(indexfilename, type):
    """Yield a path for each line of the index file whose first word is *type*.

    Each index line is expected to be "<type> <relative-path>"; paths
    are resolved relative to the index file's directory.
    """
    try:
        indexFile = open(indexfilename)
        for i in indexFile:
            # NOTE(review): lines with more or fewer than two
            # space-separated fields raise ValueError here — confirm
            # the index format guarantees exactly two.
            thistype, path = i.strip().split(" ")
            if thistype == type:
                yield os.path.normpath(os.path.join(indexfilename, "..", path))
        indexFile.close()
    except IOError,e:
        print "[IOError]",e
from unittest import mock
from django.apps.registry import Apps, apps
from django.contrib.contenttypes import management as contenttypes_management
from django.contrib.contenttypes.models import ContentType
from django.core.management import call_command
from django.test import TestCase, modify_settings
from django.test.utils import captured_stdout
from .models import ModelWithNullFKToSite, Post
@modify_settings(INSTALLED_APPS={"append": ["empty_models", "no_models"]})
class RemoveStaleContentTypesTests(TestCase):
    """Tests for the remove_stale_contenttypes management command."""

    # Speed up tests by avoiding retrieving ContentTypes for all test apps.
    available_apps = [
        "contenttypes_tests",
        "empty_models",
        "no_models",
        "django.contrib.contenttypes",
    ]

    @classmethod
    def setUpTestData(cls):
        # Start from a clean slate: remove anything stale left by other
        # tests, record the baseline count, then create one known-stale
        # content type for the tests to delete.
        with captured_stdout():
            call_command(
                "remove_stale_contenttypes",
                interactive=False,
                include_stale_apps=True,
                verbosity=2,
            )
        cls.before_count = ContentType.objects.count()
        cls.content_type = ContentType.objects.create(
            app_label="contenttypes_tests", model="Fake"
        )

    def setUp(self):
        self.app_config = apps.get_app_config("contenttypes_tests")

    def test_interactive_true_with_dependent_objects(self):
        """
        interactive mode (the default) deletes stale content types and warns of
        dependent objects.
        """
        post = Post.objects.create(title="post", content_type=self.content_type)
        # A related object is needed to show that a custom collector with
        # can_fast_delete=False is needed.
        ModelWithNullFKToSite.objects.create(post=post)
        with mock.patch("builtins.input", return_value="yes"):
            with captured_stdout() as stdout:
                call_command("remove_stale_contenttypes", verbosity=2, stdout=stdout)
        self.assertEqual(Post.objects.count(), 0)
        output = stdout.getvalue()
        self.assertIn("- Content type for contenttypes_tests.Fake", output)
        self.assertIn("- 1 contenttypes_tests.Post object(s)", output)
        self.assertIn("- 1 contenttypes_tests.ModelWithNullFKToSite", output)
        self.assertIn("Deleting stale content type", output)
        self.assertEqual(ContentType.objects.count(), self.before_count)

    def test_interactive_true_without_dependent_objects(self):
        """
        interactive mode deletes stale content types even if there aren't any
        dependent objects.
        """
        with mock.patch("builtins.input", return_value="yes"):
            with captured_stdout() as stdout:
                call_command("remove_stale_contenttypes", verbosity=2)
        self.assertIn("Deleting stale content type", stdout.getvalue())
        self.assertEqual(ContentType.objects.count(), self.before_count)

    def test_interactive_false(self):
        """non-interactive mode deletes stale content types."""
        with captured_stdout() as stdout:
            call_command("remove_stale_contenttypes", interactive=False, verbosity=2)
        self.assertIn("Deleting stale content type", stdout.getvalue())
        self.assertEqual(ContentType.objects.count(), self.before_count)

    def test_unavailable_content_type_model(self):
        """A ContentType isn't created if the model isn't available."""
        # A fresh app registry with no models installed.
        apps = Apps()
        with self.assertNumQueries(0):
            contenttypes_management.create_contenttypes(
                self.app_config, interactive=False, verbosity=0, apps=apps
            )
        self.assertEqual(ContentType.objects.count(), self.before_count + 1)

    @modify_settings(INSTALLED_APPS={"remove": ["empty_models"]})
    def test_contenttypes_removed_in_installed_apps_without_models(self):
        # "empty_models" is removed from INSTALLED_APPS, so its content
        # types must survive; "no_models" is installed but has no
        # models, so its stale entries must go.
        ContentType.objects.create(app_label="empty_models", model="Fake 1")
        ContentType.objects.create(app_label="no_models", model="Fake 2")
        with (
            mock.patch("builtins.input", return_value="yes"),
            captured_stdout() as stdout,
        ):
            call_command("remove_stale_contenttypes", verbosity=2)
        self.assertNotIn(
            "Deleting stale content type 'empty_models | Fake 1'",
            stdout.getvalue(),
        )
        self.assertIn(
            "Deleting stale content type 'no_models | Fake 2'",
            stdout.getvalue(),
        )
        self.assertEqual(ContentType.objects.count(), self.before_count + 1)

    @modify_settings(INSTALLED_APPS={"remove": ["empty_models"]})
    def test_contenttypes_removed_for_apps_not_in_installed_apps(self):
        # With include_stale_apps=True, even content types belonging to
        # apps no longer in INSTALLED_APPS are removed.
        ContentType.objects.create(app_label="empty_models", model="Fake 1")
        ContentType.objects.create(app_label="no_models", model="Fake 2")
        with (
            mock.patch("builtins.input", return_value="yes"),
            captured_stdout() as stdout,
        ):
            call_command(
                "remove_stale_contenttypes", include_stale_apps=True, verbosity=2
            )
        self.assertIn(
            "Deleting stale content type 'empty_models | Fake 1'",
            stdout.getvalue(),
        )
        self.assertIn(
            "Deleting stale content type 'no_models | Fake 2'",
            stdout.getvalue(),
        )
        self.assertEqual(ContentType.objects.count(), self.before_count)
# Copyright (c) 2015, Javier Gonzalez
# Copyright (c) 2015, the GPy Authors (see GPy AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import time
from ..util.general import best_value, reshape, spawn
from ..core.optimization import lp_batch_optimization, random_batch_optimization, predictive_batch_optimization
try:
from ..plotting.plots_bo import plot_acquisition, plot_convergence
except:
pass
class BO(object):
    def __init__(self, acquisition_func):
        """Store the acquisition function this optimizer will use.

        :param acquisition_func: acquisition function object used to
            select new evaluation points during optimization
        """
        self.acquisition_func = acquisition_func
    def _init_model(self):
        """Initialize the surrogate model; hook for subclasses to override."""
        pass
def run_optimization(self, max_iter = None, n_inbatch=1, acqu_optimize_method='fast_random', acqu_optimize_restarts=200, batch_method='predictive',
eps = 1e-8, n_procs=1, true_gradients = True, verbose=True):
"""
Runs Bayesian Optimization for a number 'max_iter' of iterations (after the initial exploration data)
:param max_iter: exploration horizon, or number of acquisitions. It nothing is provided optimizes the current acquisition.
:param n_inbatch: number of samples to collected everytime *f* is evaluated (one by default).
:param acqu_optimize_method: method to optimize the acquisition function
-'DIRECT': uses the DIRECT algorithm of Jones and Stuckmann.
-'CMA': uses the Covariance Matrix Adaptation Algorithm.
-'brute': Run local optimizers in a grid of points.
-'random': Run local optimizers started at random locations.
-'fast_brute': the same as brute but runs only one optimizer in the best location. It is used by default.
-'fast_random': the same as random but runs only one optimizer in the best location.
:param acqu_optimize_restarts: numbers of random restarts in the optimization of the acquisition function, default = 20.
:param batch_method: method to collect samples in batches
-'predictive': uses the predicted mean in the selected sample to update the acquisition function.
-'lp': used a penalization of the acquisition function to based on exclusion zones.
-'random': collects the element of the batch randomly
:param eps: minimum distance between two consecutive x's to keep running the model
:param n_procs: The number of processes used for evaluating the given function *f* (ideally nprocs=n_inbatch).
:param true_gradients: If the true gradients (can be slow) of the acquisition ar an approximation is used (True, default).
:param save_interval: number of iterations after which a file is produced with the current results.
"""
# --- Load the parameters of the function into the object.
if max_iter == None:
self.max_iter = 10*self.input_dim
else:
self.max_iter = max_iter
self.num_acquisitions = 0
self.n_inbatch=n_inbatch
self.batch_method = batch_method
if batch_method=='lp':
from .acquisition import AcquisitionMP
if not isinstance(self.acquisition_func, AcquisitionMP):
self.acquisition_func = AcquisitionMP(self.acquisition_func, self.acquisition_func.acquisition_par)
self.eps = eps
self.acqu_optimize_method = acqu_optimize_method
self.acqu_optimize_restarts = acqu_optimize_restarts
self.acquisition_func.set_model(self.model)
self.n_procs = n_procs
# --- Decide wether we use the true gradients to optimize the acquitision function
if true_gradients !=True:
self.true_gradients = False
self.acquisition_func.d_acquisition_function = None
else:
self.true_gradients = true_gradients
# --- Get starting of running time
self.time = time.time()
# --- If this is the first time to optimization is run - update the model and normalize id needed
if self.first_time_optimization:
self._update_model()
prediction = self.model.predict(self.X)
self.s_in_min = np.sqrt(abs(prediction[1]))
self.first_time_optimization = False
# --- Initialization of stop conditions.
k=0
distance_lastX = np.sqrt(sum((self.X[self.X.shape[0]-1,:]-self.X[self.X.shape[0]-2,:])**2))
# --- BO loop: this loop does the hard work.
while k<self.max_iter and distance_lastX > self.eps:
# --- Augment X
self.X = np.vstack((self.X,self.suggested_sample))
# --- Evaluate *f* in X and augment Y
if self.n_procs==1:
self.Y = np.vstack((self.Y,self.f(np.array(self.suggested_sample))))
else:
try:
# --- Parallel evaluation of *f* if several cores are available
from multiprocessing import Process, Pipe
from itertools import izip
divided_samples = [self.suggested_sample[i::self.n_procs] for i in xrange(self.n_procs)]
pipe=[Pipe() for i in xrange(self.n_procs)]
proc=[Process(target=spawn(self.f),args=(c,x)) for x,(p,c) in izip(divided_samples,pipe)]
[p.start() for p in proc]
[p.join() for p in proc]
rs = [p.recv() for (p,c) in pipe]
self.Y = np.vstack([self.Y]+rs)
except:
if not hasattr(self, 'parallel_error'):
print 'Error in parallel computation. Fall back to single process!'
self.parallel_error = True
self.Y = np.vstack((self.Y,self.f(np.array(self.suggested_sample))))
# --- Update internal elements (needed for plotting)
self.num_acquisitions += 1
pred_min = self.model.predict(reshape(self.suggested_sample,self.input_dim))
self.s_in_min = np.vstack((self.s_in_min,np.sqrt(abs(pred_min[1]))))
# --- Update model
try:
self._update_model()
except np.linalg.linalg.LinAlgError:
break
# --- Update stop conditions
k +=1
distance_lastX = np.sqrt(sum((self.X[self.X.shape[0]-1,:]-self.X[self.X.shape[0]-2,:])**2))
# --- Stop messages and execution time
self.Y_best = best_value(self.Y)
self.x_opt = self.X[np.argmin(self.Y),:]
self.fx_opt = min(self.Y)
self.time = time.time() - self.time
# --- Print stopping reason
if verbose: print '*Optimization completed:'
if k==self.max_iter and distance_lastX > self.eps:
if verbose: print ' -Maximum number of iterations reached.'
return 1
else:
if verbose: print ' -Method converged.'
return 0
def change_to_sparseGP(self, num_inducing):
"""
Changes standard GP estimation to sparse GP estimation
:param num_inducing: number of inducing points for sparse-GP modeling
"""
if self.sparse == True:
raise 'Sparse GP is already in use'
else:
self.num_inducing = num_inducing
self.sparse = True
self._init_model(self.X,self.Y)
def change_to_standardGP(self):
"""
Changes sparse GP estimation to standard GP estimation
"""
if self.sparse == False:
raise 'Sparse GP is already in use'
else:
self.sparse = False
self._init_model(self.X,self.Y)
def _optimize_acquisition(self):
"""
Optimizes the acquisition function. This function selects the type of batch method and passes the arguments for the rest of the optimization.
"""
# ------ Elements of the acquisition function
acqu_name = self.acqu_name
acquisition = self.acquisition_func.acquisition_function
d_acquisition = self.acquisition_func.d_acquisition_function
acquisition_par = self.acquisition_par
model = self.model
# ------ Parameters to optimize the acquisition
acqu_optimize_restarts = self.acqu_optimize_restarts
acqu_optimize_method = self.acqu_optimize_method
n_inbatch = self.n_inbatch
bounds = self.bounds
# ------ Selection of the batch method (if any, predictive used when n_inbathc=1)
if self.batch_method == 'predictive':
X_batch = predictive_batch_optimization(acqu_name, acquisition_par, acquisition, d_acquisition, bounds, acqu_optimize_restarts, acqu_optimize_method, model, n_inbatch)
elif self.batch_method == 'lp':
X_batch = lp_batch_optimization(self.acquisition_func, bounds, acqu_optimize_restarts, acqu_optimize_method, model, n_inbatch)
elif self.batch_method == 'random':
X_batch = random_batch_optimization(acquisition, d_acquisition, bounds, acqu_optimize_restarts,acqu_optimize_method, model, n_inbatch)
return X_batch
def _update_model(self):
"""
Updates X and Y in the model and re-optimizes the parameters of the new model
"""
# ------- Normalize acquisition function (if needed)
if self.normalize:
self.model.set_XY(self.X,(self.Y-self.Y.mean())/(self.Y.std()))
else:
self.model.set_XY(self.X,self.Y)
# ------- Optimize model when required
if (self.num_acquisitions%self.model_optimize_interval)==0:
self.model.optimization_runs = [] # clear previous optimization runs so they don't get used.
self.model.optimize_restarts(num_restarts=self.model_optimize_restarts, verbose=self.verbosity)
# ------- Optimize acquisition function
self.suggested_sample = self._optimize_acquisition()
def plot_acquisition(self,filename=None):
"""
Plots the model and the acquisition function.
if self.input_dim = 1: Plots data, mean and variance in one plot and the acquisition function in another plot
if self.input_dim = 2: as before but it separates the mean and variance of the model in two different plots
:param filename: name of the file where the plot is saved
"""
return plot_acquisition(self.bounds,self.input_dim,self.model,self.model.X,self.model.Y,self.acquisition_func.acquisition_function,self.suggested_sample,filename)
def plot_convergence(self,filename=None):
"""
Makes three plots to evaluate the convergence of the model
plot 1: Iterations vs. distance between consecutive selected x's
plot 2: Iterations vs. the mean of the current model in the selected sample.
plot 3: Iterations vs. the variance of the current model in the selected sample.
:param filename: name of the file where the plot is saved
"""
return plot_convergence(self.X,self.Y_best,self.s_in_min,filename)
def get_evaluations(self):
return self.X.copy(), self.Y.copy()
def save_report(self, report_file= 'GPyOpt-results.txt ' ):
"""
Save a report with the results of the optimization. A file is produced every
:param report_file: name of the file in which the results of the optimization are saved.
"""
with open(report_file,'w') as file:
file.write('---------------------------------' + ' Results file ' + '--------------------------------------\n')
file.write('GPyOpt Version 1.0.0 \n')
file.write('Date and time: ' + time.strftime("%c")+'\n')
if self.num_acquisitions==self.max_iter:
file.write('Optimization completed: ' +'YES, ' + str(self.X.shape[0]).strip('[]') + ' samples collected.\n')
else:
file.write('Optimization completed: ' +'NO,' + str(self.X.shape[0]).strip('[]') + ' samples collected.\n')
file.write('Optimization time: ' + str(self.time).strip('[]') +' seconds.\n')
file.write('---------------------------------' + ' Problem set up ' + '------------------------------------\n')
file.write('Problem Dimension: ' + str(self.input_dim).strip('[]') +'\n')
file.write('Problem bounds: ' + str(self.bounds).strip('[]') +'\n')
file.write('Batch size: ' + str(self.n_inbatch).strip('[]') +'\n')
file.write('Acquisition: ' + self.acqu_name + '\n')
file.write('Acquisition optimizer: ' + self.acqu_optimize_method+ '\n')
file.write('Sparse GP: ' + str(self.sparseGP).strip('[]') + '\n')
file.write('---------------------------------' + ' Summary ' + '------------------------------------------\n')
file.write('Best found minimum: ' + str(min(self.Y)).strip('[]') +'\n')
file.write('Minumum location: ' + str(self.X[np.argmin(self.Y),:]).strip('[]') +'\n')
file.close() | unknown | codeparrot/codeparrot-clean | ||
# pyfc4
import copy
import datetime
import io
import json
import pdb
import rdflib
from rdflib.compare import to_isomorphic, graph_diff
import rdflib_jsonld
import requests
import time
from types import SimpleNamespace
import uuid
# logging
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Repository
class Repository(object):
    '''
    Class for Fedora Commons 4 (FC4), LDP server instance
    Args:
        root (str): Full URL of repository REST endpoint (e.g. http://localhost:8080/rest)
        username (str): username for authorization and roles
        password (str): password authorization and roles
        context (dict): dictionary of namespace prefixes and namespace URIs that propagate
        to Resources
        default_serialization (str): mimetype of default Accept and Content-Type headers
        default_auto_refresh (bool): if False, resource create/update, and graph modifications
        will not retrieve or parse updates automatically. Dramatically improves performance.
        custom_resource_type_parser (callable): optional callable(repo, uri, response) that
        returns a Resource subclass, tried before LDP Link-header parsing in get_resource()
    Attributes:
        context (dict): Default dictionary of namespace prefixes and namespace URIs
    '''
    # class-level defaults; copied per-instance in __init__ so merging a
    # user-provided context cannot mutate this shared dictionary
    context = {
        'premis':'http://www.loc.gov/premis/rdf/v1#',
        'test':'info:fedora/test/',
        'rdfs':'http://www.w3.org/2000/01/rdf-schema#',
        'dbpedia':'http://dbpedia.org/ontology/',
        'xsi':'http://www.w3.org/2001/XMLSchema-instance',
        'xmlns':'http://www.w3.org/2000/xmlns/',
        'rdf':'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
        'fedora':'http://fedora.info/definitions/v4/repository#',
        'xml':'http://www.w3.org/XML/1998/namespace',
        'ebucore':'http://www.ebu.ch/metadata/ontologies/ebucore/ebucore#',
        'ldp':'http://www.w3.org/ns/ldp#',
        'xs':'http://www.w3.org/2001/XMLSchema',
        'fedoraconfig':'http://fedora.info/definitions/v4/config#',
        'foaf':'http://xmlns.com/foaf/0.1/',
        'dc':'http://purl.org/dc/elements/1.1/',
        'pcdm':'http://pcdm.org/models#',
        'ore':'http://www.openarchives.org/ore/terms/'
    }
    def __init__(self,
            root,
            username,
            password,
            context = None,
            default_serialization = 'application/rdf+xml',
            default_auto_refresh = False,
            custom_resource_type_parser = None
        ):
        # handle root path, ensuring trailing slash
        self.root = root
        if not self.root.endswith('/'):
            self.root += '/'
        self.username = username
        self.password = password
        # serialization
        self.default_serialization = default_serialization
        # default, general auto_refresh
        self.default_auto_refresh = default_auto_refresh
        # API facade
        self.api = API(self)
        # copy the class-level context so a per-instance merge does not leak
        # into other Repository instances (previously self.context.update()
        # mutated the shared class attribute)
        self.context = dict(self.context)
        # if context provided, merge with defaults BEFORE binding namespaces,
        # so user-supplied prefixes are also bound (previously they were
        # merged after binding and never reached the namespace_manager)
        if context:
            logger.debug('context provided, merging with defaults')
            self.context.update(context)
        # instantiate namespace_manager and bind every prefix in the merged context
        self.namespace_manager = rdflib.namespace.NamespaceManager(rdflib.Graph())
        for ns_prefix, ns_uri in self.context.items():
            self.namespace_manager.bind(ns_prefix, ns_uri, override=False)
        # container for transactions
        self.txns = {}
        # optional, custom resource type parser
        self.custom_resource_type_parser = custom_resource_type_parser
    def parse_uri(self, uri=None):
        '''
        parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef
        Args:
            uri (rdflib.term.URIRef,str): input URI
        Returns:
            rdflib.term.URIRef
        Raises:
            TypeError: when input is neither None, str, nor rdflib.term.URIRef
        '''
        # no uri provided, assume root
        if not uri:
            return rdflib.term.URIRef(self.root)
        # already rdflib.term.URIRef; checked BEFORE str because URIRef
        # subclasses str
        elif isinstance(uri, rdflib.term.URIRef):
            return uri
        # string uri provided
        elif isinstance(uri, str):
            # assume "short" uri, expand with repo root
            if not uri.startswith('http'):
                return rdflib.term.URIRef("%s%s" % (self.root, uri))
            # else, assume full uri
            else:
                return rdflib.term.URIRef(uri)
        # unknown input
        else:
            raise TypeError('invalid URI input')
    def create_resource(self, resource_type=None, uri=None):
        '''
        Convenience method for creating a new resource
        Note: A Resource is instantiated, but is not yet created.  Still requires resource.create().
        Args:
            uri (rdflib.term.URIRef, str): uri of resource to create
            resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create
        Returns:
            (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type
        Raises:
            TypeError: when resource_type is not a known Resource type
        '''
        if resource_type in [NonRDFSource, Binary, BasicContainer, DirectContainer, IndirectContainer]:
            return resource_type(self, uri)
        else:
            raise TypeError("expecting Resource type, such as BasicContainer or NonRDFSource")
    def get_resource(self, uri, resource_type=None, response_format=None):
        '''
        Retrieve resource:
            - Issues an initial GET request
            - If 200, continues, 404, returns False, otherwise raises Exception
            - Parse resource type
                - If custom resource type parser provided, this fires
                - Else, or if custom parser misses, fire HEAD request and parse LDP resource type from Link header
            - Return instantiated pyfc4 resource
        Args:
            uri (rdflib.term.URIRef,str): input URI
            resource_type (): resource class e.g. BasicContainer, NonRDFSource, or extensions thereof
            response_format (str): expects mimetype / Content-Type header such as 'application/rdf+xml', 'text/turtle', etc.
        Returns:
            Resource, or False when the resource does not exist (404)
        '''
        # handle uri
        uri = self.parse_uri(uri)
        # remove a trailing /fcr:metadata if included, as it is re-appended below.
        # NOTE: str.rstrip('/fcr:metadata') was used here before, but rstrip
        # strips a *character set*, which mangled any URI ending in one of
        # those characters (e.g. '.../mydata'); use an explicit suffix check.
        uri_string = uri.toPython()
        if uri_string.endswith('/fcr:metadata'):
            uri = rdflib.term.URIRef(uri_string[:-len('/fcr:metadata')])
        # fire GET request
        get_response = self.api.http_request(
            'GET',
            "%s/fcr:metadata" % uri,
            response_format=response_format)
        # 404, item does not exist, return False
        if get_response.status_code == 404:
            logger.debug('resource uri %s not found, returning False' % uri)
            return False
        # assume exists, parse headers for resource type and return instance
        elif get_response.status_code == 200:
            # if resource_type not provided
            if not resource_type:
                # if custom resource type parser affixed to repo instance, fire
                if self.custom_resource_type_parser:
                    logger.debug("custom resource type parser provided, attempting")
                    resource_type = self.custom_resource_type_parser(self, uri, get_response)
                # parse LDP resource type from headers if custom resource parser misses,
                # or not provided
                if not resource_type:
                    # Issue HEAD request to get LDP resource type from URI proper, not /fcr:metadata
                    head_response = self.api.http_request('HEAD', uri)
                    resource_type = self.api.parse_resource_type(head_response)
            logger.debug('using resource type: %s' % resource_type)
            # return resource
            return resource_type(self,
                uri,
                response=get_response)
        else:
            raise Exception('HTTP %s, error retrieving resource uri %s' % (get_response.status_code, uri))
    def start_txn(self, txn_name=None):
        '''
        Request new transaction from repository, init new Transaction,
        store in self.txns
        Args:
            txn_name (str): human name for transaction
        Return:
            (Transaction): returns instance of newly created transaction
        '''
        # if no name provided, create one
        if not txn_name:
            txn_name = uuid.uuid4().hex
        # request new transaction
        txn_response = self.api.http_request('POST','%s/fcr:tx' % self.root, data=None, headers=None)
        # if 201, transaction was created
        if txn_response.status_code == 201:
            txn_uri = txn_response.headers['Location']
            logger.debug("spawning transaction: %s" % txn_uri)
            # init new Transaction, and pass Expires header
            txn = Transaction(
                self, # pass the repository
                txn_name,
                txn_uri,
                expires = txn_response.headers['Expires'])
            # append to self
            self.txns[txn_name] = txn
            # return
            return txn
    def get_txn(self, txn_name, txn_uri):
        '''
        Retrieves known transaction and adds to self.txns.
        TODO:
            Perhaps this should send a keep-alive request as well?  Obviously still needed, and would reset timer.
        Args:
            txn_uri (str, rdflib.term.URIRef): uri of the transaction. e.g. http://localhost:8080/rest/txn:123456789
            txn_name (str): local, human name for transaction
        Return:
            (Transaction) local instance of transactions from self.txns[txn_uri],
            or False when the transaction no longer exists (404/410)
        '''
        # parse uri
        txn_uri = self.parse_uri(txn_uri)
        # request new transaction
        txn_response = self.api.http_request('GET',txn_uri, data=None, headers=None)
        # if 200, transaction exists
        if txn_response.status_code == 200:
            logger.debug("transaction found: %s" % txn_uri)
            # init new Transaction, and pass Expires header
            txn = Transaction(
                self, # pass the repository
                txn_name,
                txn_uri,
                expires = None)
            # append to self
            self.txns[txn_name] = txn
            # return
            return txn
        # if 404, transaction does not exist
        elif txn_response.status_code in [404, 410]:
            logger.debug("transaction does not exist: %s" % txn_uri)
            return False
        else:
            raise Exception('HTTP %s, could not retrieve transaction' % txn_response.status_code)
# Transaction
class Transaction(Repository):
    '''
    Represents a single open transaction against the repository. Instances
    are spawned by a Repository and tracked in repo.txns.
    Inherits:
        Repository
    Args:
        repo (Repository): parent repository instance
        txn_name (str): human name for transaction
        txn_uri (rdflib.term.URIRef, str): URI of transaction, also used as this Transaction's root path
        expires (str): expires information from headers
    '''
    def __init__(self,
            repo,
            txn_name,
            txn_uri,
            expires = None
        ):
        # delegate shared setup to Repository, rooted at the transaction URI
        super().__init__(
            txn_uri,
            repo.username,
            repo.password,
            context = repo.context,
            default_serialization = repo.default_serialization)
        # transaction-specific state
        self.name = txn_name
        self.expires = expires
        self.active = True
    def keep_alive(self):
        '''
        Ping the transaction endpoint to extend its lifetime; refreshes
        self.expires on success.
        Args:
            None
        Return:
            (bool): True when extended, False when the transaction is gone
        '''
        response = self.api.http_request('POST','%sfcr:tx' % self.root, data=None, headers=None)
        status = response.status_code
        # 204: transaction kept alive, fresh expiry returned
        if status == 204:
            logger.debug("continuing transaction: %s" % self.root)
            self.active = True
            self.expires = response.headers['Expires']
            return True
        # 410: transaction already expired/removed
        if status == 410:
            logger.debug("transaction does not exist: %s" % self.root)
            self.active = False
            return False
        raise Exception('HTTP %s, could not continue transaction' % status)
    def _close(self, close_type):
        '''
        Finish the transaction by committing or rolling back every change
        made within it.
        Args:
            close_type (str): expects "commit" or "rollback"
        Return:
            (bool)
        '''
        response = self.api.http_request('POST','%sfcr:tx/fcr:%s' % (self.root, close_type), data=None, headers=None)
        status = response.status_code
        # 204: transaction closed successfully
        if status == 204:
            logger.debug("%s for transaction: %s, successful" % (close_type, self.root))
            self.active = False
            return True
        # 404/410: transaction never existed or is already gone
        if status in [404, 410]:
            logger.debug("transaction does not exist: %s" % self.root)
            self.active = False
            return False
        raise Exception('HTTP %s, could not commit transaction' % status)
    def commit(self):
        '''
        Commit all changes made during this transaction via self._close().
        Args:
            None
        Returns:
            bool
        '''
        return self._close('commit')
    def rollback(self):
        '''
        Discard all changes made during this transaction via self._close().
        Args:
            None
        Returns:
            bool
        '''
        return self._close('rollback')
# API
class API(object):
    '''
    API for making requests and parsing responses from repository endpoint
    Args:
        repo (Repository): instance of Repository class
    '''
    def __init__(self, repo):
        # repository instance this API facade operates against
        self.repo = repo
    def http_request(self,
            verb,
            uri,
            data=None,
            headers=None,
            files=None,
            response_format=None,
            is_rdf = True,
            stream = False
        ):
        '''
        Primary route for all HTTP requests to repository. Ability to set most parameters for requests library,
        with some additional convenience parameters as well.
        Args:
            verb (str): HTTP verb to use for request, e.g. PUT, POST, GET, HEAD, PATCH, etc.
            uri (rdflib.term.URIRef,str): input URI
            data (str,file): payload of data to send for request, may be overridden in preparation of request
            headers (dict): optional dictionary of headers passed directly to requests.request
            files (dict): optional dictionary of files passed directly to requests.request
            response_format (str): desired response format for resource's payload, e.g. 'application/rdf+xml', 'text/turtle', etc.
            is_rdf (bool): if True, set Accept header based on combination of response_format and headers
            stream (bool): passed directly to requests.request for stream parameter
        Returns:
            requests.models.Response
        '''
        # set content negotiated response format for RDFSources;
        # acceptable content negotiated response formats include:
        #   application/ld+json (discouraged, if not prohibited, as it drops prefixes used in repository)
        #   application/n-triples
        #   application/rdf+xml
        #   text/n3 (or text/rdf+n3)
        #   text/plain
        #   text/turtle (or application/x-turtle)
        # set for GET requests only
        if is_rdf and verb == 'GET':
            # if no response_format has been requested to this point, use repository instance default
            if not response_format:
                response_format = self.repo.default_serialization
            # Set the Accept header only when the caller has not supplied one.
            # NOTE: previously a caller-supplied headers dict that already
            # contained 'Accept' hit the else branch and was replaced wholesale
            # with {'Accept': response_format}, silently dropping every other
            # header and overriding the caller's Accept; honor them instead.
            if headers is None:
                headers = {'Accept':response_format}
            elif 'Accept' not in headers:
                headers['Accept'] = response_format
        # prepare uri for HTTP request (URIRef subclasses str, so this also
        # normalizes URIRef inputs to plain strings)
        if isinstance(uri, rdflib.term.URIRef):
            uri = uri.toPython()
        logger.debug("%s request for %s, format %s, headers %s" %
            (verb, uri, response_format, headers))
        # manually prepare request
        session = requests.Session()
        request = requests.Request(verb, uri, auth=(self.repo.username, self.repo.password), data=data, headers=headers, files=files)
        prepped_request = session.prepare_request(request)
        response = session.send(prepped_request,
            stream=stream,
            )
        return response
    def parse_resource_type(self, response):
        '''
        parse resource type from self.http_request()
        Note: uses isinstance() as plugins may extend these base LDP resource type.
        Args:
            response (requests.models.Response): response object
        Returns:
            [NonRDFSource, BasicContainer, DirectContainer, IndirectContainer], or False when no LDP type found
        '''
        # parse 'Link' header, keeping only LDP vocabulary URIs
        links = [
            link.split(";")[0].lstrip('<').rstrip('>')
            for link in response.headers['Link'].split(', ')
            if link.startswith('<http://www.w3.org/ns/ldp#')]
        # parse resource type string with self.repo.namespace_manager.compute_qname()
        ldp_resource_types = [
            self.repo.namespace_manager.compute_qname(resource_type)[2]
            for resource_type in links]
        logger.debug('Parsed LDP resource types from LINK header: %s' % ldp_resource_types)
        # with LDP types in hand, select appropriate resource type
        # NonRDF Source
        if 'NonRDFSource' in ldp_resource_types:
            return NonRDFSource
        # Basic Container
        elif 'BasicContainer' in ldp_resource_types:
            return BasicContainer
        # Direct Container
        elif 'DirectContainer' in ldp_resource_types:
            return DirectContainer
        # Indirect Container
        elif 'IndirectContainer' in ldp_resource_types:
            return IndirectContainer
        else:
            logger.debug('could not determine resource type from Link header, returning False')
            return False
    def parse_rdf_payload(self, data, headers):
        '''
        small function to parse RDF payloads from various repository endpoints
        Args:
            data (response.data): data from requests response
            headers (response.headers): headers from requests response
        Returns:
            (rdflib.Graph): parsed graph
        '''
        # handle edge case for content-types not recognized by rdflib parser
        if headers['Content-Type'].startswith('text/plain'):
            logger.debug('text/plain Content-Type detected, using application/n-triples for parser')
            parse_format = 'application/n-triples'
        else:
            parse_format = headers['Content-Type']
        # clean parse format for rdf parser (see: https://www.w3.org/2008/01/rdf-media-types)
        if ';charset' in parse_format:
            parse_format = parse_format.split(';')[0]
        # parse graph; payload assumed to be UTF-8 encoded bytes
        graph = rdflib.Graph().parse(
            data=data.decode('utf-8'),
            format=parse_format)
        # return graph
        return graph
# SparqlUpdate
class SparqlUpdate(object):
    '''
    Class to handle the creation of Sparql updates via PATCH request.
    Accepts prefixes and graphs from resource, computes diff of graphs, and builds sparql query for update.
    Args:
        prefixes (types.SimpleNamespace): prefixes from resource at self.rdf.prefixes
        diffs (types.SimpleNamespace): diffs is comprised of three graphs that are derived from self._diff_graph(), at self.rdf.diffs
    '''
    def __init__(self, prefixes, diffs):
        self.prefixes = prefixes
        self.diffs = diffs
        # unique namespace URIs referenced by the diffs
        self.update_namespaces = set()
        # prefix -> namespace URI mappings actually used by the update
        self.update_prefixes = {}
    def _derive_namespaces(self):
        '''
        Loop through the three graphs in self.diffs, collect the namespace URI
        of every predicate and object that resolves to a qname, then pin each
        collected URI to its prefix from self.prefixes.
        Args:
            None: uses self.prefixes and self.diffs
        Returns:
            None: sets self.update_namespaces and self.update_prefixes
        '''
        # iterate through graphs and get unique namespace uris
        for graph in [self.diffs.overlap, self.diffs.removed, self.diffs.added]:
            for s, p, o in graph:
                # predicates
                # NOTE: previously these handlers were bare `except:` and logged
                # the possibly-unbound `ns_uri` (NameError risk on the first
                # failure), mislabeling predicates as objects; log the term
                # that actually failed instead
                try:
                    ns_prefix, ns_uri, predicate = graph.compute_qname(p)
                    self.update_namespaces.add(ns_uri)
                except Exception:
                    logger.debug('could not parse predicate URI: %s' % p)
                # objects (literals will typically fail qname resolution)
                try:
                    ns_prefix, ns_uri, predicate = graph.compute_qname(o)
                    self.update_namespaces.add(ns_uri)
                except Exception:
                    logger.debug('could not parse object URI: %s' % o)
        logger.debug(self.update_namespaces)
        # build unique prefixes dictionary
        # NOTE: can improve by using self.rdf.uris (reverse lookup of self.rdf.prefixes)
        for ns_uri in self.update_namespaces:
            for k in self.prefixes.__dict__:
                if str(ns_uri) == str(self.prefixes.__dict__[k]):
                    logger.debug('adding prefix %s for uri %s to unique_prefixes' % (k,str(ns_uri)))
                    self.update_prefixes[k] = self.prefixes.__dict__[k]
    def build_query(self):
        '''
        Using the three graphs derived from self._diff_graph(), build a sparql update query in the format:
        PREFIX foo: <http://foo.com>
        PREFIX bar: <http://bar.com>
        DELETE {...}
        INSERT {...}
        WHERE {...}
        Args:
            None: uses variables from self
        Returns:
            (str) sparql update query as string
        '''
        # derive namespaces to include prefixes in Sparql update query
        self._derive_namespaces()
        sparql_query = ''
        # add prefixes
        for ns_prefix, ns_uri in self.update_prefixes.items():
            sparql_query += "PREFIX %s: <%s>\n" % (ns_prefix, str(ns_uri))
        # deletes
        # NOTE(review): assumes serialize() returns bytes (rdflib < 6) -- confirm
        removed_serialized = self.diffs.removed.serialize(format='nt').decode('utf-8')
        sparql_query += '\nDELETE {\n%s}\n\n' % removed_serialized
        # inserts
        added_serialized = self.diffs.added.serialize(format='nt').decode('utf-8')
        sparql_query += '\nINSERT {\n%s}\n\n' % added_serialized
        # where (not yet implemented)
        sparql_query += 'WHERE {}'
        # return query
        return sparql_query
# Resource
class Resource(object):
'''
Linked Data Platform Resource (LDPR)
A HTTP resource whose state is represented in any way that conforms to the simple lifecycle patterns and conventions in section 4. Linked Data Platform Resources.
https://www.w3.org/TR/ldp/
In the LDP hierarchy, this class represents the most abstract entity of "Resource".
Sub-classed by:
NonRDFSource, Container
Args:
repo (Repository): instance of Repository class
uri (rdflib.term.URIRef,str): input URI
response (requests.models.Response): defaults None, but if passed, populate self.data, self.headers, self.status_code
rdf_prefixes_mixins (dict): optional rdf prefixes and namespaces
'''
def __init__(self,
        repo,
        uri=None,
        response=None,
        rdf_prefixes_mixins=None):
    '''
    Init for the abstract LDP Resource.
    Args:
        repo (Repository): instance of Repository class
        uri (rdflib.term.URIRef,str): input URI
        response (requests.models.Response): defaults None, but if passed, populate self.data, self.headers, self.status_code
        rdf_prefixes_mixins (dict): optional rdf prefixes and namespaces
    '''
    # repository handle is pinned to resource instance here
    self.repo = repo
    # parse uri with parse_uri() from repo instance
    self.uri = self.repo.parse_uri(uri)
    # existence flag defaults to False; previously it was only assigned on
    # the 200 and no-response paths, so a non-200 response left self.exists
    # undefined and any later access raised AttributeError
    self.exists = False
    # parse response
    # if response provided, parse and set to attributes
    if response:
        self.response = response
        self.data = self.response.content
        self.headers = self.response.headers
        self.status_code = self.response.status_code
        # if response, and status_code is 200, set True
        if self.status_code == 200:
            self.exists = True
    # if not response, set all blank
    else:
        self.response = None
        self.data = None
        self.headers = {}
        self.status_code = None
    # RDF -- builds self.rdf from self.data (helper defined outside this view)
    self._build_rdf(data=self.data)
    # versions container
    self.versions = SimpleNamespace()
def __repr__(self):
return '<%s Resource, uri: %s>' % (self.__class__.__name__, self.uri)
def uri_as_string(self):
'''
return rdflib.term.URIRef URI as string
Returns:
(str)
'''
return self.uri.toPython()
def check_exists(self):
'''
Check if resource exists, update self.exists, returns
Returns:
None: sets self.exists
'''
response = self.repo.api.http_request('HEAD', self.uri)
self.status_code = response.status_code
# resource exists
if self.status_code == 200:
self.exists = True
# resource no longer here
elif self.status_code == 410:
self.exists = False
# resource not found
elif self.status_code == 404:
self.exists = False
return self.exists
def create(self, specify_uri=False, ignore_tombstone=False, serialization_format=None, stream=False, auto_refresh=None):
    '''
    Create this resource on the repository server.
    Args:
        specify_uri (bool): If True, uses PUT verb and sets the URI during creation. If False, uses POST and gets repository minted URI
        ignore_tombstone (bool): If True, will attempt creation, if tombstone exists (409), will delete tombstone and retry
        serialization_format (str): Content-Type header / mimetype used to serialize self.rdf.graph for the request
        stream (bool): passed through to the underlying HTTP request
        auto_refresh (bool): If True, refreshes resource after creation. If left None, defaults to repo.default_auto_refresh
    Raises:
        Exception: when this instance already claims existence
    '''
    # refuse to re-create a resource that already claims existence
    if self.exists:
        raise Exception('resource exists attribute True, aborting')
    # PUT pins the URI client-side; POST lets the repository mint one
    verb = 'PUT' if specify_uri else 'POST'
    logger.debug('creating resource %s with verb %s' % (self.uri, verb))
    if issubclass(type(self), NonRDFSource):
        # binary resource (or extension thereof): prep and send raw payload
        self.binary._prep_binary()
        data = self.binary.data
    else:
        # RDF resource: serialize the current graph
        if not serialization_format:
            serialization_format = self.repo.default_serialization
        data = self.rdf.graph.serialize(format=serialization_format)
        logger.debug('Serialized graph used for resource creation:')
        logger.debug(data.decode('utf-8'))
        self.headers['Content-Type'] = serialization_format
    # fire creation request and delegate response handling
    response = self.repo.api.http_request(verb, self.uri, data=data, headers=self.headers, stream=stream)
    return self._handle_create(response, ignore_tombstone, auto_refresh)
def _handle_create(self, response, ignore_tombstone, auto_refresh):
    '''
    Handles response from self.create(), mapping HTTP status codes to outcomes.
    Args:
        response (requests.models.Response): response object from self.create()
        ignore_tombstone (bool): If True, will attempt creation, and if tombstone exists (410), will delete tombstone and retry
        auto_refresh (bool): If True, refreshes resource after creation; if None, falls back to repo.default_auto_refresh
    Returns:
        (Resource): self
    Raises:
        Exception: on 404, 409, 410 (when not ignoring tombstone), 415, or any unrecognized status code
    '''
    # 201, success, refresh
    if response.status_code == 201:
        # if not specifying uri, capture from response and append to object
        self.uri = self.repo.parse_uri(response.text)
        # creation successful
        if auto_refresh:
            self.refresh()
        elif auto_refresh == None:
            if self.repo.default_auto_refresh:
                self.refresh()
        # fire resource._post_create hook if exists
        if hasattr(self,'_post_create'):
            self._post_create(auto_refresh=auto_refresh)
    # 404, assumed POST, target location does not exist
    elif response.status_code == 404:
        raise Exception('HTTP 404, for this POST request target location does not exist')
    # 409, conflict, resource likely exists
    elif response.status_code == 409:
        raise Exception('HTTP 409, resource already exists')
    # 410, tombstone present
    elif response.status_code == 410:
        if ignore_tombstone:
            response = self.repo.api.http_request('DELETE', '%s/fcr:tombstone' % self.uri)
            if response.status_code == 204:
                logger.debug('tombstone removed, retrying create')
                # NOTE(review): this retry calls create() with default arguments only,
                # dropping the caller's original specify_uri / serialization_format /
                # stream / auto_refresh values — confirm whether that is intended
                self.create()
            else:
                raise Exception('HTTP %s, Could not remove tombstone for %s' % (response.status_code, self.uri))
        else:
            raise Exception('tombstone for %s detected, aborting' % self.uri)
    # 415, unsupported media type
    elif response.status_code == 415:
        raise Exception('HTTP 415, unsupported media type')
    # unknown status code
    else:
        raise Exception('HTTP %s, unknown error creating resource' % response.status_code)
    # if all goes well, return self
    return self
def options(self):
    '''
    Issue an HTTP OPTIONS request against self.uri.
    Args:
        None
    Returns:
        (dict): response headers from the OPTIONS request
    '''
    # single round-trip; only the headers are of interest
    return self.repo.api.http_request('OPTIONS', self.uri).headers
def move(self, destination, remove_tombstone=True):
    '''
    Method to move resource to another location.
    Note: by default, this method removes the tombstone left at the resource's original URI.
    Set remove_tombstone=False to keep the tombstone after a successful move.
    Note: other resources' triples that are managed by Fedora and point to this resource
    *will* point to the new URI after the move.
    Args:
        destination (rdflib.term.URIRef, str): URI location to move resource to
        remove_tombstone (bool): defaults to True; if True, removes the tombstone at the original URI
    Returns:
        (rdflib.term.URIRef): URI of the resource's new location
    Raises:
        Exception: if the MOVE request does not return HTTP 201
    '''
    # parse destination into a URI
    destination_uri = self.repo.parse_uri(destination)
    # fire MOVE request
    response = self.repo.api.http_request('MOVE', self.uri, data=None, headers={'Destination':destination_uri.toPython()})
    # handle response
    if response.status_code == 201:
        # move succeeded; resource no longer exists at the old URI
        self.exists = False
        # handle tombstone left at the original location
        if remove_tombstone:
            tombstone_response = self.repo.api.http_request('DELETE', "%s/fcr:tombstone" % self.uri)
            # previously this response was silently discarded; surface failures in the log
            if tombstone_response.status_code != 204:
                logger.debug('could not remove tombstone at original location %s, HTTP %s' % (self.uri, tombstone_response.status_code))
        # update uri, refresh, and return
        self.uri = destination_uri
        self.refresh()
        return destination_uri
    else:
        raise Exception('HTTP %s, could not move resource %s to %s' % (response.status_code, self.uri, destination_uri))
def copy(self, destination):
    '''
    Method to copy resource to another location.
    Args:
        destination (rdflib.term.URIRef, str): URI location to copy resource to
    Returns:
        (rdflib.term.URIRef): URI of the new copy
    Raises:
        Exception: if the COPY request does not return HTTP 201
    '''
    # parse destination into a URI
    destination_uri = self.repo.parse_uri(destination)
    # fire COPY request
    response = self.repo.api.http_request('COPY', self.uri, data=None, headers={'Destination':destination_uri.toPython()})
    # handle response
    if response.status_code == 201:
        return destination_uri
    else:
        # fixed: error message previously said "move" for a failed copy
        raise Exception('HTTP %s, could not copy resource %s to %s' % (response.status_code, self.uri, destination_uri))
def delete(self, remove_tombstone=True):
    '''
    Method to delete resources.
    Args:
        remove_tombstone (bool): If True, will remove tombstone at uri/fcr:tombstone when removing resource.
    Returns:
        (bool): True if the DELETE returned HTTP 204, False otherwise
    '''
    response = self.repo.api.http_request('DELETE', self.uri)
    # update exists
    if response.status_code == 204:
        # removal successful, updating self
        self._empty_resource_attributes()
        if remove_tombstone:
            self.repo.api.http_request('DELETE', '%s/fcr:tombstone' % self.uri)
        return True
    # previously fell through and implicitly returned None; return an explicit
    # False (still falsy for existing callers) to honor the documented bool return
    return False
def refresh(self, refresh_binary=True):
    '''
    Performs GET request and refreshes RDF information for resource.
    Args:
        refresh_binary (bool): if True and resource is NonRDFSource, refresh binary data as well
    Returns:
        None
    Raises:
        Exception: if the repository reports a different resource type than this instance
    '''
    updated_self = self.repo.get_resource(self.uri)
    # if resource type of updated_self != self, raise exception
    if not isinstance(self, type(updated_self)):
        raise Exception('Instantiated %s, but repository reports this resource is %s' % (type(updated_self), type(self)) )
    if updated_self:
        # update attributes
        self.status_code = updated_self.status_code
        self.rdf.data = updated_self.rdf.data
        self.headers = updated_self.headers
        self.exists = updated_self.exists
        # update graph if RDFSource
        if type(self) != NonRDFSource:
            self._parse_graph()
        # empty versions
        self.versions = SimpleNamespace()
        # if NonRDF, set binary attributes
        if type(updated_self) == NonRDFSource and refresh_binary:
            self.binary.refresh(updated_self)
        # fire resource._post_refresh hook if exists
        if hasattr(self,'_post_refresh'):
            self._post_refresh()
        # cleanup
        del(updated_self)
    else:
        # fixed: the '%s' placeholder previously had no argument, logging the literal string
        logger.debug('resource %s not found, dumping values' % self.uri)
        self._empty_resource_attributes()
def _build_rdf(self, data=None):
    '''
    Rebuild the self.rdf namespace object: store incoming payload, scaffold
    prefix/uri namespaces from the repository context, then parse the graph.
    Args:
        data: payload from GET request, expected RDF content in various serialization formats
    Returns:
        None: sets self.rdf
    '''
    # fresh scaffold for all rdf-related state
    rdf = SimpleNamespace()
    rdf.data = data
    rdf.prefixes = SimpleNamespace()
    rdf.uris = SimpleNamespace()
    self.rdf = rdf
    # expose each repository-context prefix as an attribute on self.rdf.prefixes
    for prefix, uri in self.repo.context.items():
        setattr(rdf.prefixes, prefix, rdflib.Namespace(uri))
    # parse payload (or create empty graph) into self.rdf.graph
    self._parse_graph()
def _parse_graph(self):
    '''
    Use Content-Type from headers to determine parsing method for self.rdf.data.
    Args:
        None
    Returns:
        None: sets self.rdf.graph by parsing data from GET request, or setting blank graph if resource does not yet exist
    '''
    # if resource exists, parse self.rdf.data
    if self.exists:
        self.rdf.graph = self.repo.api.parse_rdf_payload(self.rdf.data, self.headers)
    # else, create empty graph
    else:
        self.rdf.graph = rdflib.Graph()
    # bind any additional namespaces from repo instance, but do not override
    # namespaces the parsed graph already declares
    self.rdf.namespace_manager = rdflib.namespace.NamespaceManager(self.rdf.graph)
    for ns_prefix, ns_uri in self.rdf.prefixes.__dict__.items():
        self.rdf.namespace_manager.bind(ns_prefix, ns_uri, override=False)
    # conversely, add namespaces from parsed graph to self.rdf.prefixes,
    # and record the reverse uri->prefix mapping on self.rdf.uris
    for ns_prefix, ns_uri in self.rdf.graph.namespaces():
        setattr(self.rdf.prefixes, ns_prefix, rdflib.Namespace(ns_uri))
        setattr(self.rdf.uris, rdflib.Namespace(ns_uri), ns_prefix)
    # pin old graph to resource, create copy graph for modifications
    # (the deepcopy is what later allows _diff_graph to compute local changes)
    self.rdf._orig_graph = copy.deepcopy(self.rdf.graph)
    # parse triples for object-like access
    self.parse_object_like_triples()
def parse_object_like_triples(self):
    '''
    Parse triples from self.rdf.graph into nested SimpleNamespaces for
    object-like access, e.g. self.rdf.triples.dc.title -> [objects].
    Args:
        None
    Returns:
        None: sets self.rdf.triples
    '''
    # fresh container for parsed triples
    self.rdf.triples = SimpleNamespace()
    for subj, pred, obj in self.rdf.graph:
        # resolve the predicate into (prefix, namespace uri, local name)
        prefix, ns_uri, local_name = self.rdf.graph.compute_qname(pred)
        # ensure a namespace-level node exists for this prefix
        ns_node = getattr(self.rdf.triples, prefix, None)
        if ns_node is None:
            ns_node = SimpleNamespace()
            setattr(self.rdf.triples, prefix, ns_node)
        # ensure a list exists for this predicate, then accumulate the object
        obj_list = getattr(ns_node, local_name, None)
        if obj_list is None:
            obj_list = []
            setattr(ns_node, local_name, obj_list)
        obj_list.append(obj)
def _diff_graph(self):
    '''
    Compute the difference between the graph as originally retrieved
    (self.rdf._orig_graph) and the locally modified graph (self.rdf.graph),
    using rdflib.compare.graph_diff. The result is three graphs:
        overlap - triples SHARED by both
        removed - triples that exist ONLY in the original graph
        added   - triples that exist ONLY in the modified graph
    These feed the sparql update query built in self.update().
    Args:
        None
    Returns:
        None: sets self.rdf.diffs with 'overlap', 'removed', and 'added' graphs
    '''
    diffs = SimpleNamespace()
    # graph_diff requires isomorphic graph representations
    diffs.overlap, diffs.removed, diffs.added = graph_diff(
        to_isomorphic(self.rdf._orig_graph),
        to_isomorphic(self.rdf.graph))
    self.rdf.diffs = diffs
def add_namespace(self, ns_prefix, ns_uri):
    '''
    Register an additional prefix:namespace pair on this resource instance.
    The preferred route is instantiating the repository with 'context', but
    this allows per-resource additions that survive create/update/refresh.
    Args:
        ns_prefix (str): prefix for namespace, e.g. 'dc', 'foaf'
        ns_uri (str): namespace / ontology uri, e.g. 'http://purl.org/dc/elements/1.1/'
    Returns:
        None: binds the pair to self.rdf.prefixes and to self.rdf.graph for serialization
    '''
    # register for object-like access on self.rdf.prefixes
    setattr(self.rdf.prefixes, ns_prefix, rdflib.Namespace(ns_uri))
    # bind to the graph's namespace manager without clobbering existing bindings
    self.rdf.namespace_manager.bind(ns_prefix, ns_uri, override=False)
def _empty_resource_attributes(self):
    '''
    Small method to empty values if resource is removed or absent.
    Args:
        None
    Returns:
        None: empties selected resource attributes
    '''
    self.status_code = 404
    self.headers = {}
    self.exists = False
    # rebuild empty RDF scaffolding.
    # fixed: previously written as `self.rdf = self._build_rdf()`, but _build_rdf()
    # sets self.rdf internally and returns None — the assignment then overwrote
    # self.rdf with None, leaving the resource without its rdf namespace
    self._build_rdf()
    # if NonRDF, empty binary data
    if type(self) == NonRDFSource:
        self.binary.empty()
def _handle_object(self, object_input):
    '''
    Normalize values passed for adding, removing, or modifying triples.
    Maps native Python types to rdflib Literals with an appropriate
    http://www.w3.org/2001/XMLSchema# datatype; other inputs pass through.
    Args:
        object_input (str, int, float, datetime.datetime, or other): value to normalize
    Returns:
        (rdflib.term.Literal or original input): Literal with datatype when input type is recognized
    '''
    # exact-type dispatch (mirrors type(x) == T checks; bool deliberately
    # falls through since type(True) is bool, not int)
    # NOTE(review): datetime maps to xsd:date rather than xsd:dateTime — confirm intended
    xsd_datatype_map = {
        str: rdflib.XSD.string,
        int: rdflib.XSD.int,
        float: rdflib.XSD.float,
        datetime.datetime: rdflib.XSD.date,
    }
    datatype = xsd_datatype_map.get(type(object_input))
    if datatype is not None:
        return rdflib.term.Literal(object_input, datatype=datatype)
    # unrecognized type: assume caller passed an rdflib term already
    return object_input
def add_triple(self, p, o, auto_refresh=True):
    '''
    Add a triple to the local graph; subject is assumed to be this resource.
    Args:
        p (rdflib.term.URIRef): predicate
        o (): object
        auto_refresh (bool): whether or not to update object-like self.rdf.triples
    Returns:
        None: adds triple to self.rdf.graph
    '''
    triple = (self.uri, p, self._handle_object(o))
    self.rdf.graph.add(triple)
    # optionally re-parse triples for object-like access
    self._handle_triple_refresh(auto_refresh)
def set_triple(self, p, o, auto_refresh=True):
    '''
    Replace the object of the (assumed single) triple with this subject and
    predicate; subject is assumed to be this resource.
    Args:
        p (rdflib.term.URIRef): predicate
        o (): object
        auto_refresh (bool): whether or not to update object-like self.rdf.triples
    Returns:
        None: modifies pre-existing triple in self.rdf.graph
    '''
    triple = (self.uri, p, self._handle_object(o))
    self.rdf.graph.set(triple)
    # optionally re-parse triples for object-like access
    self._handle_triple_refresh(auto_refresh)
def remove_triple(self, p, o, auto_refresh=True):
    '''
    Remove a triple from the local graph; subject is assumed to be this resource.
    Args:
        p (rdflib.term.URIRef): predicate
        o (): object
        auto_refresh (bool): whether or not to update object-like self.rdf.triples
    Returns:
        None: removes triple from self.rdf.graph
    '''
    triple = (self.uri, p, self._handle_object(o))
    self.rdf.graph.remove(triple)
    # optionally re-parse triples for object-like access
    self._handle_triple_refresh(auto_refresh)
def _handle_triple_refresh(self, auto_refresh):
    '''
    Refresh self.rdf.triples when auto_refresh is truthy, or when auto_refresh
    is None and the repository instance default opts in.
    Args:
        auto_refresh (bool or None): explicit flag, or None to defer to repo default
    Returns:
        None
    '''
    if auto_refresh or (auto_refresh is None and self.repo.default_auto_refresh):
        self.parse_object_like_triples()
def update(self, sparql_query_only=False, auto_refresh=None, update_binary=True):
    '''
    Method to update resources in repository. Firing this method computes the difference in the local modified graph and the original one,
    creates an instance of SparqlUpdate and builds a sparql query that represents these differences, and sends this as a PATCH request.
    Note: sends PATCH request, regardless of RDF or NonRDF, to [uri]/fcr:metadata
    If the resource is NonRDF (Binary), this method also updates the binary data via PUT.
    Args:
        sparql_query_only (bool): If True, returns only the sparql query string and does not perform any actual updates
        auto_refresh (bool): If True, refreshes resource after update. If left None, defaults to repo.default_auto_refresh
        update_binary (bool): If True, and resource is NonRDF, updates binary data as well
    Returns:
        (bool): True on success; or (str) the sparql query when sparql_query_only=True
    Raises:
        Exception: if the PATCH request does not return HTTP 204
    '''
    # run diff on graphs, send as PATCH request
    self._diff_graph()
    sq = SparqlUpdate(self.rdf.prefixes, self.rdf.diffs)
    if sparql_query_only:
        return sq.build_query()
    response = self.repo.api.http_request(
        'PATCH',
        '%s/fcr:metadata' % self.uri, # send RDF updates to URI/fcr:metadata
        data=sq.build_query(),
        headers={'Content-Type':'application/sparql-update'})
    # if RDF update not 204, raise Exception
    if response.status_code != 204:
        logger.debug(response.content)
        raise Exception('HTTP %s, expecting 204' % response.status_code)
    # if NonRDFSource, and self.binary.data is not a Response object
    # (i.e. it holds new local content rather than previously streamed data), update binary as well
    if type(self) == NonRDFSource and update_binary and type(self.binary.data) != requests.models.Response:
        self.binary._prep_binary()
        binary_data = self.binary.data
        binary_response = self.repo.api.http_request(
            'PUT',
            self.uri,
            data=binary_data,
            headers={'Content-Type':self.binary.mimetype})
        # if not refreshing RDF, still update binary here
        if not auto_refresh and not self.repo.default_auto_refresh:
            logger.debug("not refreshing resource RDF, but updated binary, so must refresh binary data")
            updated_self = self.repo.get_resource(self.uri)
            self.binary.refresh(updated_self)
    # fire optional post-update hook
    if hasattr(self,'_post_update'):
        self._post_update()
    # determine refreshing
    '''
    If not updating binary, pass that bool to refresh as refresh_binary flag to avoid touching binary data
    '''
    if auto_refresh:
        self.refresh(refresh_binary=update_binary)
    elif auto_refresh == None:
        if self.repo.default_auto_refresh:
            self.refresh(refresh_binary=update_binary)
    return True
def children(self, as_resources=False):
    '''
    Return hierarchical children of this resource, read from ldp:contains
    triples in the local graph.
    Args:
        as_resources (bool): if True, opens each as appropriate resource type instead of returning URIs only
    Returns:
        (list): list of child URIs, or Resource instances when as_resources=True
    '''
    contains = self.rdf.prefixes.ldp.contains
    child_uris = list(self.rdf.graph.objects(None, contains))
    if not as_resources:
        return child_uris
    # retrieve each child as a full resource instance
    logger.debug('retrieving children as resources')
    return [self.repo.get_resource(child_uri) for child_uri in child_uris]
def parents(self, as_resources=False):
    '''
    Return hierarchical parents of this resource, read from fedora:hasParent
    triples in the local graph.
    Args:
        as_resources (bool): if True, opens each as appropriate resource type instead of returning URIs only
    Returns:
        (list): list of parent URIs, or Resource instances when as_resources=True
    '''
    has_parent = self.rdf.prefixes.fedora.hasParent
    parent_uris = list(self.rdf.graph.objects(None, has_parent))
    if not as_resources:
        return parent_uris
    # retrieve each parent as a full resource instance
    logger.debug('retrieving parent as resource')
    return [self.repo.get_resource(parent_uri) for parent_uri in parent_uris]
def siblings(self, as_resources=False):
    '''
    Method to return hierarchical siblings of this resource: the children of
    all of this resource's parents, minus this resource itself.
    Args:
        as_resources (bool): if True, opens each as appropriate resource type instead of returning URIs only
    Returns:
        (list): list of sibling URIs, or Resource instances when as_resources=True
    '''
    siblings = set()
    # loop through parents and accumulate their children
    for parent in self.parents(as_resources=True):
        for sibling in parent.children(as_resources=as_resources):
            siblings.add(sibling)
    # remove self from the sibling set.
    # fixed: previously used set.remove, which raises KeyError when self is not
    # found — e.g. when as_resources=True and the freshly-retrieved child objects
    # compare by identity rather than equality to this instance
    if as_resources:
        siblings.discard(self)
    else:
        siblings.discard(self.uri)
    return list(siblings)
def _affix_version(self, version_uri, version_label):
    '''
    Retrieve a version resource and attach it to self.versions under its label.
    Args:
        version_uri (rdflib.term.URIRef, str): uri of the version
        version_label (str): label the version is stored under on self.versions
    Returns:
        None
    '''
    # retrieve the resource as it exists at the version uri
    version_resource = self.repo.get_resource(version_uri)
    # wrap in ResourceVersion and attach to self.versions
    setattr(self.versions, version_label, ResourceVersion(self, version_resource, version_uri, version_label))
def create_version(self, version_label):
    '''
    Create a new version of the resource as it currently stands in the repository.
    Note: versions are based on the current live instance of the resource, not the
    local working copy — self.update() may be needed first.
    Args:
        version_label (str): label to be used for version
    Returns:
        None: on success, a ResourceVersion instance is appended to self.versions
    '''
    # POST to the fcr:versions endpoint, labeling the version via the Slug header
    response = self.repo.api.http_request('POST', '%s/fcr:versions' % self.uri, data=None, headers={'Slug':version_label})
    # 201 indicates the version was created
    if response.status_code == 201:
        version_location = response.headers['Location']
        logger.debug('version created: %s' % version_location)
        # retrieve and attach the new version
        self._affix_version(version_location, version_label)
def get_versions(self):
    '''
    Retrieves all versions of an object, and stores them at self.versions.
    Args:
        None
    Returns:
        None: appends ResourceVersion instances to self.versions
    '''
    # get all versions
    versions_response = self.repo.api.http_request('GET', '%s/fcr:versions' % self.uri)
    # parse response
    versions_graph = self.repo.api.parse_rdf_payload(versions_response.content, versions_response.headers)
    # loop through fedora.hasVersion
    for version_uri in versions_graph.objects(self.uri, self.rdf.prefixes.fedora.hasVersion):
        # get label
        # NOTE(review): graph.value() returns None when no hasVersionLabel triple
        # exists, which would make .toPython() raise AttributeError — confirm the
        # repository always supplies a label for each version
        version_label = versions_graph.value(version_uri, self.rdf.prefixes.fedora.hasVersionLabel, None).toPython()
        # affix version
        self._affix_version(version_uri, version_label)
def dump(self,format='ttl'):
    '''
    Convenience method returning this resource's RDF graph serialized as a
    string, optionally in a chosen format. Inspired by .dump from Samvera.
    Args:
        format (str): serialization format accepted by rdflib's graph.serialize(format=)
    Returns:
        (str): serialized graph, decoded as utf-8
    '''
    serialized_bytes = self.rdf.graph.serialize(format=format)
    return serialized_bytes.decode('utf-8')
# Resource Version
class ResourceVersion(Resource):
    '''
    Represents a single historical version of a resource.
    Instances are spawned by Resource.create_version() or retrieved by
    Resource.get_versions(), and live on the parent at resource.versions.<label>.
    Args:
        current_resource (Resource): the live resource this version belongs to
        version_resource (Resource): retrieved and parsed resource version
        version_uri (rdflib.term.URIRef, str): uri of version
        version_label (str): label for version
    '''
    def __init__(self, current_resource, version_resource, version_uri, version_label):
        self._current_resource = current_resource
        self.resource = version_resource
        self.uri = version_uri
        self.label = version_label

    def revert_to(self):
        '''
        Revert the live resource to this version by issuing a PATCH to the
        version uri, then refresh the live resource handle.
        Args:
            None
        Returns:
            None
        Raises:
            Exception: if the PATCH does not return HTTP 204
        '''
        response = self.resource.repo.api.http_request('PATCH', self.uri)
        # anything other than 204 means the revert failed
        if response.status_code != 204:
            raise Exception('HTTP %s, could not revert to resource version, %s' % (response.status_code, self.uri))
        logger.debug('reverting to previous version of resource, %s' % self.uri)
        # refresh current resource handle so it reflects the reverted state
        self._current_resource.refresh()

    def delete(self):
        '''
        Remove this version from the resource's history.
        Raises:
            Exception: HTTP 400 (likely most recent version, which cannot be
            removed) or any other non-204 status
        '''
        response = self.resource.repo.api.http_request('DELETE', self.uri)
        if response.status_code == 204:
            logger.debug('deleting previous version of resource, %s' % self.uri)
            # drop this version from the parent resource's versions namespace
            delattr(self._current_resource.versions, self.label)
        elif response.status_code == 400:
            raise Exception('HTTP 400, likely most recent resource version which cannot be removed')
        else:
            raise Exception('HTTP %s, could not delete resource version: %s' % (response.status_code, self.uri))
# Binary Data
class BinaryData(object):
    '''
    Class to handle binary data for NonRDFSource (Binary) resources.
    Builds out self.binary on the resource, and provides methods for
    setting/accessing binary data.
    Args:
        resource (NonRDFSource): instance of NonRDFSource resource
        binary_data: optional raw data, file-like object, or URL
        binary_mimetype (str): optional mimetype for provided data
    '''
    def __init__(self, resource, binary_data, binary_mimetype):
        # scaffold
        self.resource = resource
        # delivery: 'payload' (body) or 'header' (Content-Location), set by _prep_binary_content
        self.delivery = None
        self.data = binary_data
        self.stream = False
        self.mimetype = binary_mimetype
        # optional remote location of binary content (used via Content-Location header)
        self.location = None
        # if resource exists, issue GET and prep for use
        if self.resource.exists:
            self.parse_binary()
    def empty(self):
        '''
        Method to empty attributes, particularly for use when
        object is deleted but remains as variable.
        '''
        self.resource = None
        self.delivery = None
        self.data = None
        self.stream = False
        self.mimetype = None
        self.location = None
    def refresh(self, updated_self):
        '''
        Method to refresh binary attributes and data from a freshly retrieved resource.
        Args:
            updated_self (Resource): freshly retrieved resource this binary data attaches to
        Returns:
            None: updates attributes
        '''
        logger.debug('refreshing binary attributes')
        self.mimetype = updated_self.binary.mimetype
        self.data = updated_self.binary.data
    def parse_binary(self):
        '''
        When retrieving a NonRDF resource, parse binary data and make available
        via a streamable response stored at self.data.
        '''
        # derive mimetype from the resource's ebucore:hasMimeType triple
        self.mimetype = self.resource.rdf.graph.value(
            self.resource.uri,
            self.resource.rdf.prefixes.ebucore.hasMimeType).toPython()
        # get binary content as streamable response
        # NOTE(review): the Content-Type header below reads self.resource.mimetype,
        # which NonRDFSource.__init__ initializes to None — self.mimetype (set just
        # above from the graph) looks like the intended value; confirm
        self.data = self.resource.repo.api.http_request(
            'GET',
            self.resource.uri,
            data=None,
            headers={'Content-Type':self.resource.mimetype},
            is_rdf=False,
            stream=True)
    def _prep_binary(self):
        '''
        Check/prep data and headers for NonRDFSource create or update.
        Args:
            None
        Returns:
            None: sets attributes on self and on the resource's headers
        '''
        logger.debug('preparing NonRDFSource data for create/update')
        # handle mimetype / Content-Type
        self._prep_binary_mimetype()
        # handle binary data
        self._prep_binary_content()
    def _prep_binary_mimetype(self):
        '''
        Sets Content-Type header based on headers and/or self.mimetype values.
        Implicitly favors an already-set Content-Type header.
        Args:
            None
        Returns:
            None: sets Content-Type on the resource's headers
        Raises:
            Exception: if neither mimetype nor Content-Type header is available
        '''
        # neither present
        if not self.mimetype and 'Content-Type' not in self.resource.headers.keys():
            raise Exception('to create/update NonRDFSource, mimetype or Content-Type header is required')
        # mimetype, no Content-Type: promote mimetype to header
        elif self.mimetype and 'Content-Type' not in self.resource.headers.keys():
            logger.debug('setting Content-Type header with provided mimetype: %s'
                % self.mimetype)
            self.resource.headers['Content-Type'] = self.mimetype
    def _prep_binary_content(self):
        '''
        Sets delivery method of either payload or header.
        Favors Content-Location header if set; otherwise self.location, then self.data.
        Args:
            None
        Returns:
            None: sets self.delivery and possibly Content-Location header
        Raises:
            Exception: if no content source is available
        '''
        # nothing present
        if not self.data and not self.location and 'Content-Location' not in self.resource.headers.keys():
            raise Exception('creating/updating NonRDFSource requires content from self.binary.data, self.binary.location, or the Content-Location header')
        elif 'Content-Location' in self.resource.headers.keys():
            logger.debug('Content-Location header found, using')
            self.delivery = 'header'
        # if Content-Location is not set, look for self.location then self.data
        elif 'Content-Location' not in self.resource.headers.keys():
            # location set, trumps self.data
            if self.location:
                # set appropriate header
                self.resource.headers['Content-Location'] = self.location
                self.delivery = 'header'
            # data attribute is plain text, binary, or file-like object
            elif self.data:
                # if file-like object, set flag for api.http_request
                if isinstance(self.data, io.BufferedIOBase):
                    logger.debug('detected file-like object')
                    self.delivery = 'payload'
                # else, just bytes
                else:
                    logger.debug('detected bytes')
                    self.delivery = 'payload'
    def range(self, byte_start, byte_end, stream=True):
        '''
        Return a particular byte range from NonRDF resource's binary data.
        https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
        Args:
            byte_start (int): position of range start
            byte_end (int): position of range end
            stream (bool): if True, return a streamable response
        Returns:
            (requests.Response): streamable response
        Raises:
            Exception: if the server does not answer with HTTP 206 Partial Content
        '''
        response = self.resource.repo.api.http_request(
            'GET',
            self.resource.uri,
            data=None,
            headers={
                'Content-Type':self.mimetype,
                'Range':'bytes=%s-%s' % (byte_start, byte_end)
            },
            is_rdf=False,
            stream=stream)
        # expects 206 Partial Content
        if response.status_code == 206:
            return response
        else:
            raise Exception('HTTP %s, but was expecting 206' % response.status_code)
# NonRDF Source
class NonRDFSource(Resource):
    '''
    Linked Data Platform Non-RDF Source (LDP-NR)
    An LDPR whose state is not represented in RDF. For example, these can be binary or text documents that do not have useful RDF representations.
    https://www.w3.org/TR/ldp/
    Note: When a pre-existing NonRDFSource is retrieved, the binary data is stored under self.binary.data as a
    streamable requests object.
    Inherits:
        Resource
    Args:
        repo (Repository): instance of Repository class
        uri (rdflib.term.URIRef,str): input URI
        response (requests.models.Response): defaults None, but if passed, populate self.data, self.headers, self.status_code
        binary_data: optional, file data, accepts file-like object, raw data, or URL
        binary_mimetype: optional, mimetype for provided data
    '''
    def __init__(self, repo, uri=None, response=None, binary_data=None, binary_mimetype=None):
        self.mimetype = None
        # fire parent Resource init()
        super().__init__(repo, uri=uri, response=response)
        # build binary data with BinaryData class instance
        self.binary = BinaryData(self, binary_data, binary_mimetype)

    def fixity(self, response_format=None):
        '''
        Issues fixity check, returns verdict and parsed PREMIS graph.
        Args:
            response_format (str): desired serialization; defaults to repo.default_serialization
                NOTE(review): currently not passed to the request — confirm intended
        Returns:
            (dict): {'verdict':(bool) verdict of fixity check, 'premis_graph':(rdflib.Graph) parsed PREMIS graph}
        '''
        # if no response_format, use default
        if not response_format:
            response_format = self.repo.default_serialization
        # issue GET request for fixity check
        response = self.repo.api.http_request('GET', '%s/fcr:fixity' % self.uri)
        # parse
        fixity_graph = self.repo.api.parse_rdf_payload(response.content, response.headers)
        # determine verdict.
        # fixed: verdict is now initialized before the loop — previously, a fixity
        # response with no premis:hasEventOutcome triples left verdict unbound and
        # raised NameError at the return statement
        verdict = False
        for outcome in fixity_graph.objects(None, self.rdf.prefixes.premis.hasEventOutcome):
            if outcome.toPython() == 'SUCCESS':
                verdict = True
            else:
                verdict = False
        return {
            'verdict':verdict,
            'premis_graph':fixity_graph
        }
# 'Binary' is an alias for NonRDFSource
# (Fedora documentation refers to LDP-NRs as "Binary" resources; both names
# resolve to the same class, so isinstance checks against either work)
Binary = NonRDFSource
# RDF Source
class RDFResource(Resource):
    '''
    Linked Data Platform RDF Source (LDP-RS)
    An LDPR whose state is fully represented in RDF, corresponding to an RDF graph.
    See also the term RDF Source from [rdf11-concepts]. https://www.w3.org/TR/ldp/
    Sub-classed by:
        Container
    Inherits:
        Resource
    Args:
        repo (Repository): instance of Repository class
        uri (rdflib.term.URIRef,str): input URI
        response (requests.models.Response): defaults None, but if passed, populates self.data, self.headers, self.status_code
    '''
    def __init__(self, repo, uri=None, response=None):
        # delegate all initialization to Resource
        super().__init__(repo, uri=uri, response=response)
# Container
class Container(RDFResource):
    '''
    Linked Data Platform Container (LDPC)
    An LDP-RS representing a collection of linked documents (RDF Document [rdf11-concepts]
    or information resources [WEBARCH]) that responds to client requests for creation,
    modification, and/or enumeration of its linked members and documents, conforming to
    section 5, Linked Data Platform Containers. https://www.w3.org/TR/ldp/
    Sub-classed by:
        BasicContainer, IndirectContainer, DirectContainer
    Inherits:
        RDFResource
    Args:
        repo (Repository): instance of Repository class
        uri (rdflib.term.URIRef,str): input URI
        response (requests.models.Response): defaults None, but if passed, populates self.data, self.headers, self.status_code
    '''
    def __init__(self, repo, uri=None, response=None):
        # delegate all initialization to RDFResource
        super().__init__(repo, uri=uri, response=response)
# Basic Container
class BasicContainer(Container):
    '''
    Linked Data Platform Basic Container (LDP-BC)
    An LDPC that defines a simple link to its contained documents (information resources) [WEBARCH].
    https://www.w3.org/TR/ldp/
    https://gist.github.com/hectorcorrea/dc20d743583488168703
    - "The important thing to notice is that by posting to a Basic Container, the LDP server automatically adds a triple with ldp:contains predicate pointing to the new resource created."
    Inherits:
        Container
    Args:
        repo (Repository): instance of Repository class
        uri (rdflib.term.URIRef,str): input URI
        response (requests.models.Response): defaults None, but if passed, populates self.data, self.headers, self.status_code
    '''
    def __init__(self, repo, uri=None, response=None):
        # delegate all initialization to Container
        super().__init__(repo, uri=uri, response=response)
# Direct Container
class DirectContainer(Container):
    '''
    Linked Data Platform Direct Container (LDP-DC)
    An LDPC that adds the concept of membership, allowing the flexibility of choosing what
    form its membership triples take, and allowing members to be any resources [WEBARCH],
    not only documents. https://www.w3.org/TR/ldp/
    When adding children, can also write relationships to another resource.
    Inherits:
        Container
    Args:
        repo (Repository): instance of Repository class
        uri (rdflib.term.URIRef,str): input URI
        response (requests.models.Response): defaults None, but if passed, populates self.data, self.headers, self.status_code
        membershipResource (rdflib.term.URIRef): resource that will accumulate triples as children are added
        hasMemberRelation (rdflib.term.URIRef): predicate used when pointing from the URI in ldp:membershipResource to children
    '''
    def __init__(self,
        repo,
        uri=None,
        response=None,
        membershipResource=None,
        hasMemberRelation=None):
        # delegate base initialization to Container
        super().__init__(repo, uri=uri, response=response)
        # store membership configuration on the instance
        self.membershipResource = membershipResource
        self.hasMemberRelation = hasMemberRelation
        # assert ldp:DirectContainer rdf:type on the local graph
        self.add_triple(self.rdf.prefixes.rdf.type, self.rdf.prefixes.ldp.DirectContainer)
        # write membership triples for whichever configuration was provided
        if membershipResource:
            self.add_triple(self.rdf.prefixes.ldp.membershipResource, membershipResource)
        if hasMemberRelation:
            self.add_triple(self.rdf.prefixes.ldp.hasMemberRelation, hasMemberRelation)
# Indirect Container
class IndirectContainer(Container):
    '''
    Linked Data Platform Indirect Container (LDP-IC)
    An LDPC similar to an LDP-DC that is also capable of having members whose URIs are based
    on the content of its contained documents rather than the URIs assigned to those documents.
    https://www.w3.org/TR/ldp/
    Inherits:
        Container
    Args:
        repo (Repository): instance of Repository class
        uri (rdflib.term.URIRef,str): input URI
        response (requests.models.Response): defaults None, but if passed, populates self.data, self.headers, self.status_code
        membershipResource (rdflib.term): resource that will accumulate triples as children are added
        hasMemberRelation (rdflib.term): predicate used when pointing from the URI in ldp:membershipResource to ldp:insertedContentRelation
        insertedContentRelation (rdflib.term): destination for ldp:hasMemberRelation from ldp:membershipResource
    '''
    def __init__(self,
        repo,
        uri=None,
        response=None,
        membershipResource=None,
        hasMemberRelation=None,
        insertedContentRelation=None):
        # delegate base initialization to Container
        super().__init__(repo, uri=uri, response=response)
        # store membership configuration on the instance
        self.membershipResource = membershipResource
        self.hasMemberRelation = hasMemberRelation
        self.insertedContentRelation = insertedContentRelation
        # assert ldp:IndirectContainer rdf:type on the local graph
        self.add_triple(self.rdf.prefixes.rdf.type, self.rdf.prefixes.ldp.IndirectContainer)
        # write membership triples for whichever configuration was provided
        if membershipResource:
            self.add_triple(self.rdf.prefixes.ldp.membershipResource, membershipResource)
        if hasMemberRelation:
            self.add_triple(self.rdf.prefixes.ldp.hasMemberRelation, hasMemberRelation)
        if insertedContentRelation:
            self.add_triple(self.rdf.prefixes.ldp.insertedContentRelation, insertedContentRelation)
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core import GP
from ..models import GPLVM
from ..mappings import *
class BCGPLVM(GPLVM):
    """
    Back constrained Gaussian Process Latent Variable Model.

    The latent positions X are not free parameters: they are the image of the
    observed data Y under a parametric mapping (the "back constraint"), so
    optimisation happens over the mapping's parameters together with the GP's.

    :param Y: observed data
    :type Y: np.ndarray
    :param input_dim: latent dimensionality
    :type input_dim: int
    :param init: initialisation method for the latent space
    :type init: 'PCA'|'random'
    :param mapping: mapping for back constraint
    :type mapping: GPy.core.Mapping object
    """

    def __init__(self, Y, input_dim, init='PCA', X=None, kernel=None, normalize_Y=False, mapping=None):
        # default back constraint: a kernel-based mapping from data space
        if mapping is None:
            mapping = Kernel(X=Y, output_dim=input_dim)
        self.mapping = mapping
        GPLVM.__init__(self, Y, input_dim, init, X, kernel, normalize_Y)
        # latent positions are fully determined by the mapping of Y
        self.X = self.mapping.f(self.likelihood.Y)

    def _get_param_names(self):
        """Mapping parameter names followed by the GP's own names."""
        return self.mapping._get_param_names() + GP._get_param_names(self)

    def _get_params(self):
        """Concatenate mapping parameters with the GP parameters."""
        mapping_params = self.mapping._get_params()
        gp_params = GP._get_params(self)
        return np.hstack((mapping_params, gp_params))

    def _set_params(self, x):
        """Split *x* between mapping (first num_params) and GP, then refresh X."""
        n = self.mapping.num_params
        self.mapping._set_params(x[:n])
        self.X = self.mapping.f(self.likelihood.Y)
        GP._set_params(self, x[n:])

    def _log_likelihood_gradients(self):
        """Gradients w.r.t. the mapping parameters (dL/dX chained through the
        mapping) followed by the GP's own gradients."""
        dL_dX = self.kern.gradients_X(self.dL_dK, self.X)
        mapping_grads = self.mapping.df_dtheta(dL_dX, self.likelihood.Y)
        return np.hstack((mapping_grads.flatten(), GP._log_likelihood_gradients(self)))
<?php
namespace Illuminate\Database\Eloquent\Casts;
use Illuminate\Contracts\Database\Eloquent\Castable;
use Illuminate\Contracts\Database\Eloquent\CastsAttributes;
use Illuminate\Support\Stringable;
class AsStringable implements Castable
{
    /**
     * Get the caster class to use when casting from / to this cast target.
     *
     * @param  array  $arguments
     * @return \Illuminate\Contracts\Database\Eloquent\CastsAttributes<\Illuminate\Support\Stringable, string|\Stringable>
     */
    public static function castUsing(array $arguments)
    {
        return new class implements CastsAttributes
        {
            /**
             * Wrap a non-null stored value in a Stringable instance.
             */
            public function get($model, $key, $value, $attributes)
            {
                if (! isset($value)) {
                    return null;
                }

                return new Stringable($value);
            }

            /**
             * Flatten a non-null incoming value to a plain string for storage.
             */
            public function set($model, $key, $value, $attributes)
            {
                if (! isset($value)) {
                    return null;
                }

                return (string) $value;
            }
        };
    }
}
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_vrrp
version_added: "2.1"
short_description: Manages VRRP configuration on NX-OS switches.
description:
- Manages VRRP configuration on NX-OS switches.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- VRRP feature needs to be enabled first on the system.
- SVIs must exist before using this module.
- Interface must be a L3 port before using this module.
- C(state=absent) removes the VRRP group if it exists on the device.
- VRRP cannot be configured on loopback interfaces.
options:
group:
description:
- VRRP group number.
required: true
interface:
description:
- Full name of interface that is being managed for VRRP.
required: true
priority:
description:
- VRRP priority.
required: false
default: null
vip:
description:
- VRRP virtual IP address.
required: false
default: null
authentication:
description:
- Clear text authentication string.
required: false
default: null
admin_state:
description:
- Used to enable or disable the VRRP process.
required: false
choices: ['shutdown', 'no shutdown']
default: no shutdown
version_added: "2.2"
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure vrrp group 100 and vip 10.1.100.1 is on vlan10
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
host: 68.170.147.165
- name: Ensure removal of the vrrp group config
# vip is required to ensure the user knows what they are removing
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
state: absent
host: 68.170.147.165
- name: Re-config with more params
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
preempt: false
priority: 130
authentication: AUTHKEY
host: 68.170.147.165
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"authentication": "testing", "group": "150", "vip": "10.1.15.1",
"admin_state": "no shutdown"}
existing:
description: k/v pairs of existing vrrp info on the interface
type: dict
sample: {}
end_state:
description: k/v pairs of vrrp after module execution
returned: always
type: dict
sample: {"authentication": "testing", "group": "150", "interval": "1",
"preempt": true, "priority": "100", "vip": "10.1.15.1",
"admin_state": "no shutdown"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface vlan10", "vrrp 150", "address 10.1.15.1",
"authentication text testing", "no shutdown"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
import collections
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
    """Normalize *val* to a list: sequences are copied, None becomes [],
    any other scalar is wrapped in a single-element list."""
    if val is None:
        return list()
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
class CustomNetworkConfig(NetworkConfig):
    # NetworkConfig subclass adding section-aware lookup/expansion helpers
    # used by the migration-era nxos shared code.

    def expand_section(self, configobj, S=None):
        """Return configobj plus all of its descendants, depth-first.

        S accumulates results across recursive calls; objects already in S
        are skipped to avoid revisiting shared children.
        """
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        """Return the ConfigLine whose text is path[-1] and whose parent
        texts equal path[:-1]; implicitly returns None when absent."""
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        """Join the raw text of a list of ConfigLine objects into one string."""
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        """Return the config section at *path* as a string, or an empty list
        when the path does not exist (mixed return type kept for
        compatibility with existing callers)."""
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        """Return the ConfigLine objects making up the section at *path*.

        Raises:
            ValueError: when the path is not present in the config.
        """
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)

    def add(self, lines, parents=None):
        """Add one or more configuration lines, optionally nested under the
        given *parents* hierarchy (parents are created when missing)."""
        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            # resolve (or create) each ancestor level in order
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
def get_network_module(**kwargs):
    """Build the platform module object, supporting both the old
    (``get_module``) and new (``NetworkModule``) nxos shared-code APIs."""
    try:
        # older shared code exposes a get_module() factory
        return get_module(**kwargs)
    except NameError:
        # get_module was not importable; fall back to the newer API
        return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
    """Return the device running-config wrapped in a CustomNetworkConfig.

    A config supplied via the module's ``config`` parameter wins; otherwise
    the device is queried through whichever API the module object exposes.
    Note: the ``include_defaults`` argument itself is unused; the module
    parameter of the same name is consulted instead (kept for API parity).
    """
    contents = module.params['config']
    if not contents:
        try:
            # older shared-code API
            contents = module.get_config()
        except AttributeError:
            # newer API: honour the include_defaults module parameter
            contents = module.config.get_config(
                include_defaults=module.params['include_defaults'])
    return CustomNetworkConfig(indent=2, contents=contents)
def load_config(module, candidate):
    """Push the delta between *candidate* and the device config.

    Returns a result dict with ``changed`` and, when commands were produced,
    ``updates``. Honours check mode and the ``save`` module parameter.
    """
    running = get_config(module)
    updates = [str(c).strip() for c in candidate.difference(running)]

    result = dict(changed=False)
    if updates:
        if not module.check_mode:
            try:
                module.configure(updates)
            except AttributeError:
                module.config(updates)

            if module.params['save']:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        # changed/updates are reported even in check mode
        result['changed'] = True
        result['updates'] = updates

    return result
# END OF COMMON CODE
def execute_config_command(commands, module):
    """Send config-mode commands through the available shared-code API,
    failing the module on any shell error."""
    try:
        module.configure(commands)
    except ShellError:
        err = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(err), commands=commands)
    except AttributeError:
        # newer shared code has no .configure(); drive the cli object, which
        # needs an explicit leading 'configure' command
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            err = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(err), commands=commands)
def get_cli_body_ssh_vrrp(command, response, module):
    """Normalise raw CLI (transport=cli) output toward what NX-API returns.

    An XML-looking '| json' response means the command was valid but the
    resource does not exist yet, so an empty list is returned; 'show run'
    output is passed through untouched; anything else is parsed as a single
    JSON document (failing the module when it is not valid JSON).
    """
    raw = response[0]
    if 'xml' in raw:
        return []
    if 'show run' in command:
        return response
    try:
        cleaned = raw.replace(command + '\n\n', '').strip()
        return [json.loads(cleaned)]
    except ValueError:
        module.fail_json(msg='Command does not support JSON output',
                         command=command)
def execute_show(cmds, module, command_type=None):
    """Run show commands via whichever shared-code API the module object
    exposes, failing the module on shell errors."""
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        # newer shared code: drive the cli object directly
        try:
            if command_type:
                module.cli.add_commands(
                    cmds, output=command_type_map.get(command_type))
            else:
                module.cli.add_commands(cmds, raw=True)
            response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
def execute_show_command(command, module, command_type='cli_show'):
    """Run a show command over the active transport and return parsed body."""
    transport = module.params['transport']
    if transport == 'cli':
        # '| json' gets ssh output as close to NX-API output as possible
        cmds = [command + ' | json']
        output = execute_show(cmds, module)
        body = get_cli_body_ssh_vrrp(cmds[0], output, module)
    elif transport == 'nxapi':
        body = execute_show([command], module, command_type=command_type)
    return body
def apply_key_map(key_map, table):
    """Rename keys of *table* according to *key_map*, dropping unmapped keys.

    Truthy values are stringified; falsy values (0, '', None, False) are
    kept unchanged.
    """
    renamed = {}
    for old_key in table:
        mapped = key_map.get(old_key)
        if not mapped:
            continue
        value = table[old_key]
        renamed[mapped] = str(value) if value else value
    return renamed
def get_interface_type(interface):
    """Classify an interface name by its (case-insensitive) leading letters."""
    prefix_types = (
        ('ET', 'ethernet'),
        ('VL', 'svi'),
        ('LO', 'loopback'),
        ('MG', 'management'),
        ('MA', 'management'),
        ('PO', 'portchannel'),
    )
    upper = interface.upper()
    for prefix, intf_type in prefix_types:
        if upper.startswith(prefix):
            return intf_type
    return 'unknown'
def is_default(interface, module):
    """Return True when *interface* carries no configuration, False when it
    does, or the string 'DNE' when the interface does not exist."""
    command = 'show run interface {0}'.format(interface)
    try:
        body = execute_show_command(command, module)[0]
    except KeyError:
        return 'DNE'
    if 'invalid' in body.lower():
        return 'DNE'
    raw_list = body.split('\n')
    # a defaulted interface shows nothing after its "interface ..." header,
    # so the header is the last line of the section
    if raw_list[-1].startswith('interface'):
        return True
    return False
def get_interface_mode(interface, intf_type, module):
    """Return ``(mode, name)`` for *interface*, where mode is 'layer2',
    'layer3' or 'unknown' and name is the device's canonical name."""
    body = execute_show_command(
        'show interface {0}'.format(interface), module)[0]
    row = body['TABLE_interface']['ROW_interface']
    name = row.get('interface')

    mode = 'unknown'
    if intf_type == 'svi':
        # SVIs are always routed ports
        mode = 'layer3'
    elif intf_type in ('ethernet', 'portchannel'):
        mode = str(row.get('eth_mode', 'layer3'))
        if mode in ('access', 'trunk'):
            mode = 'layer2'
    return mode, name
def get_vrr_status(group, module, interface):
    # Determine the admin state ('shutdown' vs 'no shutdown') of a VRRP
    # group by scanning the interface's section of the full running-config.
    command = 'show run all | section interface.{0}$'.format(interface)
    body = execute_show_command(command, module, command_type='cli_show_ascii')[0]
    vrf_index = None
    admin_state = 'shutdown'

    if body:
        splitted_body = body.splitlines()
        # NOTE(review): the final line is never inspected (range stops at
        # len - 1) — confirm the group header can never be the last line.
        for index in range(0, len(splitted_body) - 1):
            if splitted_body[index].strip() == 'vrrp {0}'.format(group):
                vrf_index = index
        # NOTE(review): when the group header is not found vrf_index stays
        # None and splitted_body[None::] silently yields the WHOLE section,
        # so a 'no shutdown' belonging to a different group would be
        # reported — presumably callers only pass configured groups; verify.
        vrf_section = splitted_body[vrf_index::]

        for line in vrf_section:
            if line.strip() == 'no shutdown':
                admin_state = 'no shutdown'
                break

    return admin_state
def get_existing_vrrp(interface, group, module, name):
    """Return the parsed VRRP settings for *group* on *interface*, or {}
    when the group (or any VRRP config) is not present."""
    body = execute_show_command(
        'show vrrp detail interface {0}'.format(interface), module)
    vrrp_key = {
        'sh_group_id': 'group',
        'sh_vip_addr': 'vip',
        'sh_priority': 'priority',
        'sh_group_preempt': 'preempt',
        'sh_auth_text': 'authentication',
        'sh_adv_interval': 'interval'
    }

    try:
        vrrp_table = body[0]['TABLE_vrrp_group']
    except (AttributeError, IndexError, TypeError):
        return {}

    if isinstance(vrrp_table, dict):
        # a single configured group comes back as a bare dict; normalise
        vrrp_table = [vrrp_table]

    for entry in vrrp_table:
        parsed = apply_key_map(vrrp_key, entry['ROW_vrrp_group'])
        # translate the device's Enable/Disable strings into booleans
        if parsed['preempt'] == 'Disable':
            parsed['preempt'] = False
        elif parsed['preempt'] == 'Enable':
            parsed['preempt'] = True
        if parsed['group'] == group:
            parsed['admin_state'] = get_vrr_status(group, module, name)
            return parsed
    return {}
def get_commands_config_vrrp(delta, group):
    """Translate a delta dict into the CLI command list for 'vrrp <group>'."""
    commands = ['vrrp {0}'.format(group)]

    vip = delta.get('vip')
    if vip:
        commands.append('address {0}'.format(vip))

    priority = delta.get('priority')
    if priority:
        commands.append('priority {0}'.format(priority))

    preempt = delta.get('preempt')
    if preempt:
        commands.append('preempt')
    elif preempt is False:
        # an explicit False means preemption must be switched off
        commands.append('no preempt')

    interval = delta.get('interval')
    if interval:
        commands.append('advertisement-interval {0}'.format(interval))

    auth = delta.get('authentication')
    if auth:
        commands.append('authentication text {0}'.format(auth))

    admin_state = delta.get('admin_state')
    if admin_state:
        # admin_state is already a literal command ('shutdown'/'no shutdown')
        commands.append(admin_state)

    return commands
def flatten_list(command_lists):
    """Flatten one level of nesting: [['a', 'b'], 'c'] -> ['a', 'b', 'c']."""
    return [cmd
            for item in command_lists
            for cmd in (item if isinstance(item, list) else [item])]
def validate_params(param, module):
    """Fail the module when 'group'/'priority' are not integers in range.

    Parameters other than 'group' and 'priority' are accepted unchecked.
    """
    value = module.params[param]
    ranges = {'group': (1, 255), 'priority': (1, 254)}
    if param not in ranges:
        return
    low, high = ranges[param]
    try:
        if not low <= int(value) <= high:
            raise ValueError
    except ValueError:
        module.fail_json(
            msg="Warning! '{0}' must be an integer between"
                " {1} and {2}".format(param, low, high),
            **{param: value})
def main():
    # Module entry point: validate inputs, compute the config delta for the
    # requested VRRP group, push commands (unless in check mode), and exit
    # with proposed/existing/end_state/updates/changed facts.
    argument_spec = dict(
        group=dict(required=True, type='str'),
        interface=dict(required=True),
        priority=dict(required=False, type='str'),
        preempt=dict(required=False, type='bool'),
        vip=dict(required=False, type='str'),
        admin_state=dict(required=False, type='str',
                         choices=['shutdown', 'no shutdown'],
                         default='no shutdown'),
        authentication=dict(required=False, type='str'),
        state=dict(choices=['absent', 'present'],
                   required=False, default='present'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    state = module.params['state']
    interface = module.params['interface'].lower()
    group = module.params['group']
    priority = module.params['priority']
    preempt = module.params['preempt']
    vip = module.params['vip']
    authentication = module.params['authentication']
    admin_state = module.params['admin_state']
    transport = module.params['transport']

    # vip is also required on removal so the user confirms what is deleted
    if state == 'present' and not vip:
        module.fail_json(msg='the "vip" param is required when state=present')

    intf_type = get_interface_type(interface)
    if (intf_type != 'ethernet' and transport == 'cli'):
        # non-physical interfaces (SVIs etc.) must already exist
        if is_default(interface, module) == 'DNE':
            module.fail_json(msg='That interface does not exist yet. Create '
                                 'it first.', interface=interface)
        if intf_type == 'loopback':
            module.fail_json(msg="Loopback interfaces don't support VRRP.",
                             interface=interface)

    mode, name = get_interface_mode(interface, intf_type, module)
    if mode == 'layer2':
        module.fail_json(msg='That interface is a layer2 port.\nMake it '
                             'a layer 3 port first.', interface=interface)

    args = dict(group=group, priority=priority, preempt=preempt,
                vip=vip, authentication=authentication,
                admin_state=admin_state)

    # drop unset args; NOTE: iteritems() keeps this module Python 2 only
    proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
    existing = get_existing_vrrp(interface, group, module, name)

    changed = False
    end_state = existing
    commands = []
    if state == 'present':
        # only push the keys that differ from the device's current state
        delta = dict(
            set(proposed.iteritems()).difference(existing.iteritems()))
        if delta:
            command = get_commands_config_vrrp(delta, group)
            commands.append(command)
    elif state == 'absent':
        if existing:
            commands.append(['no vrrp {0}'.format(group)])

    if commands:
        commands.insert(0, ['interface {0}'.format(interface)])

    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # report what would be sent without touching the device
            module.exit_json(changed=True, commands=cmds)
        else:
            execute_config_command(cmds, module)
            changed = True
            end_state = get_existing_vrrp(interface, group, module, name)
            # hide the implicit 'configure' prefix from the reported updates
            if 'configure' in cmds:
                cmds.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['updates'] = cmds
    results['changed'] = changed
    results['end_state'] = end_state

    module.exit_json(**results)
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
from gunicorn import six
# Classes that can undo reading data from
# a given type of data source.
class Unreader(object):
    """Base class for readers that can push data back ("unread") so it is
    returned again by subsequent read() calls. Subclasses supply chunk()."""

    def __init__(self):
        # pending buffer holding unread/overflow bytes
        self.buf = six.BytesIO()

    def chunk(self):
        """Fetch the next raw chunk from the underlying source."""
        raise NotImplementedError()

    def read(self, size=None):
        """Read *size* bytes, or the next available data when size is None.

        A negative size is treated like None; size == 0 returns b"".
        """
        if size is not None and not isinstance(size, six.integer_types):
            raise TypeError("size parameter must be an int or long.")

        if size is not None:
            if size == 0:
                return b""
            if size < 0:
                size = None

        self.buf.seek(0, os.SEEK_END)

        if size is None:
            # drain the pending buffer first, otherwise pull a fresh chunk
            if self.buf.tell():
                pending = self.buf.getvalue()
                self.buf = six.BytesIO()
                return pending
            return self.chunk()

        # accumulate chunks until we can satisfy the request
        while self.buf.tell() < size:
            piece = self.chunk()
            if not len(piece):
                # source exhausted: hand back whatever is buffered
                pending = self.buf.getvalue()
                self.buf = six.BytesIO()
                return pending
            self.buf.write(piece)

        data = self.buf.getvalue()
        self.buf = six.BytesIO()
        self.buf.write(data[size:])   # keep the overflow for the next read
        return data[:size]

    def unread(self, data):
        """Append *data* to the pending buffer for subsequent reads."""
        self.buf.seek(0, os.SEEK_END)
        self.buf.write(data)
class SocketUnreader(Unreader):
    """Unreader pulling chunks from a connected socket."""

    def __init__(self, sock, max_chunk=8192):
        super(SocketUnreader, self).__init__()
        self.sock = sock
        # historical attribute name kept for external compatibility
        self.mxchunk = max_chunk

    def chunk(self):
        """Receive up to max_chunk bytes from the socket."""
        return self.sock.recv(self.mxchunk)
class IterUnreader(Unreader):
    """Unreader pulling chunks from an arbitrary iterable of byte strings."""

    def __init__(self, iterable):
        super(IterUnreader, self).__init__()
        self.iter = iter(iterable)

    def chunk(self):
        """Return the next item, or b"" once the iterable is exhausted."""
        if self.iter is None:
            return b""
        try:
            return six.next(self.iter)
        except StopIteration:
            # remember exhaustion so later calls return b"" immediately
            self.iter = None
            return b""
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the core layers: Dense, Dropout.
Also contains their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
class Dense(base.Layer):
  """Densely-connected layer class.

  This layer implements the operation:
  `outputs = activation(inputs * kernel + bias)`
  Where `activation` is the activation function passed as the `activation`
  argument (if not `None`), `kernel` is a weights matrix created by the layer,
  and `bias` is a bias vector created by the layer
  (only if `use_bias` is `True`).

  Note: if the input to the layer has a rank greater than 2, then it is
  flattened prior to the initial matrix multiply by `kernel`.

  Arguments:
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable). Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix.
      If `None` (default), weights are initialized using the default
      initializer used by `tf.get_variable`.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    kernel_constraint: An optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: An optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such cases.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Properties:
    units: Python integer, dimensionality of the output space.
    activation: Activation function (callable).
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer instance (or name) for the kernel matrix.
    bias_initializer: Initializer instance (or name) for the bias.
    kernel_regularizer: Regularizer instance for the kernel matrix (callable)
    bias_regularizer: Regularizer instance for the bias (callable).
    activity_regularizer: Regularizer instance for the output (callable)
    kernel_constraint: Constraint function for the kernel matrix.
    bias_constraint: Constraint function for the bias.
    kernel: Weight matrix (TensorFlow variable or tensor).
    bias: Bias vector, if applicable (TensorFlow variable or tensor).
  """

  def __init__(self, units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    super(Dense, self).__init__(trainable=trainable, name=name,
                                activity_regularizer=activity_regularizer,
                                **kwargs)
    self.units = units
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.kernel_constraint = kernel_constraint
    self.bias_constraint = bias_constraint
    # inputs must be at least rank 2: (..., features)
    self.input_spec = base.InputSpec(min_ndim=2)

  def build(self, input_shape):
    # Create kernel (and optionally bias) once the input feature size is
    # known; the last input dimension must be statically defined.
    input_shape = tensor_shape.TensorShape(input_shape)
    if input_shape[-1].value is None:
      raise ValueError('The last dimension of the inputs to `Dense` '
                       'should be defined. Found `None`.')
    # Pin the feature axis so later calls are checked against it.
    self.input_spec = base.InputSpec(min_ndim=2,
                                     axes={-1: input_shape[-1].value})
    self.kernel = self.add_variable('kernel',
                                    shape=[input_shape[-1].value, self.units],
                                    initializer=self.kernel_initializer,
                                    regularizer=self.kernel_regularizer,
                                    constraint=self.kernel_constraint,
                                    dtype=self.dtype,
                                    trainable=True)
    if self.use_bias:
      self.bias = self.add_variable('bias',
                                    shape=[self.units,],
                                    initializer=self.bias_initializer,
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint,
                                    dtype=self.dtype,
                                    trainable=True)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    shape = inputs.get_shape().as_list()
    if len(shape) > 2:
      # Broadcasting is required for the inputs: contract the last input
      # axis against the first kernel axis, keeping leading axes intact.
      outputs = standard_ops.tensordot(inputs, self.kernel, [[len(shape) - 1],
                                                             [0]])
      # Reshape the output back to the original ndim of the input.
      if context.in_graph_mode():
        output_shape = shape[:-1] + [self.units]
        outputs.set_shape(output_shape)
    else:
      outputs = standard_ops.matmul(inputs, self.kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs

  def compute_output_shape(self, input_shape):
    # Output shape is the input shape with the last axis replaced by units.
    input_shape = tensor_shape.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    if input_shape[-1].value is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % input_shape)
    return input_shape[:-1].concatenate(self.units)
def dense(
    inputs, units,
    activation=None,
    use_bias=True,
    kernel_initializer=None,
    bias_initializer=init_ops.zeros_initializer(),
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    trainable=True,
    name=None,
    reuse=None):
  """Functional interface for the densely-connected layer.

  Computes `outputs = activation(inputs.kernel + bias)`, where `activation`
  is the optional activation callable, `kernel` is a weights matrix created
  by the layer, and `bias` is a bias vector created by the layer (only when
  `use_bias` is `True`). Inputs of rank greater than 2 are flattened prior
  to the initial matrix multiply by `kernel`.

  Arguments:
    inputs: Tensor input.
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable); None keeps a linear
      activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix. If
      `None` (default), weights use the default `tf.get_variable`
      initializer.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    kernel_constraint: Optional projection function applied to the kernel
      after each `Optimizer` update (e.g. norm/value constraints). Takes the
      unprojected variable, returns the projected variable of the same
      shape. Not safe with asynchronous distributed training.
    bias_constraint: Optional projection function applied to the bias after
      each `Optimizer` update.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer by the
      same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  layer_kwargs = dict(
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      dtype=inputs.dtype.base_dtype,
      _scope=name,
      _reuse=reuse)
  return Dense(units, **layer_kwargs).apply(inputs)
class Dropout(base.Layer):
  """Applies Dropout to the input.

  During training a fraction `rate` of input units is randomly zeroed at
  each update, which helps prevent overfitting. Kept units are scaled by
  `1 / (1 - rate)` so the expected sum is unchanged between training and
  inference.

  Arguments:
    rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
      10% of input units.
    noise_shape: 1D tensor of type `int32` representing the shape of the
      binary dropout mask that will be multiplied with the input. For
      instance, with inputs of shape `(batch_size, timesteps, features)`,
      use `noise_shape=[batch_size, 1, features]` to share the mask across
      timesteps.
    seed: A Python integer. Used to create random seeds. See
      @{tf.set_random_seed}.
      for behavior.
    name: The name of the layer (string).
  """

  def __init__(self, rate=0.5,
               noise_shape=None,
               seed=None,
               name=None,
               **kwargs):
    super(Dropout, self).__init__(name=name, **kwargs)
    self.rate = rate
    self.noise_shape = noise_shape
    self.seed = seed

  def _get_noise_shape(self, inputs):
    # Subclasses may override to build custom noise shapes from dynamically
    # sized inputs. With no user-specified noise_shape, return None and let
    # nn.dropout derive the mask shape itself.
    if self.noise_shape is None:
      return None
    # Replace unknown (None) entries with the symbolic dimension of inputs.
    dynamic_shape = array_ops.shape(inputs)
    return [dynamic_shape[axis] if dim is None else dim
            for axis, dim in enumerate(self.noise_shape)]

  def call(self, inputs, training=False):
    def dropped():
      # nn.dropout takes a keep probability, hence 1 - rate.
      return nn.dropout(inputs, 1 - self.rate,
                        noise_shape=self._get_noise_shape(inputs),
                        seed=self.seed)
    # Only apply dropout when in training mode.
    return utils.smart_cond(training,
                            dropped,
                            lambda: array_ops.identity(inputs))

  def compute_output_shape(self, input_shape):
    # Dropout never changes the shape of its input.
    return input_shape
def dropout(inputs,
            rate=0.5,
            noise_shape=None,
            seed=None,
            training=False,
            name=None):
    """Functional interface for the Dropout layer.

    Randomly zeroes a fraction ``rate`` of input units at each training
    update and rescales the survivors by ``1 / (1 - rate)`` so the sum is
    unchanged between training and inference.

    Arguments:
      inputs: Tensor input.
      rate: The dropout rate, between 0 and 1 (e.g. "rate=0.1" drops out
        10% of input units).
      noise_shape: 1-D ``int32`` tensor shape for the binary dropout mask;
        e.g. ``noise_shape=[batch_size, 1, features]`` shares one mask
        across all timesteps of ``(batch_size, timesteps, features)`` input.
      seed: A Python integer used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      training: Either a Python boolean, or a TensorFlow boolean scalar
        tensor (e.g. a placeholder); selects training mode (apply dropout)
        versus inference mode (return the input untouched).
      name: The name of the layer (string).

    Returns:
      Output tensor.

    Raises:
      ValueError: if eager execution is enabled.
    """
    drop_layer = Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)
    return drop_layer.apply(inputs, training=training)
class Flatten(base.Layer):
    """Flattens an input tensor while preserving the batch axis (axis 0).

    Examples:
    ```
    x = tf.placeholder(shape=(None, 4, 4), dtype='float32')
    y = Flatten()(x)
    # now `y` has shape `(None, 16)`
    x = tf.placeholder(shape=(None, 3, None), dtype='float32')
    y = Flatten()(x)
    # now `y` has shape `(None, None)`
    ```
    """

    def __init__(self, **kwargs):
        super(Flatten, self).__init__(**kwargs)
        # Require at least a batch dimension plus one more.
        self.input_spec = base.InputSpec(min_ndim=2)

    def call(self, inputs):
        batch_size = array_ops.shape(inputs)[0]
        outputs = array_ops.reshape(inputs, (batch_size, -1))
        if context.in_graph_mode():
            # Propagate whatever static shape information is available.
            outputs.set_shape(self.compute_output_shape(inputs.get_shape()))
        return outputs

    def compute_output_shape(self, input_shape):
        dims = tensor_shape.TensorShape(input_shape).as_list()
        if all(dims[1:]):
            # All non-batch dims known statically: product is the flat size.
            flat = [dims[0], np.prod(dims[1:])]
        else:
            # Any unknown dim makes the flattened size unknown too.
            flat = [dims[0], None]
        return tensor_shape.TensorShape(flat)
def flatten(inputs, name=None):
    """Flattens an input tensor while preserving the batch axis (axis 0).

    Arguments:
      inputs: Tensor input.
      name: The name of the layer (string).

    Returns:
      Reshaped tensor.

    Examples:
    ```
    x = tf.placeholder(shape=(None, 4, 4), dtype='float32')
    y = flatten(x)
    # now `y` has shape `(None, 16)`
    x = tf.placeholder(shape=(None, 3, None), dtype='float32')
    y = flatten(x)
    # now `y` has shape `(None, None)`
    ```
    """
    return Flatten(name=name).apply(inputs)
# Aliases
# Backwards-compatible names: some call sites refer to the Dense layer and
# the dense() functional interface by their older "fully connected" names.
FullyConnected = Dense
fully_connected = dense
'use strict'

// install redis first:
// https://redis.io/
// then:
// $ npm install redis online
// $ redis-server

/**
 * Module dependencies.
 */

var express = require('../..');
var online = require('online');
var redis = require('redis');

// Redis client used as the backing store for online-user tracking.
var db = redis.createClient();

// online
// Bind the `online` tracker to the redis client.
online = online(db);

// app
var app = express();

// activity tracking, in this case using
// the UA string, you would use req.user.id etc
app.use(function(req, res, next){
  // fire-and-forget
  online.add(req.headers['user-agent']);
  next();
});
/**
 * List helper.
 *
 * Render an array of ids as an HTML unordered list.
 */

function list(ids) {
  var items = ids.map(function(id){
    return '<li>' + id + '</li>';
  });
  return '<ul>' + items.join('') + '</ul>';
}
/**
 * GET users online.
 */

app.get('/', function(req, res, next){
  // ids of recently seen users — presumably the last 5 minutes per the
  // `online` package API; TODO confirm against its documentation.
  online.last(5, function(err, ids){
    if (err) return next(err);
    res.send('<p>Users online: ' + ids.length + '</p>' + list(ids));
  });
});

/* istanbul ignore next */
if (!module.parent) {
  // Started directly (not require()d): serve on port 3000.
  app.listen(3000);
  console.log('Express started on port 3000');
}
test_kind: js_test
selector:
roots:
- jstests/core/**/*.js
- jstests/fle2/**/*.js
- src/mongo/db/modules/*/jstests/fle2/**/*.js
exclude_files:
### Tests that are excluded because of initial sync (from replica_sets_initsync_jscore_passthrough.yml)
# Tests that query the system.profile collection cannot run in this suite since an initial sync
# may insert unexpected operations into the profile collection.
- jstests/core/**/profile_list_collections.js
- jstests/core/**/profile_list_indexes.js
- jstests/core/**/query/recursion.js
- jstests/core/**/system_profile.js
# The following test examines the SBE plan cache, which initial sync may change the contents of.
- jstests/core/query/plan_cache/plan_cache_sbe.js
# operation_latency_histogram.js and geo_s2cursorlimitskip.js do not expect concurrent reads
# against their test collections.
- jstests/core/**/operation_latency_histogram.js
- jstests/core/**/geo_s2cursorlimitskip.js
# These tests run getLatestProfilerEntry(). The downstream syncing node affects the profiler.
- jstests/core/**/profile_agg.js
- jstests/core/**/profile_count.js
- jstests/core/**/profile_delete.js
- jstests/core/**/profile_distinct.js
- jstests/core/**/profile_find.js
- jstests/core/**/profile_findandmodify.js
- jstests/core/**/profile_getmore.js
- jstests/core/**/profile_insert.js
- jstests/core/**/profile_mapreduce.js
- jstests/core/**/profile_sampling.js
- jstests/core/**/profile_update.js
- jstests/core/txns/transactions_profiling.js
# Will fail all commands including those needed to forward command to initial sync node.
- jstests/core/testing/failcommand_failpoint.js
# Starts MongoCryptD instead of mongod nodes
- src/mongo/db/modules/*/jstests/fle2/fle2_bulk_write.js
# Time-outs waiting for the response to a ServerStatus request because
# the node in init sync state ignores it and never responds to it.
- jstests/core/query/release_memory/hash_lookup.js
- jstests/core/query/release_memory/hash_lookup_unwind.js
- jstests/core/query/release_memory/graph_lookup.js
- jstests/core/query/release_memory/group.js
- jstests/core/query/release_memory/set_window_fields.js
- jstests/core/query/release_memory/sort.js
- jstests/core/query/release_memory/text_or.js
- jstests/core/timeseries/query/timeseries_internal_bounded_sort_release_memory.js
exclude_with_any_tags:
- assumes_standalone_mongod
- incompatible_with_initial_sync
# These tests run many aggregations, and the override slows them down enough to hit the evergreen timeout.
- query_intensive_pbt
executor:
hooks:
- class: CleanEveryN
n: 20
config:
shell_options:
eval: >-
globalThis.testingReplication = true;
await import('jstests/libs/override_methods/send_command_to_initial_sync_node_replica_set.js');
fixture:
class: ReplicaSetFixture
mongod_options:
set_parameters:
enableTestCommands: 1
collectionClonerBatchSize: 10
initialSyncOplogFetcherBatchSize: 10
num_nodes: 2
start_initial_sync_node: True
initial_sync_uninitialized_fcv: True | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/suites/replica_sets_uninitialized_fcv_jscore_passthrough.yml |
"""PyTorch synthetic benchmark for Horovod, optionally using Apex AMP.

Feeds one fixed random batch through a torchvision model repeatedly and
reports images/second per device and aggregated across Horovod workers.
"""
from __future__ import print_function
import argparse
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data.distributed
from torchvision import models
import horovod.torch as hvd
import timeit
import numpy as np
# Apex
from apex import amp

# Benchmark settings
parser = argparse.ArgumentParser(
    description="PyTorch Synthetic Benchmark",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    "--fp16-allreduce",
    action="store_true",
    default=False,
    help="use fp16 compression during allreduce")
parser.add_argument(
    "--model", type=str, default="resnet50", help="model to benchmark")
parser.add_argument(
    "--batch-size", type=int, default=32, help="input batch size")
parser.add_argument(
    "--num-warmup-batches",
    type=int,
    default=10,
    help="number of warm-up batches that don\"t count towards benchmark")
parser.add_argument(
    "--num-batches-per-iter",
    type=int,
    default=10,
    help="number of batches per benchmark iteration")
parser.add_argument(
    "--num-iters", type=int, default=10, help="number of benchmark iterations")
parser.add_argument(
    "--no-cuda",
    action="store_true",
    default=False,
    help="disables CUDA training")
parser.add_argument(
    "--amp-fp16",
    action="store_true",
    default=False,
    help="Enables FP16 training with Apex.")
args = parser.parse_args()
# Use CUDA when available unless explicitly disabled on the command line.
args.cuda = not args.no_cuda and torch.cuda.is_available()

hvd.init()

if args.cuda:
    # Horovod: pin GPU to local rank.
    torch.cuda.set_device(hvd.local_rank())
    # Fixed input size, so let cudnn autotune its algorithms.
    cudnn.benchmark = True

# Set up standard model.
model = getattr(models, args.model)()

if args.cuda:
    # Move model to GPU.
    model.cuda()

optimizer = optim.SGD(model.parameters(), lr=0.01)

# Horovod: (optional) compression algorithm.
compression = (hvd.Compression.fp16
               if args.fp16_allreduce else hvd.Compression.none)

# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
    optimizer,
    named_parameters=model.named_parameters(),
    compression=compression)

# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)

# Apex
if args.amp_fp16:
    model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

# Set up fixed fake data
# (one random batch reused for every step; only throughput is measured).
data = torch.randn(args.batch_size, 3, 224, 224)
target = torch.LongTensor(args.batch_size).random_() % 1000
if args.cuda:
    data, target = data.cuda(), target.cuda()
def benchmark_step():
    """Run one synthetic training step: forward, loss, backward, update.

    Uses the module-level ``model``, ``optimizer``, ``data`` and ``target``.
    """
    optimizer.zero_grad()
    output = model(data)
    loss = F.cross_entropy(output, target)
    # Apex
    if args.amp_fp16:
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        # NOTE(review): synchronize()/skip_synchronize() around step() is
        # the pattern Horovod documents for combining DistributedOptimizer
        # with Apex AMP gradient scaling — confirm against Horovod docs.
        optimizer.synchronize()
        with optimizer.skip_synchronize():
            optimizer.step()
    else:
        loss.backward()
        optimizer.step()
def log(s, nl=True):
    """Print *s* on rank 0 only; suppress the trailing newline if nl is False."""
    if hvd.rank() == 0:
        print(s, end="\n" if nl else "")
log(f"Model: {args.model}")
log("Batch size: %d" % args.batch_size)
device = "GPU" if args.cuda else "CPU"
log("Number of %ss: %d" % (device, hvd.size()))

# Warm-up
log("Running warmup...")
timeit.timeit(benchmark_step, number=args.num_warmup_batches)

# Benchmark
log("Running benchmark...")
img_secs = []
for x in range(args.num_iters):
    time = timeit.timeit(benchmark_step, number=args.num_batches_per_iter)
    img_sec = args.batch_size * args.num_batches_per_iter / time
    log("Iter #%d: %.1f img/sec per %s" % (x, img_sec, device))
    img_secs.append(img_sec)

# Results
img_sec_mean = np.mean(img_secs)
# 1.96 * std is the half-width of a 95% confidence interval (normal approx).
img_sec_conf = 1.96 * np.std(img_secs)
log(f"Img/sec per {device}: {img_sec_mean:.1f} +-{img_sec_conf:.1f}")
log("Total img/sec on %d %s(s): %.1f +-%.1f" %
    (hvd.size(), device, hvd.size() * img_sec_mean, hvd.size() * img_sec_conf))
import sys
from mx.Stack import *
def test():
    """Exercise the mx.Stack API end to end (Python 2 syntax).

    Covers push/pop, bulk push_many/pop_many, resize, clear, truthiness,
    error rollback on a faulty sequence, and index/iterator access.
    Statement order matters: the trailing comment marks how far the
    implementation has been checked for leaks.
    """
    s = Stack()
    print repr(s)
    s = Stack()
    for i in range(1000):
        s.push(i)
    while s:
        print s.pop(),
    # which could also be done as:
    s = StackFromSequence(range(1000))
    while s:
        print s.pop(),
    # or a little different
    s = StackFromSequence(range(1000))
    print s.as_tuple()
    print s.as_list()
    print
    print 'Pop many.'
    assert s.pop_many(3) == (999, 998, 997)
    print 'Push many.'
    s.push_many(range(100))
    assert s.pop_many(100) == tuple(range(100-1,-1,-1))
    print 'Resize.'
    assert len(s) > 0
    s.resize()
    print 'Clear.'
    s.clear()
    assert len(s) == 0
    print 'Non-zero testing.'
    s.push_many(range(100))
    i = 0
    while s:
        s.pop()
        i = i + 1
    assert i == 100
    # push many + exceptions
    print 'Push many and exceptions.'
    class C:
        # Sequence that lies about its length: __len__ reports 100 but
        # __getitem__ raises IndexError past index 49, so push_many must
        # propagate the error and leave the stack empty (checked below).
        def __getitem__(self,i):
            if i < 50:
                return i + 1
            else:
                raise IndexError
        def __len__(self):
            return 100
    l = C()
    try:
        s.push_many(l)
    except IndexError:
        pass
    else:
        raise AssertionError,'push_many() does not handle errors correctly'
    assert len(s) == 0
    del s
    # Index access
    print 'Index access.'
    s = StackFromSequence(range(1000))
    for i in range(1000):
        assert s[i] == i
    for i in range(1000):
        assert s[-i-1] == 999-i
    i = 0
    for x in s:
        assert x == i
        i = i + 1
    del s
    # Implementation deleaked up to this line.
    print
    print 'Works.'
# Run the suite once; with '-m' on the command line, loop forever
# (useful for watching memory usage while hunting leaks).
if '-m' in sys.argv:
    while 1:
        test()
else:
    test()
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import uuid
import bdd_test_util
from contexthelper import ContextHelper
import json
from abc import ABCMeta, abstractmethod
class ContainerData:
    """Snapshot of one docker container's inspect data used by the BDD tests."""

    def __init__(self, containerName, ipAddress, envFromInspect, composeService, ports):
        # Container name (as reported by docker inspect, without leading '/').
        self.containerName = containerName
        self.ipAddress = ipAddress
        # List of "KEY=value" strings from docker inspect Config.Env.
        self.envFromInspect = envFromInspect
        # docker-compose service this container belongs to.
        self.composeService = composeService
        self.ports = ports

    def getEnv(self, key):
        """Return the value of the first env entry starting with *key*.

        *key* is matched as a prefix, so callers include the trailing '='
        (e.g. ``getEnv("PATH=")``).

        Raises:
          Exception: when no env entry matches *key*.
        """
        # Return as soon as a match is found instead of scanning with a
        # None sentinel and comparing via `== None` as the original did.
        for val in self.envFromInspect:
            if val.startswith(key):
                return val[len(key):]
        raise Exception("ENV key not found ({0}) for container ({1})".format(key, self.containerName))
class CompositionCallback:
    """Abstract observer interface for Composition lifecycle events.

    NOTE(review): ``__metaclass__`` is the Python 2 way to declare a
    metaclass; under Python 3 the attribute is ignored and the abstract
    methods would not be enforced.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def composing(self, composition, context):
        # Invoked before the composition is brought up.
        pass

    @abstractmethod
    def decomposing(self, composition, context):
        # Invoked while the composition is being torn down.
        pass

    @abstractmethod
    def getEnv(self, composition, context, env):
        # May add entries to *env* before compose commands are executed.
        pass
class Test(CompositionCallback):
    """No-op CompositionCallback used when no observer behavior is needed."""

    def composing(self, composition, context):
        """Ignore the composing event."""
        pass

    def decomposing(self, composition, context):
        """Ignore the decomposing event."""
        pass

    def getEnv(self, composition, context, env):
        """Leave *env* untouched."""
        pass
def GetDockerSafeUUID():
    """Return a uuid1 as a 32-char hex string with the dashes stripped.

    Docker object names may not contain '-', hence the "safe" form.
    """
    # UUID.hex is exactly str(uuid) with the dashes removed.
    return uuid.uuid1().hex
class Composition:
    """Drives a docker-compose project for BDD tests.

    Wraps docker-compose / docker CLI invocations under a unique project
    name, caches per-container metadata (ContainerData) and notifies
    registered CompositionCallback observers around compose/decompose.
    """

    @classmethod
    def RegisterCallbackInContext(cls, context, callback):
        # Only CompositionCallback implementations may observe lifecycle events.
        if not isinstance(callback, CompositionCallback):
            raise TypeError("Expected type to be {0}, instead received {1}".format(CompositionCallback, type(callback)))
        Composition.GetCompositionCallbacksFromContext(context).append(callback)

    @classmethod
    def GetCompositionCallbacksFromContext(cls, context):
        # Lazily create the callback list on the behave context object.
        if not "compositionCallbacks" in context:
            context.compositionCallbacks = []
        return context.compositionCallbacks

    @classmethod
    def GetUUID(cls):
        # Docker-safe (dash-free) unique id, usable as a project name.
        return GetDockerSafeUUID()

    def __init__(self, context, composeFilesYaml, projectName=None,
                 force_recreate=True, components=[], register_and_up=True):
        # NOTE(review): mutable default `components=[]` — harmless here since
        # it is only read, but a None default would be safer.
        self.contextHelper = ContextHelper.GetHelper(context=context)
        if not projectName:
            projectName = self.contextHelper.getGuuid()
        self.projectName = projectName
        self.context = context
        # Rebuilt after every mutating CLI command; see rebuildContainerData().
        self.containerDataList = []
        self.composeFilesYaml = composeFilesYaml
        self.serviceNames = []
        self.serviceNames = self._collectServiceNames()
        if register_and_up:
            # Register with contextHelper (Supports docgen)
            self.contextHelper.registerComposition(self)
            [callback.composing(self, context) for callback in Composition.GetCompositionCallbacksFromContext(context)]
            self.up(context, force_recreate, components)

    def _collectServiceNames(self):
        'First collect the services names.'
        # `docker-compose config --services` may emit WARNING lines; drop them.
        servicesList = [service for service in self.issueCommand(["config", "--services"]).splitlines() if "WARNING" not in service]
        return servicesList

    def up(self, context, force_recreate=True, components=[]):
        # Bring services up detached, optionally forcing recreation.
        self.serviceNames = self._collectServiceNames()
        command = ["up", "-d"]
        if force_recreate:
            command += ["--force-recreate"]
        self.issueCommand(command + components)

    def scale(self, context, serviceName, count=1):
        # Scale *serviceName* to *count* instances.
        self.serviceNames = self._collectServiceNames()
        command = ["scale", "%s=%d" %(serviceName, count)]
        self.issueCommand(command)

    def stop(self, context, components=[]):
        # Stop the named components (all services when empty).
        self.serviceNames = self._collectServiceNames()
        command = ["stop"]
        self.issueCommand(command, components)

    def start(self, context, components=[]):
        # Start the named components (all services when empty).
        self.serviceNames = self._collectServiceNames()
        command = ["start"]
        self.issueCommand(command, components)

    def getServiceNames(self):
        # Return a copy so callers cannot mutate the cached list.
        return list(self.serviceNames)

    def parseComposeFilesArg(self, composeFileArgs):
        # Turn a whitespace-separated list of compose files/dirs into
        # ["-f", <file>, ...]; directories get docker-compose.yml appended.
        args = [arg for sublist in [["-f", file] for file in [file if not os.path.isdir(file) else os.path.join(file, 'docker-compose.yml') for file in composeFileArgs.split()]] for arg in sublist]
        return args

    def getFileArgs(self):
        return self.parseComposeFilesArg(self.composeFilesYaml)

    def getEnvAdditions(self):
        # Environment entries layered on top of os.environ for CLI calls.
        myEnv = {}
        myEnv["COMPOSE_PROJECT_NAME"] = self.projectName
        myEnv["CORE_PEER_NETWORKID"] = self.projectName
        # Invoke callbacks
        [callback.getEnv(self, self.context, myEnv) for callback in Composition.GetCompositionCallbacksFromContext(self.context)]
        return myEnv

    def getEnv(self):
        myEnv = os.environ.copy()
        # NOTE: iteritems() makes this module Python 2 only.
        for key,value in self.getEnvAdditions().iteritems():
            myEnv[key] = value
        # myEnv["COMPOSE_PROJECT_NAME"] = self.projectName
        # myEnv["CORE_PEER_NETWORKID"] = self.projectName
        # # Invoke callbacks
        # [callback.getEnv(self, self.context, myEnv) for callback in Composition.GetCompositionCallbacksFromContext(self.context)]
        return myEnv

    def getConfig(self):
        return self.issueCommand(["config"])

    def refreshContainerIDs(self):
        # Ids of this project's containers (docker-compose ps -q).
        containers = self.issueCommand(["ps", "-q"]).split()
        return containers

    def _callCLI(self, argList, expect_success, env):
        return bdd_test_util.cli_call(argList, expect_success=expect_success, env=env)

    def issueCommand(self, command, components=[]):
        """Run a docker-compose (or plain docker) command; return stdout.

        Components whose name contains '_' are treated as concrete
        container names and addressed via plain `docker` with the project
        prefix. Cached container data is rebuilt after any mutating
        command (i.e. anything other than "ps"/"config").
        """
        componentList = []
        useCompose = True
        for component in components:
            if '_' in component:
                useCompose = False
                componentList.append("%s_%s" % (self.projectName, component))
            else:
                break
        # If we need to perform an operation on a specific container, use
        # docker not docker-compose
        if useCompose:
            cmdArgs = self.getFileArgs()+ command + components
            cmd = ["docker-compose"] + cmdArgs
        else:
            cmdArgs = command + componentList
            cmd = ["docker"] + cmdArgs
        #print("cmd:", cmd)
        output, error, returncode = \
            self._callCLI(cmd, expect_success=True, env=self.getEnv())
        # Don't rebuild if ps command
        if command[0] !="ps" and command[0] !="config":
            self.rebuildContainerData()
        return output

    def rebuildContainerData(self):
        # Re-inspect every container and rebuild self.containerDataList
        # in place (slice assignment keeps existing references valid).
        self.containerDataList[:] = []
        for containerID in self.refreshContainerIDs():
            # get container metadata
            container = json.loads(bdd_test_util.cli_call(["docker", "inspect", containerID], expect_success=True)[0])[0]
            # container name
            container_name = container['Name'][1:]
            # container ip address (only if container is running)
            container_ipaddress = None
            if container['State']['Running']:
                container_ipaddress = container['NetworkSettings']['IPAddress']
                if not container_ipaddress:
                    # ipaddress not found at the old location, try the new location
                    container_ipaddress = container['NetworkSettings']['Networks'].values()[0]['IPAddress']
            # container environment
            container_env = container['Config']['Env']
            # container exposed ports
            container_ports = container['NetworkSettings']['Ports']
            # container docker-compose service
            container_compose_service = container['Config']['Labels']['com.docker.compose.service']
            self.containerDataList.append(ContainerData(container_name, container_ipaddress, container_env, container_compose_service, container_ports))

    def decompose(self):
        # Tear everything down: the compose project, any leftover
        # project-named (chaincode) containers, and the project network,
        # then notify observers.
        self.issueCommand(["unpause"])
        self.issueCommand(["down"])
        self.issueCommand(["kill"])
        self.issueCommand(["rm", "-f"])
        # Now remove associated chaincode containers if any
        output, error, returncode = \
            bdd_test_util.cli_call(["docker"] + ["ps", "-qa", "--filter", "name={0}".format(self.projectName)], expect_success=True, env=self.getEnv())
        for containerId in output.splitlines():
            output, error, returncode = \
                bdd_test_util.cli_call(["docker"] + ["rm", "-f", containerId], expect_success=True, env=self.getEnv())
        # Remove the associated network
        output, error, returncode = \
            bdd_test_util.cli_call(["docker"] + ["network", "ls", "-q", "--filter", "name={0}".format(self.projectName)], expect_success=True, env=self.getEnv())
        for networkId in output.splitlines():
            output, error, returncode = \
                bdd_test_util.cli_call(["docker"] + ["network", "rm", networkId], expect_success=True, env=self.getEnv())
        # Invoke callbacks
        [callback.decomposing(self, self.context) for callback in Composition.GetCompositionCallbacksFromContext(self.context)]
"""
Support for Tellstick switches using Tellstick Net.
This platform uses the Telldus Live online service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.tellduslive/
"""
import logging
from homeassistant.components.tellduslive import TelldusLiveEntity
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup Tellstick switches."""
    if discovery_info is None:
        return
    switches = [TelldusLiveSwitch(hass, switch) for switch in discovery_info]
    add_devices(switches)
class TelldusLiveSwitch(TelldusLiveEntity, ToggleEntity):
    """Representation of a Tellstick switch."""

    @property
    def is_on(self):
        """Return true if switch is on."""
        # State is read from the wrapped Telldus device object.
        return self.device.is_on

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        self.device.turn_on()
        # `changed()` comes from TelldusLiveEntity — presumably pushes the
        # new state to Home Assistant; confirm against the base class.
        self.changed()

    def turn_off(self, **kwargs):
        """Turn the switch off."""
        self.device.turn_off()
        self.changed()
#!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
# This variable contains a path, which should point to the place
# where the Virtual Machine images are stored. In the case of Virtual
# Box, this will be the "Machines" folder, which under Windows is
# often located under "c:/Users/<user_name>/.VirtualBox". Its exact
# location might nonetheless vary depending on the Operating System
# used, the Virtual Machine system and the local configuration.
vm_storage_dir = """C:\\Users\\lrg\\.VirtualBox\\Machines"""
# Variables prefixed with "vbox" are specific to the Virtual Box
# Virtual Machine software.
# Name of the Virtual Box machine which this experiment uses.
# Should contain the name of the machine, which will often be
# different than the Hard Disk name.
vbox_vm_name = "WinDefVM"
# Name of the snapshot to which the Virtual Machine will be
# restored before every experiment use. The specified snapshot
# should feature a functional machine, already started and with
# the experiment ready to use. That way, the user will be able to
# start experimenting with it as fast as possible.
# It is particularly important to make sure that the snapshot is
# taken of an already started machine ( at the desktop, and with the
# password changing and remote desktop software running ).
vbox_base_snapshot = "ReadyRDP"
# The URL of the Virtual Machine. This is the URL that will be provided
# to the users for them to connect to, through their remote desktop
# software of choice. (Currently, either RDP or VNC).
vm_url = "192.168.51.79"
# The Virtual Machine software to use. Currently, only VirtualBox
# is supported, though the system is designed to be easily extensible
# and it shouldn't be particularly hard to add support for another
# one. (A new class with the appropriate name would need to be
# implemented, supporting the same interface).
vm_vm_type = "VirtualBox"
# The User Manager to employ. The User Manager prepares the machine
# for use, sending the appropriate query to it to change the password
# of the Virtual Machine. The default one, HttpQueryUserManager, is
# compatible with the VNC and RDP password changers that WebLab
# provides, and will simply send the request through HTTP to the
# URL specified below. Though the system is designed to be extensible,
# generally it won't be advisable to create a custom User Manager.
# In fact, even if we were to use a custom protocol with a custom
# password changer, it would be relatively easy to simply have it
# listen for this particular http query.
vm_user_manager_type = "HttpQueryUserManager"
# This is the URL to which password changing queries done by the
# default UserManager, HttpQueryUserManager, will be sent. This is,
# hence, the URL on which the password changing services should
# listen. It is important to make sure that the specified port
# here and the specified port in the password changing service
# configuration do match.
http_query_user_manager_url = "http://192.168.51.79:56789"
# Will save the image after every use if enabled. Generally, this is
# not advisable. In the future, support for snapshot saving might be
# implemented.
vm_should_store_image = False | unknown | codeparrot/codeparrot-clean | ||
"""
The MIT License (MIT)
Copyright (c) 2014 Jozef van Eenbergen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import curses
class Terminal(object):
    """Owner of the curses screen: global modes, color pairs, and factories.

    Initializes (or adopts) the curses screen, switches the terminal into
    noecho/cbreak keypad mode with a blocking-read timeout, registers the
    color pairs used by the UI, and creates named Window/Panel objects.
    Call stop() to restore the terminal on shutdown.
    """

    def __init__(self, scrn=None):
        # Adopt a caller-supplied screen when given, otherwise initialize
        # curses. (Bug fix: the original unconditionally called
        # curses.initscr() a second time, discarding the `scrn` argument.)
        self._screen = scrn if scrn else curses.initscr()
        curses.noecho()
        curses.cbreak()
        # curses.curs_set(0)
        self._screen.keypad(1)
        # getch() blocks for at most this many seconds.
        self._refresh_rate = 3
        self._screen.timeout(self._refresh_rate * 1000)
        # NOTE(review): selected_row starts as None, so up()/down() raise
        # TypeError until a caller assigns an integer — confirm callers
        # always initialize it before use.
        self.selected_row = None
        self.start_row = 0
        curses.start_color()
        # Foreground-on-background pairs referenced by _colors_list below.
        curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
        curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_RED)
        curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_GREEN)
        curses.init_pair(4, curses.COLOR_WHITE, curses.COLOR_BLUE)
        curses.init_pair(5, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
        curses.init_pair(6, curses.COLOR_RED, curses.COLOR_BLACK)
        curses.init_pair(7, curses.COLOR_GREEN, curses.COLOR_BLACK)
        curses.init_pair(8, curses.COLOR_BLUE, curses.COLOR_BLACK)
        curses.init_pair(9, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
        curses.init_pair(10, curses.COLOR_BLACK, curses.COLOR_YELLOW)
        curses.init_pair(11, curses.COLOR_BLACK, curses.COLOR_WHITE)
        # Semantic color names used by the rest of the UI.
        self._colors_list = {
            'DEFAULT': curses.color_pair(1),
            'UNDERLINE': curses.A_UNDERLINE,
            'BOLD': curses.A_BOLD,
            'SORT': curses.A_BOLD,
            'OK': curses.color_pair(7),
            'TITLE': curses.A_BOLD,
            'PROCESS': curses.color_pair(7),
            'STATUS': curses.color_pair(7),
            'NICE': curses.color_pair(9),
            'CAREFUL': curses.color_pair(8),
            'WARNING': curses.color_pair(9),
            'CRITICAL': curses.color_pair(6),
            'OK_LOG': curses.color_pair(3),
            'CAREFUL_LOG': curses.color_pair(4),
            'WARNING_LOG': curses.color_pair(5),
            'CRITICAL_LOG': curses.color_pair(2),
            'SEPARATOR': curses.color_pair(10),
            'REVERSE': curses.color_pair(11),
        }
        # Named Panel/Window registries populated by the create_* factories.
        self._panels = {}
        self._windows = {}

    @property
    def colors(self):
        """Mapping of semantic color names to curses attributes."""
        return self._colors_list

    @property
    def panels(self):
        """Pads created via create_panel(), keyed by name."""
        return self._panels

    @property
    def windows(self):
        """Windows created via create_window(), keyed by name."""
        return self._windows

    def getch(self):
        """Read a key press; returns -1 after the refresh-rate timeout."""
        return self._screen.getch()

    def refresh(self):
        """Redraw the main screen."""
        return self._screen.refresh()

    def get_size(self):
        """Return (rows, cols) of the screen."""
        return self._screen.getmaxyx()

    def stop(self):
        """Undo the curses terminal modes and restore normal operation."""
        curses.nocbreak()
        self._screen.keypad(0)
        curses.echo()
        curses.endwin()

    def create_window(self, name, height, width, top, left):
        """Create, register and return a named sub-window of the screen."""
        panel = Window(height, width, top, left, self)
        self._windows[name] = panel
        return panel

    def create_panel(self, name, height, width):
        """Create, register and return a named off-screen pad."""
        panel = Panel(height, width)
        self._panels[name] = panel
        return panel

    def add_line(self, text, top, left, color=None):
        """Draw *text* on the main screen at (top, left)."""
        self._screen.addstr(top, left, text, color)

    def up(self):
        """Move the selection one row up (selected_row must be an int)."""
        self.selected_row -= 1

    def down(self):
        """Move the selection one row down (selected_row must be an int)."""
        self.selected_row += 1
class Window(object):
    """Sub-window of the parent Terminal's curses screen."""

    def __init__(self, height, width, top, left, parent):
        # Create a subwindow sharing the parent screen's buffer.
        self._panel = parent._screen.subwin(height, width, top, left)
        self._parent = parent
        # Allow scrolling and efficient hardware line insert/delete.
        self._panel.scrollok(1)
        self._panel.idlok(1)
        # Mark the whole window as changed so the next refresh redraws it.
        self._panel.touchwin()

    def add_line(self, text, top, left, color=None):
        """Draw *text* at (top, left) with the given curses attribute."""
        self._panel.addstr(top, left, text, color)

    def refresh(self):
        """Redraw the window."""
        return self._panel.refresh()
class Panel(object):
    """ Wrapped newpad object

    Wraps a curses pad (an off-screen buffer that can be larger than the
    screen) and caches the viewport of the last refresh() so the scroll
    helpers can shift it by one row.
    """

    def __init__(self, height, width):
        self._panel = curses.newpad(height, width)
        self.selected_row = 0
        # Cached refresh() viewport: pad origin (ptopy, ptopx) and the
        # on-screen rectangle (stopy, stopx)..(sbottomy, sbottomx).
        self.ptopy = 0
        self.ptopx = 0
        self.stopy = 0
        self.stopx = 0
        self.sbottomy = 0
        self.sbottomx = 0
        # Highest pad row scroll_down() may reach.
        self.max = height

    def set_max(self, value):
        """Override the scroll limit used by scroll_down()."""
        self.max = value

    def add_line(self, text, top, left, color=None):
        """Draw *text* into the pad at (top, left)."""
        self._panel.addstr(top, left, text, color)

    def refresh(self, ptopy, ptopx, stopy, stopx, sbottomy, sbottomx):
        """Blit the pad region starting at (ptopy, ptopx) onto the screen
        rectangle (stopy, stopx)-(sbottomy, sbottomx), caching the values
        for the scroll helpers."""
        self.ptopx = ptopx
        self.ptopy = ptopy
        self.stopy = stopy
        self.stopx = stopx
        self.sbottomy = sbottomy
        self.sbottomx = sbottomx
        return self._panel.refresh(self.ptopy, self.ptopx, self.stopy, self.stopx, self.sbottomy, self.sbottomx)

    def getch(self):
        return self._panel.getch()

    def scroll_up(self):
        # Shift the viewport one pad row up, clamped at the top.
        self.refresh(max(self.ptopy - 1, 0), self.ptopx, self.stopy, self.stopx, self.sbottomy, self.sbottomx)

    def scroll_down(self):
        # Shift the viewport one pad row down, clamped at self.max.
        self.refresh(min(self.ptopy+1, self.max), self.ptopx, self.stopy, self.stopx, self.sbottomy, self.sbottomx)
"""Support for Verisure binary sensors."""
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
BinarySensorEntity,
)
from . import CONF_DOOR_WINDOW, HUB as hub
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Verisure binary sensors."""
    sensors = []
    hub.update_overview()
    if int(hub.config.get(CONF_DOOR_WINDOW, 1)):
        device_labels = hub.get("$.doorWindow.doorWindowDevice[*].deviceLabel")
        for device_label in device_labels:
            sensors.append(VerisureDoorWindowSensor(device_label))
    sensors.append(VerisureEthernetStatus())
    add_entities(sensors)
class VerisureDoorWindowSensor(BinarySensorEntity):
    """Representation of a Verisure door window sensor."""

    def __init__(self, device_label):
        """Initialize the Verisure door window sensor."""
        # Label identifying the device in every jsonpath lookup below.
        self._device_label = device_label

    @property
    def name(self):
        """Return the name of the binary sensor."""
        # hub.get_first takes the pattern plus the label — presumably
        # %-formats the label into it; confirm against the hub API.
        return hub.get_first(
            "$.doorWindow.doorWindowDevice[?(@.deviceLabel=='%s')].area",
            self._device_label,
        )

    @property
    def is_on(self):
        """Return True when the door/window state is reported as OPEN."""
        return (
            hub.get_first(
                "$.doorWindow.doorWindowDevice[?(@.deviceLabel=='%s')].state",
                self._device_label,
            )
            == "OPEN"
        )

    @property
    def available(self):
        """Return True if entity is available."""
        # Available iff the device still appears in the hub's overview.
        return (
            hub.get_first(
                "$.doorWindow.doorWindowDevice[?(@.deviceLabel=='%s')]",
                self._device_label,
            )
            is not None
        )

    # pylint: disable=no-self-use
    def update(self):
        """Update the state of the sensor."""
        # Refresh the shared hub cache that the properties above read.
        hub.update_overview()
class VerisureEthernetStatus(BinarySensorEntity):
    """Representation of a Verisure VBOX internet status."""

    @property
    def name(self):
        """Return the name of the binary sensor."""
        return "Verisure Ethernet status"

    @property
    def is_on(self):
        """Return True when the VBOX reports an ethernet connection."""
        return hub.get_first("$.ethernetConnectedNow")

    @property
    def available(self):
        """Return True if entity is available."""
        # Available iff the overview contains the ethernet field at all.
        return hub.get_first("$.ethernetConnectedNow") is not None

    # pylint: disable=no-self-use
    def update(self):
        """Update the state of the sensor."""
        # Refresh the shared hub cache read by the properties above.
        hub.update_overview()

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        # NOTE(review): declared after update(); valid but conventionally
        # properties are grouped together.
        return DEVICE_CLASS_CONNECTIVITY
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
import mock
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume.drivers.hitachi import hnas_backend
from cinder.volume.drivers.hitachi import hnas_nfs as nfs
CONF = cfg.CONF
# Canned HNAS CLI output fixtures.  Each constant reproduces the stdout of one
# backend command (see the HNAS_CMDS mapping below for which command returns
# which fixture).  The string contents are parsed by the code under test, so
# their exact layout is significant — do not reflow them.

# 'evsfs list' — filesystem inventory across two EVSes.
HNAS_RESULT1 = "\n\
FS ID FS Label FS Permanent ID EVS ID EVS Label\n\
----- ----------- ------------------ ------ ---------\n\
1026 gold 0xaadee0e035cfc0b7 1 EVSTest1\n\
1025 fs01-husvm 0xaada5dff78668800 1 EVSTest1\n\
1027 large-files 0xaadee0ef012a0d54 1 EVSTest1\n\
1028 platinun 0xaadee1ea49d1a32c 1 EVSTest1\n\
1029 test_hdp 0xaadee09634acfcac 1 EVSTest1\n\
1030 cinder1 0xaadfcf742fba644e 1 EVSTest1\n\
1031 cinder2 0xaadfcf7e0769a6bc 1 EVSTest1\n\
1024 fs02-husvm 0xaac8715e2e9406cd 2 EVSTest2\n\
\n"
# 'cluster-getmac' output.
HNAS_RESULT2 = "cluster MAC: 83-68-96-AA-DA-5D"
# 'ver' — model / firmware information.
HNAS_RESULT3 = "\n\
Model: HNAS 4040 \n\
Software: 11.2.3319.14 (built 2013-09-19 12:34:24+01:00) \n\
Hardware: NAS Platform (M2SEKW1339109) \n\
board MMB1 \n\
mmb 11.2.3319.14 release (2013-09-19 12:34:24+01:00)\n\
board MFB1 \n\
mfb1hw MB v0883 WL v002F TD v002F FD v002F TC v0059 \
RY v0059 TY v0059 IC v0059 WF v00E2 FS v00E2 OS v00E2 \
WD v00E2 DI v001A FC v0002 \n\
Serial no B1339745 (Thu Jan 1 00:00:50 2009) \n\
board MCP \n\
Serial no B1339109 (Thu Jan 1 00:00:49 2009) \n\
\n"
# 'evsipaddr -l' — all EVS/admin IP addresses.
HNAS_RESULT4 = "\n\
EVS Type Label IP Address Mask Port \n\
---------- --------------- ------------------ --------------- ------\n\
admin hnas4040 192.0.2.2 255.255.255.0 eth1 \n\
admin hnas4040 172.24.44.15 255.255.255.0 eth0 \n\
evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\
evs 1 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\
evs 2 EVSTest2 172.24.44.21 255.255.255.0 ag1 \n\
\n"
# 'df -a' — capacity report for all filesystems (multi-EVS case).
HNAS_RESULT5 = "\n\
ID Label EVS Size Used Snapshots Deduped\
Avail Thin ThinSize ThinAvail \
FS Type \n\
---- ----------- --- ------- ------------- --------- -------\
- ------------- ---- -------- --------- ---------------------\
------------- \n\
1025 fs01-husvm 1 250 GB 21.4 GB (9%) 0 B (0%) NA \
228 GB (91%) No 32 KB,\
WFS-2,128 DSBs\n\
1026 gold 1 19.9 GB 2.30 GB (12% NA 0 B (0%)\
17.6 GB (88%) No 4 KB,WFS-2,128 DSBs,\
dedupe enabled\n\
1027 large-files 1 19.8 GB 2.43 GB (12%) 0 B (0%) NA \
17.3 GB (88%) No 32 KB,\
WFS-2,128 DSBs\n\
1028 platinun 1 19.9 GB 2.30 GB (12%) NA 0 B (0%)\
17.6 GB (88%) No 4 KB,WFS-2,128 DSBs,\
dedupe enabled\n\
1029 silver 1 19.9 GB 3.19 GB (16%) 0 B (0%) NA \
6.7 GB (84%) No 4 KB,\
WFS-2,128 DSBs\n\
1030 cinder1 1 40.8 GB 2.24 GB (5%) 0 B (0%) NA \
38.5 GB (95%) No 4 KB,\
WFS-2,128 DSBs\n\
1031 cinder2 1 39.8 GB 2.23 GB (6%) 0 B (0%) NA \
37.6 GB (94%) No 4 KB,\
WFS-2,128 DSBs\n\
1024 fs02-husvm 2 49.8 GB 3.54 GB (7%) 0 B (0%) NA \
46.2 GB (93%) No 32 KB,\
WFS-2,128 DSBs\n\
1032 test 2 3.97 GB 2.12 GB (53%) 0 B (0%) NA \
1.85 GB (47%) No 4 KB,\
WFS-2,128 DSBs\n\
1058 huge_FS 7 1.50 TB Not determined\n\
1053 fs-unmounted 4 108 GB Not mounted \
NA 943 MB (18%) 39.2 GB (36%) No 4 KB,\
WFS-2,128 DSBs,dedupe enabled\n\
\n"
# 'df -f test_hdp' — capacity report for a single filesystem.
HNAS_RESULT6 = "\n\
ID Label EVS Size Used Snapshots Deduped Avail \
Thin ThinSize ThinAvail FS Type\n\
---- ---------- --- ------ ------------ --------- ------- ------------ \
---- -------- --------- --------------------\n\
1025 fs01-husvm 1 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \
No 32 KB,WFS-2,128 DSBs\n\
\n"
# 'for-each-evs -q nfs-export list' — NFS export configuration.
HNAS_RESULT7 = "\n\
Export configuration: \n\
Export name: /export01-husvm \n\
Export path: /export01-husvm \n\
File system label: test_hdp \n\
File system size: 250 GB \n\
File system free space: 228 GB \n\
File system state: \n\
formatted = Yes \n\
mounted = Yes \n\
failed = No \n\
thin provisioned = No \n\
Access snapshots: Yes \n\
Display snapshots: Yes \n\
Read Caching: Disabled \n\
Disaster recovery setting: \n\
Recovered = No \n\
Transfer setting = Use file system default \n\
\n"
# iscsi-lu add / del / clone / expand single-line responses.
HNAS_RESULT8 = "Logical unit creation started at 2014-12-24 00:38:30+00:00."
HNAS_RESULT9 = "Logical unit deleted successfully."
HNAS_RESULT10 = ""
HNAS_RESULT11 = "Logical unit expansion started at 2014-12-24 01:25:03+00:00."
# 'iscsi-target list test_iqn' — target with a configured secret.
HNAS_RESULT12 = "\n\
Alias : test_iqn \n\
Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-silver \n\
Comment : \n\
Secret : test_secret \n\
Authentication : Enabled \n\
Logical units : No logical units. \n\
\n"
# iscsi-target addlu / dellu / add single-line responses.
HNAS_RESULT13 = "Logical unit added successfully."
HNAS_RESULT14 = "Logical unit removed successfully."
HNAS_RESULT15 = "Target created successfully."
HNAS_RESULT16 = ""
# 'evsipaddr -e 1' — addresses of EVS 1 only.
HNAS_RESULT17 = "\n\
EVS Type Label IP Address Mask Port \n\
---------- --------------- ------------------ --------------- ------\n\
evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\
evs 2 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\
\n"
# 'ssh -version' output.
HNAS_RESULT18 = "Version: 11.1.3225.01\n\
Directory: /u/u60/_Eng_Axalon_SMU/OfficialBuilds/fish/angel/3225.01/main/bin/\
x86_64_linux-bart_libc-2.7_release\n\
Date: Feb 22 2013, 04:10:09\n\
\n"
# 'df' report without the EVS column (single-EVS case).
HNAS_RESULT19 = " ID Label Size Used Snapshots \
Deduped Avail Thin ThinSize ThinAvail FS Type\n\
---- ------------- ------- ------------- --------- ------- -------------\
---- -------- --------- -------------------\n\
1025 fs01-husvm 250 GB 47.1 GB (19%) 0 B (0%) NA 203 GB (81%)\
No 4 KB,WFS-2,128 DSBs\n\
1047 manage_test02 19.9 GB 9.29 GB (47%) 0 B (0%) NA 10.6 GB (53%)\
No 4 KB,WFS-2,128 DSBs\n\
1058 huge_FS 7 1.50 TB Not determined\n\
1053 fs-unmounted 4 108 GB Not mounted \
NA 943 MB (18%) 39.2 GB (36%) No 4 KB,\
WFS-2,128 DSBs,dedupe enabled\n\
\n"
# NOTE: an earlier revision first assigned HNAS_RESULT20 a multi-line
# iscsi-target listing (a target with an empty secret) and then immediately
# rebound the name on the next statement, so that listing was dead code and
# has been removed.  Only the final value below was ever observable by the
# tests (get_targetiqn / get_targetsecret use it as the "missing target"
# response).
HNAS_RESULT20 = "Target does not exist."
HNAS_RESULT21 = "Target created successfully."
# Error string used as stderr to simulate SSC connection failures.
HNAS_RESULT22 = "Failed to establish SSC connection"
# 'iscsi-target list' — three targets; only cinder-default has LUNs mapped.
HNAS_RESULT23 = "\n\
Alias : cinder-Gold\n\
Globally unique name: iqn.2015-06.10.10.10.10:evstest1.cinder-gold\n\
Comment :\n\
Secret : None\n\
Authentication : Enabled\n\
Logical units : No logical units.\n\
Access configuration :\n\
\n\
Alias : cinder-GoldIsh\n\
Globally unique name: iqn.2015-06.10.10.10.10:evstest1.cinder-goldish\n\
Comment :\n\
Secret : None\n\
Authentication : Enabled\n\
Logical units : No logical units.\n\
Access configuration :\n\
\n\
Alias : cinder-default\n\
Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default\n\
Comment :\n\
Secret : pxr6U37LZZJBoMc\n\
Authentication : Disabled\n\
Logical units : Logical units :\n\
\n\
LUN Logical Unit\n\
---- --------------------------------\n\
0 volume-8ddd1a54-9daf-4fa5-842...\n\
1 volume-99da7ae7-1e7f-4d57-8bf...\n\
\n\
Access configuration :\n\
"
# iscsi-lu mod / selectfs single-line responses.
HNAS_RESULT24 = "Logical unit modified successfully."
HNAS_RESULT25 = "Current selected file system: HNAS-iSCSI-TEST, number(32)."
# 'iscsi-lu list test_lun' — details of one logical unit.
HNAS_RESULT26 = "Name : volume-test \n\
Comment: \n\
Path : /.cinder/volume-test.iscsi \n\
Size : 2 GB \n\
File System : fs1 \n\
File System Mounted : YES \n\
Logical Unit Mounted: No"
# Canned (stdout, stderr) pairs keyed by the exact positional-argument tuple
# with which HnasBackend.run_cmd is expected to be called.  m_run_cmd below
# serves these as the mock side_effect, so an unexpected command tuple makes
# the lookup return None and the calling test fail.
HNAS_CMDS = {
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsfs', 'list'):
        ["%s" % HNAS_RESULT1, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'cluster-getmac',):
        ["%s" % HNAS_RESULT2, ""],
    ('ssh', '-version',): ["%s" % HNAS_RESULT18, ""],
    ('ssh', '-u', 'supervisor', '-p', 'supervisor', '0.0.0.0', 'ver',):
        ["%s" % HNAS_RESULT3, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'ver',):
        ["%s" % HNAS_RESULT3, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsipaddr', '-l'):
        ["%s" % HNAS_RESULT4, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-a'):
        ["%s" % HNAS_RESULT5, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-f', 'test_hdp'):
        ["%s" % HNAS_RESULT6, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'for-each-evs', '-q',
     'nfs-export', 'list'):
        ["%s" % HNAS_RESULT7, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
     'console-context', '--evs', '1', 'iscsi-lu', 'add', '-e', 'test_name',
     'test_hdp', '/.cinder/test_name.iscsi',
     '1M'):
        ["%s" % HNAS_RESULT8, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
     'console-context', '--evs', '1', 'iscsi-lu', 'del', '-d', '-f',
     'test_lun'):
        ["%s" % HNAS_RESULT9, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
     'console-context', '--evs', '1', 'file-clone-create', '-f', 'fs01-husvm',
     '/.cinder/test_lu.iscsi', 'cloned_lu'):
        ["%s" % HNAS_RESULT10, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
     'console-context', '--evs', '1', 'iscsi-lu', 'expand', 'expanded_lu',
     '1M'):
        ["%s" % HNAS_RESULT11, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
     'console-context', '--evs', '1', 'iscsi-target', 'list', 'test_iqn'):
        ["%s" % HNAS_RESULT12, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
     'console-context', '--evs', '1', 'iscsi-target', 'addlu', 'test_iqn',
     'test_lun', '0'):
        ["%s" % HNAS_RESULT13, ""],
    # NOTE: the trailing LUN id here is the integer 0, not the string '0' —
    # it matches test_del_iscsi_conn, which passes 0 as an int.
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
     'console-context', '--evs', '1', 'iscsi-target', 'dellu', 'test_iqn',
     0):
        ["%s" % HNAS_RESULT14, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
     'console-context', '--evs', '1', 'iscsi-target', 'add', 'myTarget',
     'secret'):
        ["%s" % HNAS_RESULT15, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
     'console-context', '--evs', '1', 'iscsi-target', 'mod', '-s',
     'test_secret', '-a', 'enable', 'test_iqn'): ["%s" % HNAS_RESULT15, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
     'console-context', '--evs', '1', 'iscsi-lu', 'clone', '-e', 'test_lu',
     'test_clone',
     '/.cinder/test_clone.iscsi'):
        ["%s" % HNAS_RESULT16, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsipaddr', '-e', '1'):
        ["%s" % HNAS_RESULT17, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
     'console-context', '--evs', '1', 'iscsi-target', 'list'):
        ["%s" % HNAS_RESULT23, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
     '1', 'iscsi-target', 'addlu', 'cinder-default',
     'volume-8ddd1a54-0000-0000-0000', '2'):
        ["%s" % HNAS_RESULT13, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
     '1', 'selectfs', 'fs01-husvm'):
        ["%s" % HNAS_RESULT25, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
     '1', 'iscsi-lu', 'list', 'test_lun'):
        ["%s" % HNAS_RESULT26, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
     '1', 'iscsi-lu', 'mod', '-n', 'vol_test', 'new_vol_test'):
        ["%s" % HNAS_RESULT24, ""]
}
# Minimal driver configuration handed to HnasBackend in setUp.
DRV_CONF = {'ssh_enabled': 'True',
            'mgmt_ip0': '0.0.0.0',
            'cluster_admin_ip0': None,
            'ssh_port': '22',
            'ssh_private_key': 'test_key',
            'username': 'supervisor',
            'password': 'supervisor'}

# Generic (stdout, stderr) pair used as the mocked utils.execute return value.
UTILS_EXEC_OUT = ["output: test_cmd", ""]
def m_run_cmd(*args, **kwargs):
    """Return the canned (stdout, stderr) pair recorded for this argument tuple.

    Used as the side_effect of the mocked HnasBackend.run_cmd.  Returns None
    for command tuples not present in HNAS_CMDS, which makes an unanticipated
    invocation fail loudly in the calling test.
    """
    if args in HNAS_CMDS:
        return HNAS_CMDS[args]
    return None
class HDSHNASBendTest(test.TestCase):
    """Unit tests for the HNAS command backend (hnas_backend.HnasBackend)."""

    def __init__(self, *args, **kwargs):
        super(HDSHNASBendTest, self).__init__(*args, **kwargs)

    @mock.patch.object(nfs, 'factory_bend')
    def setUp(self, m_factory_bend):
        super(HDSHNASBendTest, self).setUp()
        # Backend under test, wired to the canned DRV_CONF credentials.
        self.hnas_bend = hnas_backend.HnasBackend(DRV_CONF)

    @mock.patch('six.moves.builtins.open')
    @mock.patch('os.path.isfile', return_value=True)
    @mock.patch('paramiko.RSAKey.from_private_key_file')
    @mock.patch('paramiko.SSHClient')
    @mock.patch.object(putils, 'ssh_execute',
                       return_value=(HNAS_RESULT5, ''))
    @mock.patch.object(utils, 'execute')
    @mock.patch.object(time, 'sleep')
    def test_run_cmd(self, m_sleep, m_utl, m_ssh, m_ssh_cli,
                     m_pvt_key, m_file, m_open):
        # Save the global CONF values mutated below; restored at the end.
        save_hkey_file = CONF.ssh_hosts_key_file
        save_spath = CONF.state_path
        CONF.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts'
        CONF.state_path = '/var/lib/cinder'
        # Test main flow
        out, err = self.hnas_bend.run_cmd('ssh', '0.0.0.0',
                                          'supervisor', 'supervisor',
                                          'df', '-a')
        self.assertIn('fs01-husvm', out)
        self.assertIn('WFS-2,128 DSBs', out)
        # Test exception throwing when not using SSH
        m_utl.side_effect = putils.ProcessExecutionError(stdout='',
                                                         stderr=HNAS_RESULT22,
                                                         exit_code=255)
        self.hnas_bend.drv_configs['ssh_enabled'] = 'False'
        self.assertRaises(exception.HNASConnError, self.hnas_bend.run_cmd,
                          'ssh', '0.0.0.0', 'supervisor', 'supervisor',
                          'df', '-a')
        # Test exception throwing when using SSH
        m_ssh.side_effect = putils.ProcessExecutionError(stdout='',
                                                         stderr=HNAS_RESULT22,
                                                         exit_code=255)
        self.hnas_bend.drv_configs['ssh_enabled'] = 'True'
        self.assertRaises(exception.HNASConnError, self.hnas_bend.run_cmd,
                          'ssh', '0.0.0.0', 'supervisor', 'supervisor',
                          'df', '-a')
        CONF.state_path = save_spath
        CONF.ssh_hosts_key_file = save_hkey_file

    # NOTE(review): mock decorators apply bottom-up, so m_cmd actually
    # receives the utils.execute mock and m_exec the run_cmd mock — the
    # parameter names are swapped.  Neither mock is inspected here, so
    # behavior is unaffected.
    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       side_effect=m_run_cmd)
    @mock.patch.object(utils, 'execute', return_value=UTILS_EXEC_OUT)
    def test_get_version(self, m_cmd, m_exec):
        out = self.hnas_bend.get_version("ssh", "1.0", "0.0.0.0", "supervisor",
                                         "supervisor")
        self.assertIn('11.2.3319.14', out)
        self.assertIn('83-68-96-AA-DA-5D', out)

    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       side_effect=m_run_cmd)
    def test_get_iscsi_info(self, m_execute):
        out = self.hnas_bend.get_iscsi_info("ssh", "0.0.0.0", "supervisor",
                                            "supervisor")
        self.assertIn('172.24.44.20', out)
        self.assertIn('172.24.44.21', out)
        self.assertIn('10.0.0.20', out)
        self.assertEqual(4, len(out.split('\n')))

    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd')
    def test_get_hdp_info(self, m_run_cmd):
        # tests when there are two or more evs
        m_run_cmd.return_value = (HNAS_RESULT5, "")
        out = self.hnas_bend.get_hdp_info("ssh", "0.0.0.0", "supervisor",
                                          "supervisor")
        self.assertEqual(10, len(out.split('\n')))
        self.assertIn('gold', out)
        self.assertIn('silver', out)
        line1 = out.split('\n')[0]
        self.assertEqual(12, len(line1.split()))
        # test when there is only one evs
        m_run_cmd.return_value = (HNAS_RESULT19, "")
        out = self.hnas_bend.get_hdp_info("ssh", "0.0.0.0", "supervisor",
                                          "supervisor")
        self.assertEqual(3, len(out.split('\n')))
        self.assertIn('fs01-husvm', out)
        self.assertIn('manage_test02', out)
        line1 = out.split('\n')[0]
        self.assertEqual(12, len(line1.split()))

    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       side_effect=m_run_cmd)
    def test_get_nfs_info(self, m_run_cmd):
        out = self.hnas_bend.get_nfs_info("ssh", "0.0.0.0", "supervisor",
                                          "supervisor")
        self.assertEqual(2, len(out.split('\n')))
        self.assertIn('/export01-husvm', out)
        self.assertIn('172.24.44.20', out)
        self.assertIn('10.0.0.20', out)

    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       side_effect=m_run_cmd)
    def test_create_lu(self, m_cmd):
        out = self.hnas_bend.create_lu("ssh", "0.0.0.0", "supervisor",
                                       "supervisor", "test_hdp", "1",
                                       "test_name")
        self.assertIn('successfully created', out)

    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       side_effect=m_run_cmd)
    def test_delete_lu(self, m_cmd):
        out = self.hnas_bend.delete_lu("ssh", "0.0.0.0", "supervisor",
                                       "supervisor", "test_hdp", "test_lun")
        self.assertIn('deleted successfully', out)

    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       side_effect=m_run_cmd)
    def test_create_dup(self, m_cmd):
        out = self.hnas_bend.create_dup("ssh", "0.0.0.0", "supervisor",
                                        "supervisor", "test_lu", "test_hdp",
                                        "1", "test_clone")
        self.assertIn('successfully created', out)

    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       side_effect=m_run_cmd)
    def test_file_clone(self, m_cmd):
        out = self.hnas_bend.file_clone("ssh", "0.0.0.0", "supervisor",
                                        "supervisor", "fs01-husvm",
                                        "/.cinder/test_lu.iscsi", "cloned_lu")
        self.assertIn('LUN cloned_lu HDP', out)

    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       side_effect=m_run_cmd)
    def test_extend_vol(self, m_cmd):
        out = self.hnas_bend.extend_vol("ssh", "0.0.0.0", "supervisor",
                                        "supervisor", "test_hdp", "test_lun",
                                        "1", "expanded_lu")
        self.assertIn('successfully extended', out)

    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       side_effect=m_run_cmd)
    def test_add_iscsi_conn(self, m_cmd):
        out = self.hnas_bend.add_iscsi_conn("ssh", "0.0.0.0", "supervisor",
                                            "supervisor",
                                            "volume-8ddd1a54-0000-0000-0000",
                                            "test_hdp", "test_port",
                                            "cinder-default", "test_init")
        self.assertIn('successfully paired', out)

    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       side_effect=m_run_cmd)
    def test_del_iscsi_conn(self, m_cmd):
        out = self.hnas_bend.del_iscsi_conn("ssh", "0.0.0.0", "supervisor",
                                            "supervisor", "1", "test_iqn", 0)
        self.assertIn('already deleted', out)

    @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=0)
    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd')
    def test_get_targetiqn(self, m_cmd, m_get_evs):
        # Existing target: the list command finds it directly.
        m_cmd.side_effect = [[HNAS_RESULT12, '']]
        out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor",
                                           "supervisor", "test_iqn",
                                           "test_hdp", "test_secret")
        self.assertEqual('test_iqn', out)
        # Missing target with a secret: it is created on the fly.
        m_cmd.side_effect = [[HNAS_RESULT20, ''], [HNAS_RESULT21, '']]
        out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor",
                                           "supervisor", "test_iqn2",
                                           "test_hdp", "test_secret")
        self.assertEqual('test_iqn2', out)
        # Missing target without a secret: also created on the fly.
        m_cmd.side_effect = [[HNAS_RESULT20, ''], [HNAS_RESULT21, '']]
        out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor",
                                           "supervisor", "test_iqn3",
                                           "test_hdp", "")
        self.assertEqual('test_iqn3', out)

    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       side_effect=m_run_cmd)
    def test_set_targetsecret(self, m_execute):
        self.hnas_bend.set_targetsecret("ssh", "0.0.0.0", "supervisor",
                                        "supervisor", "test_iqn",
                                        "test_hdp", "test_secret")

    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd')
    def test_get_targetsecret(self, m_run_cmd):
        # test when target has secret
        m_run_cmd.return_value = (HNAS_RESULT12, "")
        out = self.hnas_bend.get_targetsecret("ssh", "0.0.0.0", "supervisor",
                                              "supervisor", "test_iqn",
                                              "test_hdp")
        self.assertEqual('test_secret', out)
        # test when target doesn't have a secret
        m_run_cmd.return_value = (HNAS_RESULT20, "")
        out = self.hnas_bend.get_targetsecret("ssh", "0.0.0.0", "supervisor",
                                              "supervisor", "test_iqn",
                                              "test_hdp")
        self.assertEqual('', out)

    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd')
    def test_get_targets(self, m_run_cmd):
        # Test normal behaviour
        m_run_cmd.return_value = (HNAS_RESULT23, "")
        tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor",
                                               "supervisor", 1)
        self.assertEqual(3, len(tgt_list))
        self.assertEqual(2, len(tgt_list[2]['luns']))
        # Test calling with parameter
        tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor",
                                               "supervisor", 1,
                                               'cinder-default')
        self.assertEqual(1, len(tgt_list))
        self.assertEqual(2, len(tgt_list[0]['luns']))
        # Test error in BE command
        m_run_cmd.side_effect = putils.ProcessExecutionError
        tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor",
                                               "supervisor", 1)
        self.assertEqual(0, len(tgt_list))

    @mock.patch.object(hnas_backend.HnasBackend,
                       'run_cmd', side_effect=m_run_cmd)
    def test_check_targets(self, m_run_cmd):
        result, tgt = self.hnas_bend.check_target("ssh", "0.0.0.0",
                                                  "supervisor",
                                                  "supervisor", "test_hdp",
                                                  "cinder-default")
        self.assertTrue(result)
        self.assertEqual('cinder-default', tgt['alias'])
        result, tgt = self.hnas_bend.check_target("ssh", "0.0.0.0",
                                                  "supervisor",
                                                  "supervisor", "test_hdp",
                                                  "cinder-no-target")
        self.assertFalse(result)
        self.assertIsNone(tgt)

    @mock.patch.object(hnas_backend.HnasBackend,
                       'run_cmd', side_effect=m_run_cmd)
    def test_check_lu(self, m_run_cmd):
        # LU mapped on cinder-default in HNAS_RESULT23 -> found at LUN 0.
        ret = self.hnas_bend.check_lu("ssh", "0.0.0.0", "supervisor",
                                      "supervisor",
                                      "volume-8ddd1a54-9daf-4fa5-842",
                                      "test_hdp")
        result, lunid, tgt = ret
        self.assertTrue(result)
        self.assertEqual('0', lunid)
        # Unknown LU -> not found.
        ret = self.hnas_bend.check_lu("ssh", "0.0.0.0", "supervisor",
                                      "supervisor",
                                      "volume-8ddd1a54-0000-0000-000",
                                      "test_hdp")
        result, lunid, tgt = ret
        self.assertFalse(result)

    @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=1)
    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       return_value=(HNAS_RESULT26, ""))
    def test_get_existing_lu_info(self, m_run_cmd, m_get_evs):
        out = self.hnas_bend.get_existing_lu_info("ssh", "0.0.0.0",
                                                  "supervisor",
                                                  "supervisor", "fs01-husvm",
                                                  "test_lun")
        m_get_evs.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
                                          'supervisor', 'fs01-husvm')
        m_run_cmd.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
                                          'supervisor', 'console-context',
                                          '--evs', 1, 'iscsi-lu', 'list',
                                          'test_lun')
        self.assertEqual(HNAS_RESULT26, out)

    @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=1)
    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       return_value=(HNAS_RESULT24, ""))
    def test_rename_existing_lu(self, m_run_cmd, m_get_evs):
        out = self.hnas_bend.rename_existing_lu("ssh", "0.0.0.0",
                                                "supervisor",
                                                "supervisor", "fs01-husvm",
                                                "vol_test",
                                                "new_vol_test")
        m_get_evs.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
                                          'supervisor', 'fs01-husvm')
        m_run_cmd.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
                                          'supervisor', 'console-context',
                                          '--evs', 1, 'iscsi-lu', 'mod',
                                          '-n', 'vol_test', 'new_vol_test')
        self.assertEqual(HNAS_RESULT24, out)
from feature_extraction.post_processing.regex.regex_lib import RegexLib
import re
import datetime
import time
import unicodedata
from util.log import Log
import math
class EntityExtraction:
    """Extract structured entities (booleans, money amounts, date durations)
    from precedent text using the compiled regexes provided by RegexLib."""

    # Lazily loaded compiled-regex bundle (RegexLib.model); shared class-wide.
    regex_bin = None
    one_month = 86400 * 30  # unix time for 1 month (30 days)
    # Accent-stripped French month names -> month numbers.
    month_dict = {
        'janvier': 1,
        'fevrier': 2,
        'mars': 3,
        'avril': 4,
        'mai': 5,
        'juin': 6,
        'juillet': 7,
        'aout': 8,
        'septembre': 9,
        "octobre": 10,
        'novembre': 11,
        'decembre': 12
    }

    def __init__(self):
        pass

    @staticmethod
    def match_any_regex(text, regex_array, regex_type):
        """
        1) Loads the regex binaries only once. If it is loaded then continue.
        2) Iterate all the regex and search text
        3) if regex finds a match then extract entity from this sub sentence

        :param text: String representation of precedent
        :param regex_array: List of regex
        :param regex_type: Entity we look for in a particular regex match
        :return: (Boolean, entity<int>)
        """
        if EntityExtraction.regex_bin is None:
            EntityExtraction.regex_bin = RegexLib.model
        for regex in regex_array:
            regex_result = regex.search(text)
            if regex_result:
                sentence = regex_result.group(0).lower()
                return EntityExtraction.__extract_regex_entity(sentence, regex_type)
        return False, 0

    @staticmethod
    def __extract_regex_entity(sentence, regex_type):
        """
        Entity extraction from the text

        1) If the type is BOOLEAN then simply return True, 1
        2) If the type is MONEY_REGEX then extract the money value and format
           the string so that it is convertible to a number
        3) If the type is DATE_REGEX then extract the fact duration
        4) else return False, 0

        :param sentence: sub sentence from text to apply regex
        :param regex_type: type of information to extract
        :return: (boolean, int)
        """
        # Strip accents so lookups (e.g. month names) work on plain ASCII.
        nfkd_form = unicodedata.normalize('NFKD', sentence)
        sentence = u"".join([character for character in nfkd_form
                             if not unicodedata.combining(character)])
        if regex_type == 'BOOLEAN':
            return True, 1
        elif regex_type == 'MONEY_REGEX':
            return EntityExtraction.__regex_money(regex_type, sentence)
        elif regex_type == 'DATE_REGEX':
            return EntityExtraction.get_fact_duration(sentence)
        return False, 0

    @staticmethod
    def get_fact_duration(sentence):
        """
        Tries to find a date range within a sentence by matching it against
        regexes.

        First regex looks for the following format:
            1er decembre 20** [a|au ...] 30 mai 20**
        Second regex looks for 1 or more months being stated.

        Defaults when parts of a date are missing:
        1) unless specified, the start date is assumed to be the first day of
           the month
        2) unless specified, the end date is assumed to be the 28th, because
           every month has at least 28 days

        :param sentence: sentence to extract entities
        :return: boolean (date found), integer (months between dates)
        """
        # Only sentences about non-payment carry a fact duration.
        non_payment_regex = re.compile("pas paye", re.IGNORECASE)
        if len(re.findall(non_payment_regex, sentence)) == 0:
            return False, 0

        # First regex: explicit "<day> <month> <year> ... <day> <month> <year>"
        start_end_date_regex = re.compile(RegexLib.DATE_RANGE_REGEX, re.IGNORECASE)
        entities = re.findall(start_end_date_regex, sentence)
        if len(entities) > 0:
            # Reuse the first match instead of re-running the regex.
            entities = entities.pop(0)
            try:
                start_day = int(entities[0])
            except ValueError as error:
                Log.write(str(error) + ": could not convert " + entities[0] + " to an int")
                start_day = 1  # default: first day of the month
            try:
                # A misspelled month raises KeyError from the dict lookup;
                # the original code only caught IndexError, so it crashed.
                start_month = str(EntityExtraction.month_dict[entities[1]])
            except (IndexError, KeyError) as error:
                Log.write(str(error) + ": not a month or has spelling mistake")
                return False, 0
            try:
                start_year = int(entities[2])
            except ValueError as error:
                Log.write(str(error) + ": could not find start year")
                start_year = entities[5]  # fall back to the end year
            try:
                end_day = int(entities[3])
            except ValueError as error:
                Log.write(str(error) + ": could not convert " + entities[3] + " to an int")
                end_day = 28  # default: every month has at least 28 days
            try:
                end_month = str(EntityExtraction.month_dict[entities[4]])
            except (IndexError, KeyError) as error:
                Log.write(str(error) + ": not a month or has spelling mistake")
                return False, 0
            end_year = entities[5]
            start_unix = EntityExtraction.__date_to_unix(
                [str(start_day), str(start_month), str(start_year)])
            end_unix = EntityExtraction.__date_to_unix(
                [str(end_day), str(end_month), str(end_year)])
            # __date_to_unix returns None on parse failure; guard against it
            # instead of raising a TypeError in the subtraction below.
            if start_unix is None or end_unix is None:
                return False, 0
            return True, EntityExtraction.__get_time_interval_in_months(
                start_unix, end_unix)

        # Second regex: count individual month mentions.
        month_regex = re.compile(RegexLib.DATE_REGEX, re.IGNORECASE)
        entities = re.findall(month_regex, sentence)
        if len(entities) > 0:
            return True, len(entities)  # total months found
        return False, 0

    @staticmethod
    def __regex_money(regex_type, sentence):
        """
        1) create the money regex --> re.compile(regex string)
        2) Find the dollar amount in the sentence
        3) filter the string by removing unnecessary characters
        4) return the entity

        :param regex_type: str(MONEY_REGEX)
        :param sentence: sub sentence to search for a dollar amount
        :return: (boolean, str) — amount as a numeric string
        """
        generic_regex = re.compile(EntityExtraction.regex_bin[regex_type])
        # NOTE(review): assumes the money pattern matches the sentence that
        # triggered the caller; a non-match would raise AttributeError here.
        entity = generic_regex.search(sentence).group(0)
        # Normalize "1 234,56 $" style amounts to "1234.56".
        entity = entity.replace("$", "")
        entity = entity.replace(" ", "")
        entity = entity.replace(",", ".")
        if entity[-1] == '.':
            entity = entity[:-1]
        return True, entity

    @staticmethod
    def __date_to_unix(date):
        """
        Given a date list (ex: [30, 12, 2019]) this function gets the unix
        time that represents this date.

        :param date: [day, month, year] strings to convert into unix time
        :return: unix time representing the input date, or None on failure
        """
        date_string = " ".join(date)
        try:
            unix_time = time.mktime(
                datetime.datetime.strptime(date_string, '%d %m %Y').timetuple())
        except (ValueError, OverflowError) as error:
            Log.write(str(error) + ": " + str(date_string))
            return None
        return unix_time

    @staticmethod
    def __get_time_interval_in_months(first_date, second_date):
        """
        Calculates the time difference between 2 dates, in whole months
        (rounded up).

        :param first_date: date in unix time
        :param second_date: date in unix time
        :return: months between the 2 dates
        """
        return math.ceil(abs(first_date - second_date) / EntityExtraction.one_month)
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for various parts of L{twisted.web}.
"""
import os
import zlib
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.python import reflect
from twisted.python.compat import _PY3
from twisted.python.filepath import FilePath
from twisted.trial import unittest
from twisted.internet import reactor
from twisted.internet.address import IPv4Address
from twisted.internet.task import Clock
from twisted.web import server, resource
from twisted.web import iweb, http, error
from twisted.web.test.requesthelper import DummyChannel, DummyRequest
from twisted.web.static import Data
class ResourceTests(unittest.TestCase):
    """
    Tests for L{resource.Resource}.
    """
    def testListEntities(self):
        """
        A freshly created resource has no entities.
        """
        empty = resource.Resource()
        self.assertEqual([], empty.listEntities())
class SimpleResource(resource.Resource):
    """
    A resource which renders C{b"correct"}, honoring conditional requests.

    @ivar _contentType: C{None} or a C{str} giving the value of the
        I{Content-Type} header in the response this resource will render.
        If it is C{None}, no I{Content-Type} header will be set in the
        response.
    """
    def __init__(self, contentType=None):
        resource.Resource.__init__(self)
        self._contentType = contentType

    def render(self, request):
        if self._contentType is not None:
            request.responseHeaders.setRawHeaders(
                b"content-type", [self._contentType])
        # Both setters are always invoked so the response carries validators;
        # either may report the client's cached copy is still good.
        modified = request.setLastModified(10)
        tagged = request.setETag(b'MatchingTag')
        if http.CACHED in (modified, tagged):
            return b''
        return b"correct"
class SiteTest(unittest.TestCase):
    """
    Unit tests for L{server.Site}.
    """
    def test_simplestSite(self):
        """
        L{Site.getResourceFor} returns the C{b""} child of the root resource it
        is constructed with when processing a request for I{/}.
        """
        child = SimpleResource()
        root = SimpleResource()
        root.putChild(b"", child)
        site = server.Site(root)
        self.assertIdentical(
            site.getResourceFor(DummyRequest([b''])),
            child, "Got the wrong resource.")

    def test_defaultRequestFactory(self):
        """
        L{server.Request} is the default request factory.
        """
        site = server.Site(resource=SimpleResource())
        self.assertIs(server.Request, site.requestFactory)

    def test_constructorRequestFactory(self):
        """
        Can be initialized with a custom requestFactory.
        """
        marker = object()
        site = server.Site(resource=SimpleResource(), requestFactory=marker)
        self.assertIs(marker, site.requestFactory)

    def test_buildProtocol(self):
        """
        Returns a C{Channel} whose C{site} and C{requestFactory} attributes are
        assigned from the C{site} instance.
        """
        site = server.Site(SimpleResource())
        proto = site.buildProtocol(None)
        self.assertIs(site, proto.site)
        self.assertIs(site.requestFactory, proto.requestFactory)
class SessionTests(unittest.TestCase):
    """
    Tests for L{server.Session}.
    """
    def setUp(self):
        """
        Create a site with one active session using a deterministic, easily
        controlled clock.
        """
        self.clock = Clock()
        self.uid = b'unique'
        self.site = server.Site(resource.Resource())
        self.session = server.Session(self.site, self.uid, self.clock)
        self.site.sessions[self.uid] = self.session

    def test_defaultReactor(self):
        """
        If no value is passed to L{server.Session.__init__}, the global
        reactor is used.
        """
        session = server.Session(server.Site(resource.Resource()), b'123')
        self.assertIdentical(session._reactor, reactor)

    def test_startCheckingExpiration(self):
        """
        L{server.Session.startCheckingExpiration} causes the session to expire
        after L{server.Session.sessionTimeout} seconds without activity.
        """
        self.session.startCheckingExpiration()
        # Advance to almost the timeout - nothing should happen.
        self.clock.advance(self.session.sessionTimeout - 1)
        self.assertIn(self.uid, self.site.sessions)
        # Advance to the timeout, the session should expire.
        self.clock.advance(1)
        self.assertNotIn(self.uid, self.site.sessions)
        # There should be no calls left over, either.
        self.assertFalse(self.clock.calls)

    def test_expire(self):
        """
        L{server.Session.expire} expires the session.
        """
        self.session.expire()
        # It should be gone from the session dictionary.
        self.assertNotIn(self.uid, self.site.sessions)
        # And there should be no pending delayed calls.
        self.assertFalse(self.clock.calls)

    def test_expireWhileChecking(self):
        """
        L{server.Session.expire} expires the session even if the timeout call
        isn't due yet.
        """
        self.session.startCheckingExpiration()
        self.test_expire()

    def test_notifyOnExpire(self):
        """
        A function registered with L{server.Session.notifyOnExpire} is called
        when the session expires.
        """
        # Mutable cell so the nested function can record that it ran.
        callbackRan = [False]
        def expired():
            callbackRan[0] = True
        self.session.notifyOnExpire(expired)
        self.session.expire()
        self.assertTrue(callbackRan[0])

    def test_touch(self):
        """
        L{server.Session.touch} updates L{server.Session.lastModified} and
        delays session timeout.
        """
        # Make sure it works before startCheckingExpiration
        self.clock.advance(3)
        self.session.touch()
        self.assertEqual(self.session.lastModified, 3)
        # And after startCheckingExpiration
        self.session.startCheckingExpiration()
        self.clock.advance(self.session.sessionTimeout - 1)
        self.session.touch()
        self.clock.advance(self.session.sessionTimeout - 1)
        self.assertIn(self.uid, self.site.sessions)
        # It should have advanced it by just sessionTimeout, no more.
        self.clock.advance(1)
        self.assertNotIn(self.uid, self.site.sessions)
# Conditional requests use the If-None-Match and If-Modified-Since headers.
# Cases to cover when making a conditional request:
#   - the normal response is returned if the condition succeeds
#   - if the condition fails:
#       - the correct response code is returned
#       - no body is returned
def httpBody(whole):
    """
    Return the body of the HTTP message C{whole}: everything after the
    first blank line separating headers from content.
    """
    sections = whole.split(b'\r\n\r\n', 1)
    return sections[1]
def httpHeader(whole, key):
    """
    Return the value of the first header line in the HTTP message C{whole}
    whose name begins with C{key} (compared case-insensitively), or C{None}
    if no line matches.
    """
    wanted = key.lower()
    headerSection = whole.split(b'\r\n\r\n', 1)[0]
    for line in headerSection.split(b'\r\n'):
        if not line.lower().startswith(wanted):
            continue
        return line.split(b':', 1)[1].strip()
    return None
def httpCode(whole):
    """
    Return the integer status code from the status line of the HTTP
    response C{whole}.
    """
    statusLine = whole.split(b'\r\n', 1)[0]
    fields = statusLine.split()
    return int(fields[1])
class ConditionalTests(unittest.TestCase):
    """
    web.server's handling of conditional requests for cache validation.
    """
    def setUp(self):
        """
        Set up a site serving L{SimpleResource} at C{/} (and, with an
        explicit content type, at C{/with-content-type}), connected to a
        string transport so raw responses can be inspected.
        """
        self.resrc = SimpleResource()
        self.resrc.putChild(b'', self.resrc)
        self.resrc.putChild(b'with-content-type', SimpleResource(b'image/jpeg'))
        self.site = server.Site(self.resrc)
        self.site.startFactory()
        self.addCleanup(self.site.stopFactory)
        # Simulate a connected client by pairing the site's protocol with a
        # string transport whose lifecycle methods are stubbed out.
        self.channel = self.site.buildProtocol(None)
        self.transport = http.StringTransport()
        self.transport.close = lambda *a, **kw: None
        self.transport.disconnecting = lambda *a, **kw: 0
        self.transport.getPeer = lambda *a, **kw: "peer"
        self.transport.getHost = lambda *a, **kw: "host"
        self.channel.makeConnection(self.transport)
    def tearDown(self):
        self.channel.connectionLost(None)
    def _modifiedTest(self, modifiedSince=None, etag=None):
        """
        Given the value C{modifiedSince} for the I{If-Modified-Since} header or
        the value C{etag} for the I{If-None-Match} header, verify that a
        response with a 200 code, a default Content-Type, and the resource as
        the body is returned.
        """
        if modifiedSince is not None:
            validator = b"If-Modified-Since: " + modifiedSince
        else:
            # The correct header name is If-None-Match (RFC 2616 section
            # 14.26).  The previous "If-Not-Match" spelling named a header
            # the server never recognizes, so the condition was never
            # actually evaluated by these tests.
            validator = b"If-None-Match: " + etag
        for line in [b"GET / HTTP/1.1", validator, b""]:
            self.channel.lineReceived(line)
        result = self.transport.getvalue()
        self.assertEqual(httpCode(result), http.OK)
        self.assertEqual(httpBody(result), b"correct")
        self.assertEqual(httpHeader(result, b"Content-Type"), b"text/html")
    def test_modified(self):
        """
        If a request is made with an I{If-Modified-Since} header value with
        a timestamp indicating a time before the last modification of the
        requested resource, a 200 response is returned along with a response
        body containing the resource.
        """
        self._modifiedTest(modifiedSince=http.datetimeToString(1))
    def test_unmodified(self):
        """
        If a request is made with an I{If-Modified-Since} header value with a
        timestamp indicating a time after the last modification of the request
        resource, a 304 response is returned along with an empty response body
        and no Content-Type header if the application does not set one.
        """
        for line in [b"GET / HTTP/1.1",
                     b"If-Modified-Since: " + http.datetimeToString(100), b""]:
            self.channel.lineReceived(line)
        result = self.transport.getvalue()
        self.assertEqual(httpCode(result), http.NOT_MODIFIED)
        self.assertEqual(httpBody(result), b"")
        # Since there SHOULD NOT (RFC 2616, section 10.3.5) be any
        # entity-headers, the Content-Type is not set if the application does
        # not explicitly set it.
        self.assertEqual(httpHeader(result, b"Content-Type"), None)
    def test_invalidTimestamp(self):
        """
        If a request is made with an I{If-Modified-Since} header value which
        cannot be parsed, the header is treated as not having been present
        and a normal 200 response is returned with a response body
        containing the resource.
        """
        self._modifiedTest(modifiedSince=b"like, maybe a week ago, I guess?")
    def test_invalidTimestampYear(self):
        """
        If a request is made with an I{If-Modified-Since} header value which
        contains a string in the year position which is not an integer, the
        header is treated as not having been present and a normal 200
        response is returned with a response body containing the resource.
        """
        self._modifiedTest(modifiedSince=b"Thu, 01 Jan blah 00:00:10 GMT")
    def test_invalidTimestampTooLongAgo(self):
        """
        If a request is made with an I{If-Modified-Since} header value which
        contains a year before the epoch, the header is treated as not
        having been present and a normal 200 response is returned with a
        response body containing the resource.
        """
        self._modifiedTest(modifiedSince=b"Thu, 01 Jan 1899 00:00:10 GMT")
    def test_invalidTimestampMonth(self):
        """
        If a request is made with an I{If-Modified-Since} header value which
        contains a string in the month position which is not a recognized
        month abbreviation, the header is treated as not having been present
        and a normal 200 response is returned with a response body
        containing the resource.
        """
        self._modifiedTest(modifiedSince=b"Thu, 01 Blah 1970 00:00:10 GMT")
    def test_etagMatchedNot(self):
        """
        If a request is made with an I{If-None-Match} ETag which does not
        match the current ETag of the requested resource, a normal 200
        response is returned with a response body containing the resource.
        """
        self._modifiedTest(etag=b"unmatchedTag")
    def test_etagMatched(self):
        """
        If a request is made with an I{If-None-Match} ETag which does match the
        current ETag of the requested resource, a 304 response is returned along
        with an empty response body.
        """
        for line in [b"GET / HTTP/1.1", b"If-None-Match: MatchingTag", b""]:
            self.channel.lineReceived(line)
        result = self.transport.getvalue()
        self.assertEqual(httpHeader(result, b"ETag"), b"MatchingTag")
        self.assertEqual(httpCode(result), http.NOT_MODIFIED)
        self.assertEqual(httpBody(result), b"")
    def test_unmodifiedWithContentType(self):
        """
        Similar to L{test_etagMatched}, but the response should include a
        I{Content-Type} header if the application explicitly sets one.
        This I{Content-Type} header SHOULD NOT be present according to RFC 2616,
        section 10.3.5. It will only be present if the application explicitly
        sets it.
        """
        for line in [b"GET /with-content-type HTTP/1.1",
                     b"If-None-Match: MatchingTag", b""]:
            self.channel.lineReceived(line)
        result = self.transport.getvalue()
        self.assertEqual(httpCode(result), http.NOT_MODIFIED)
        self.assertEqual(httpBody(result), b"")
        self.assertEqual(httpHeader(result, b"Content-Type"), b"image/jpeg")
class RequestTests(unittest.TestCase):
    """
    Tests for the HTTP request class, L{server.Request}.
    """
    def test_interface(self):
        """
        L{server.Request} instances provide L{iweb.IRequest}.
        """
        self.assertTrue(
            verifyObject(iweb.IRequest, server.Request(DummyChannel(), True)))
    def testChildLink(self):
        """
        L{server.Request.childLink} returns a link to the given child: when
        the request path does not end in a slash the last segment is kept as
        a prefix, and when it does the child name alone is returned.
        """
        request = server.Request(DummyChannel(), 1)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.childLink(b'baz'), b'bar/baz')
        request = server.Request(DummyChannel(), 1)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar/', b'HTTP/1.0')
        self.assertEqual(request.childLink(b'baz'), b'baz')
    def testPrePathURLSimple(self):
        """
        L{server.Request.prePathURL} omits the port number for a plain HTTP
        request on the default port.
        """
        request = server.Request(DummyChannel(), 1)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        request.setHost(b'example.com', 80)
        self.assertEqual(request.prePathURL(), b'http://example.com/foo/bar')
    def testPrePathURLNonDefault(self):
        """
        L{server.Request.prePathURL} includes a non-default port number in
        the resulting URL.
        """
        d = DummyChannel()
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost(b'example.com', 81)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'http://example.com:81/foo/bar')
    def testPrePathURLSSLPort(self):
        """
        Port 443 is included in the URL when the transport is not actually
        SSL, since it is not the default port for the http scheme.
        """
        d = DummyChannel()
        d.transport.port = 443
        request = server.Request(d, 1)
        request.setHost(b'example.com', 443)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'http://example.com:443/foo/bar')
    def testPrePathURLSSLPortAndSSL(self):
        """
        An SSL transport on port 443 produces an https URL with no explicit
        port, since 443 is the default for https.
        """
        d = DummyChannel()
        d.transport = DummyChannel.SSL()
        d.transport.port = 443
        request = server.Request(d, 1)
        request.setHost(b'example.com', 443)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'https://example.com/foo/bar')
    def testPrePathURLHTTPPortAndSSL(self):
        """
        An SSL transport on port 80 produces an https URL with the port made
        explicit, since 80 is not the default for https.
        """
        d = DummyChannel()
        d.transport = DummyChannel.SSL()
        d.transport.port = 80
        request = server.Request(d, 1)
        request.setHost(b'example.com', 80)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'https://example.com:80/foo/bar')
    def testPrePathURLSSLNonDefault(self):
        """
        An SSL transport on a non-default port produces an https URL with the
        port made explicit.
        """
        d = DummyChannel()
        d.transport = DummyChannel.SSL()
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost(b'example.com', 81)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'https://example.com:81/foo/bar')
    def testPrePathURLSetSSLHost(self):
        """
        Passing a true third argument to L{server.Request.setHost} marks the
        request as SSL, giving an https URL regardless of the transport.
        """
        d = DummyChannel()
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost(b'foo.com', 81, 1)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'https://foo.com:81/foo/bar')
    def test_prePathURLQuoting(self):
        """
        L{Request.prePathURL} quotes special characters in the URL segments to
        preserve the original meaning.
        """
        d = DummyChannel()
        request = server.Request(d, 1)
        request.setHost(b'example.com', 80)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo%2Fbar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'http://example.com/foo%2Fbar')
class GzipEncoderTests(unittest.TestCase):
    """
    Tests for L{server.GzipEncoderFactory} and the L{server._GzipEncoder}
    instances it produces.
    """
    if _PY3:
        skip = "GzipEncoder not ported to Python 3 yet."
    def setUp(self):
        """
        Serve a small static resource, wrapped in a gzip-capable encoding
        wrapper, at C{/foo} on a dummy channel's site.
        """
        self.channel = DummyChannel()
        staticResource = Data(b"Some data", "text/plain")
        wrapped = resource.EncodingResourceWrapper(
            staticResource, [server.GzipEncoderFactory()])
        self.channel.site.resource.putChild(b"foo", wrapped)
    def test_interfaces(self):
        """
        L{server.GzipEncoderFactory} implements the
        L{iweb._IRequestEncoderFactory} and its C{encoderForRequest} returns an
        instance of L{server._GzipEncoder} which implements
        L{iweb._IRequestEncoder}.
        """
        request = server.Request(self.channel, False)
        request.gotLength(0)
        request.requestHeaders.setRawHeaders(b"Accept-Encoding",
                                             [b"gzip,deflate"])
        factory = server.GzipEncoderFactory()
        self.assertTrue(verifyObject(iweb._IRequestEncoderFactory, factory))
        encoder = factory.encoderForRequest(request)
        self.assertTrue(verifyObject(iweb._IRequestEncoder, encoder))
    def test_encoding(self):
        """
        If the client request passes a I{Accept-Encoding} header which mentions
        gzip, L{server._GzipEncoder} automatically compresses the data.
        """
        request = server.Request(self.channel, False)
        request.gotLength(0)
        request.requestHeaders.setRawHeaders(b"Accept-Encoding",
                                             [b"gzip,deflate"])
        request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
        data = self.channel.transport.written.getvalue()
        self.assertNotIn(b"Content-Length", data)
        self.assertIn(b"Content-Encoding: gzip\r\n", data)
        body = data[data.find(b"\r\n\r\n") + 4:]
        # wbits of 16 + MAX_WBITS tells zlib to expect a gzip container.
        self.assertEqual(b"Some data",
                         zlib.decompress(body, 16 + zlib.MAX_WBITS))
    def test_nonEncoding(self):
        """
        L{server.GzipEncoderFactory} doesn't return a L{server._GzipEncoder} if
        the I{Accept-Encoding} header doesn't mention gzip support.
        """
        request = server.Request(self.channel, False)
        request.gotLength(0)
        request.requestHeaders.setRawHeaders(b"Accept-Encoding",
                                             [b"foo,bar"])
        request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
        data = self.channel.transport.written.getvalue()
        self.assertIn(b"Content-Length", data)
        self.assertNotIn(b"Content-Encoding: gzip\r\n", data)
        body = data[data.find(b"\r\n\r\n") + 4:]
        self.assertEqual(b"Some data", body)
    def test_multipleAccept(self):
        """
        If there are multiple I{Accept-Encoding} header,
        L{server.GzipEncoderFactory} reads them properly to detect if gzip is
        supported.
        """
        request = server.Request(self.channel, False)
        request.gotLength(0)
        request.requestHeaders.setRawHeaders(b"Accept-Encoding",
                                             [b"deflate", b"gzip"])
        request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
        data = self.channel.transport.written.getvalue()
        self.assertNotIn(b"Content-Length", data)
        self.assertIn(b"Content-Encoding: gzip\r\n", data)
        body = data[data.find(b"\r\n\r\n") + 4:]
        self.assertEqual(b"Some data",
                         zlib.decompress(body, 16 + zlib.MAX_WBITS))
    def test_alreadyEncoded(self):
        """
        If the content is already encoded and the I{Content-Encoding} header is
        set, L{server.GzipEncoderFactory} properly appends gzip to it.
        """
        request = server.Request(self.channel, False)
        request.gotLength(0)
        request.requestHeaders.setRawHeaders(b"Accept-Encoding",
                                             [b"deflate", b"gzip"])
        request.responseHeaders.setRawHeaders(b"Content-Encoding",
                                             [b"deflate"])
        request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
        data = self.channel.transport.written.getvalue()
        self.assertNotIn(b"Content-Length", data)
        self.assertIn(b"Content-Encoding: deflate,gzip\r\n", data)
        body = data[data.find(b"\r\n\r\n") + 4:]
        self.assertEqual(b"Some data",
                         zlib.decompress(body, 16 + zlib.MAX_WBITS))
    def test_multipleEncodingLines(self):
        """
        If there are several I{Content-Encoding} headers,
        L{server.GzipEncoderFactory} normalizes it and appends gzip to the
        field value.
        """
        request = server.Request(self.channel, False)
        request.gotLength(0)
        request.requestHeaders.setRawHeaders(b"Accept-Encoding",
                                             [b"deflate", b"gzip"])
        request.responseHeaders.setRawHeaders(b"Content-Encoding",
                                             [b"foo", b"bar"])
        request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
        data = self.channel.transport.written.getvalue()
        self.assertNotIn(b"Content-Length", data)
        self.assertIn(b"Content-Encoding: foo,bar,gzip\r\n", data)
        body = data[data.find(b"\r\n\r\n") + 4:]
        self.assertEqual(b"Some data",
                         zlib.decompress(body, 16 + zlib.MAX_WBITS))
class RootResource(resource.Resource):
    """
    A non-leaf resource which records the request's root URL on every child
    lookup before delegating to the base class.
    """
    isLeaf = 0

    def getChildWithDefault(self, name, request):
        """
        Remember the root URL on the request, then look up the child as the
        base class would.
        """
        request.rememberRootURL()
        return resource.Resource.getChildWithDefault(self, name, request)

    def render(self, request):
        """
        Render an empty page.
        """
        return ''
class RememberURLTests(unittest.TestCase):
    """
    Tests for L{server.Request.rememberRootURL} and
    L{server.Request.getRootURL}.
    """
    def createServer(self, r):
        """
        Return a L{DummyChannel} whose site serves C{r} as its root resource.
        """
        channel = DummyChannel()
        channel.site = server.Site(r)
        return channel

    def testSimple(self):
        """
        The URL remembered at a nested L{RootResource} is reported for every
        request path beneath it.
        """
        root = resource.Resource()
        root.isLeaf = 0
        remembering = RootResource()
        root.putChild(b'foo', remembering)
        remembering.putChild(b'', remembering)
        remembering.putChild(b'bar', resource.Resource())
        channel = self.createServer(root)
        for path in [b'/foo/', b'/foo/bar', b'/foo/bar/baz', b'/foo/bar/']:
            request = server.Request(channel, 1)
            request.setHost(b'example.com', 81)
            request.gotLength(0)
            request.requestReceived(b'GET', path, b'HTTP/1.0')
            self.assertEqual(request.getRootURL(), b"http://example.com/foo")

    def testRoot(self):
        """
        When the remembering resource is the site root, the root URL is the
        bare authority with a trailing slash.
        """
        remembering = RootResource()
        remembering.putChild(b'', remembering)
        remembering.putChild(b'bar', resource.Resource())
        channel = self.createServer(remembering)
        for path in [b'/', b'/bar', b'/bar/baz', b'/bar/']:
            request = server.Request(channel, 1)
            request.setHost(b'example.com', 81)
            request.gotLength(0)
            request.requestReceived(b'GET', path, b'HTTP/1.0')
            self.assertEqual(request.getRootURL(), b"http://example.com/")
class NewRenderResource(resource.Resource):
    """
    A resource with per-method render handlers for I{GET} and for the
    nonstandard I{HEH} method.
    """
    def render_GET(self, request):
        """
        Respond to I{GET}.
        """
        return b"hi hi"

    def render_HEH(self, request):
        """
        Respond to the made-up I{HEH} method.
        """
        return b"ho ho"
@implementer(resource.IResource)
class HeadlessResource(object):
    """
    A resource that implements GET but not HEAD.
    """
    # Methods this resource will render; HEAD is deliberately absent so
    # tests can exercise behavior for unsupported HEAD requests.
    allowedMethods = [b"GET"]
    def render(self, request):
        """
        Leave the request open for future writes.
        """
        # Keep a reference to the request before validating the method, so
        # it is available even when UnsupportedMethod is raised.
        self.request = request
        if request.method not in self.allowedMethods:
            raise error.UnsupportedMethod(self.allowedMethods)
        # Write some data without finishing the request.
        self.request.write(b"some data")
        return server.NOT_DONE_YET
class NewRenderTests(unittest.TestCase):
    """
    Tests for L{server.Request.render}.
    """
    def _getReq(self, resource=None):
        """
        Create a request object with a stub channel and install the
        passed resource at /newrender. If no resource is passed,
        create one.
        """
        d = DummyChannel()
        if resource is None:
            resource = NewRenderResource()
        d.site.resource.putChild(b'newrender', resource)
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost(b'example.com', 81)
        request.gotLength(0)
        return request
    def testGoodMethods(self):
        """
        Requests for methods the resource supports (standard or not) are
        rendered by the matching C{render_*} handler.
        """
        req = self._getReq()
        req.requestReceived(b'GET', b'/newrender', b'HTTP/1.0')
        self.assertEqual(req.transport.getvalue().splitlines()[-1], b'hi hi')
        req = self._getReq()
        req.requestReceived(b'HEH', b'/newrender', b'HTTP/1.0')
        self.assertEqual(req.transport.getvalue().splitlines()[-1], b'ho ho')
    def testBadMethods(self):
        """
        Requests for methods the resource does not support result in a 501
        response.
        """
        req = self._getReq()
        req.requestReceived(b'CONNECT', b'/newrender', b'HTTP/1.0')
        self.assertEqual(req.code, 501)
        req = self._getReq()
        req.requestReceived(b'hlalauguG', b'/newrender', b'HTTP/1.0')
        self.assertEqual(req.code, 501)
    def test_notAllowedMethod(self):
        """
        When trying to invoke a method not in the allowed method list, we get
        a response saying it is not allowed.
        """
        req = self._getReq()
        req.requestReceived(b'POST', b'/newrender', b'HTTP/1.0')
        self.assertEqual(req.code, 405)
        self.assertTrue(req.responseHeaders.hasHeader(b"allow"))
        raw_header = req.responseHeaders.getRawHeaders(b'allow')[0]
        allowed = sorted([h.strip() for h in raw_header.split(b",")])
        self.assertEqual([b'GET', b'HEAD', b'HEH'], allowed)
    def testImplicitHead(self):
        """
        A HEAD request against a resource with no render_HEAD is answered
        with a 200 and no body.
        """
        req = self._getReq()
        req.requestReceived(b'HEAD', b'/newrender', b'HTTP/1.0')
        self.assertEqual(req.code, 200)
        # The GET body must not appear anywhere in the response.
        self.assertEqual(-1, req.transport.getvalue().find(b'hi hi'))
    def test_unsupportedHead(self):
        """
        HEAD requests against resource that only claim support for GET
        should not include a body in the response.
        """
        resource = HeadlessResource()
        req = self._getReq(resource)
        req.requestReceived(b"HEAD", b"/newrender", b"HTTP/1.0")
        headers, body = req.transport.getvalue().split(b'\r\n\r\n')
        self.assertEqual(req.code, 200)
        self.assertEqual(body, b'')
    def test_noBytesResult(self):
        """
        When implemented C{render} method does not return bytes an internal
        server error is returned.
        """
        # An object whose repr contains markup, to check it is escaped in
        # the rendered error page.
        class RiggedRepr(object):
            def __repr__(self):
                return 'my>repr'
        result = RiggedRepr()
        no_bytes_resource = resource.Resource()
        no_bytes_resource.render = lambda request: result
        request = self._getReq(no_bytes_resource)
        request.requestReceived(b"GET", b"/newrender", b"HTTP/1.0")
        headers, body = request.transport.getvalue().split(b'\r\n\r\n')
        self.assertEqual(request.code, 500)
        expected = [
            '',
            '<html>',
            '  <head><title>500 - Request did not return bytes</title></head>',
            '  <body>',
            '    <h1>Request did not return bytes</h1>',
            '    <p>Request: <pre>&lt;%s&gt;</pre><br />'
            'Resource: <pre>&lt;%s&gt;</pre><br />'
            'Value: <pre>my&gt;repr</pre></p>' % (
                reflect.safe_repr(request)[1:-1],
                reflect.safe_repr(no_bytes_resource)[1:-1],
                ),
            '  </body>',
            '</html>',
            '']
        self.assertEqual('\n'.join(expected).encode('ascii'), body)
class GettableResource(resource.Resource):
    """
    Used by AllowedMethodsTests to simulate an allowed method.
    """
    # NOTE(review): these handlers take no request argument; they appear to
    # exist only to be discovered by _computeAllowedMethods, never to be
    # rendered -- confirm before invoking them.
    def render_GET(self):
        pass
    def render_fred_render_ethel(self):
        """
        The unusual method name is designed to test the culling method
        in C{twisted.web.resource._computeAllowedMethods}.
        """
        pass
class AllowedMethodsTests(unittest.TestCase):
    """
    C{twisted.web.resource._computeAllowedMethods} is provided by a
    default should the subclass not provide the method.
    """
    if _PY3:
        skip = "Allowed methods functionality not ported to Python 3."
    def _getReq(self):
        """
        Generate a dummy request for use by C{_computeAllowedMethod} tests.
        """
        d = DummyChannel()
        d.site.resource.putChild(b'gettableresource', GettableResource())
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost(b'example.com', 81)
        request.gotLength(0)
        return request
    def test_computeAllowedMethods(self):
        """
        C{_computeAllowedMethods} will search through the
        'gettableresource' for all attributes/methods of the form
        'render_{method}' ('render_GET', for example) and return a list of
        the methods. 'HEAD' will always be included from the
        resource.Resource superclass.
        """
        res = GettableResource()
        allowedMethods = resource._computeAllowedMethods(res)
        self.assertEqual(set(allowedMethods),
                         set([b'GET', b'HEAD', b'fred_render_ethel']))
    def test_notAllowed(self):
        """
        When an unsupported method is requested, the default
        L{_computeAllowedMethods} method will be called to determine the
        allowed methods, and the HTTP 405 'Method Not Allowed' status will
        be returned with the allowed methods returned in the
        'Allow' header.
        """
        req = self._getReq()
        req.requestReceived(b'POST', b'/gettableresource', b'HTTP/1.0')
        self.assertEqual(req.code, 405)
        self.assertEqual(
            set(req.responseHeaders.getRawHeaders(b'allow')[0].split(b", ")),
            set([b'GET', b'HEAD', b'fred_render_ethel'])
        )
    def test_notAllowedQuoting(self):
        """
        When an unsupported method response is generated, an HTML message will
        be displayed. That message should include a quoted form of the URI,
        since that value comes from a browser and shouldn't necessarily be
        trusted.
        """
        req = self._getReq()
        req.requestReceived(b'POST', b'/gettableresource?'
                            b'value=<script>bad', b'HTTP/1.0')
        self.assertEqual(req.code, 405)
        renderedPage = req.transport.getvalue()
        # The raw markup must not appear; only its escaped form may.
        self.assertNotIn(b"<script>bad", renderedPage)
        self.assertIn(b'&lt;script&gt;bad', renderedPage)
    def test_notImplementedQuoting(self):
        """
        When an not-implemented method response is generated, an HTML message
        will be displayed. That message should include a quoted form of the
        requested method, since that value comes from a browser and shouldn't
        necessarily be trusted.
        """
        req = self._getReq()
        req.requestReceived(b'<style>bad', b'/gettableresource', b'HTTP/1.0')
        self.assertEqual(req.code, 501)
        renderedPage = req.transport.getvalue()
        # The raw markup must not appear; only its escaped form may.
        self.assertNotIn(b"<style>bad", renderedPage)
        self.assertIn(b'&lt;style&gt;bad', renderedPage)
    
class DummyRequestForLogTest(DummyRequest):
    """
    A fake request carrying fixed attribute values, used to exercise
    access-log formatting.
    """
    uri = b'/dummy' # parent class uri has "http://", which doesn't really happen
    code = 123
    clientproto = b'HTTP/1.0'
    sentLength = None
    client = IPv4Address('TCP', '1.2.3.4', 12345)
class AccessLogTestsMixin(object):
    """
    A mixin for L{TestCase} subclasses defining tests that apply to
    L{HTTPFactory} and its subclasses.
    """
    def factory(self, *args, **kwargs):
        """
        Get the factory class to apply logging tests to.
        Subclasses must override this method.
        """
        raise NotImplementedError("Subclass failed to override factory")
    def test_combinedLogFormat(self):
        """
        The factory's C{log} method writes a I{combined log format} line to the
        factory's log file.
        """
        reactor = Clock()
        # Set the clock to an arbitrary point in time.  It doesn't matter when
        # as long as it corresponds to the timestamp in the string literal in
        # the assertion below.
        reactor.advance(1234567890)
        logPath = self.mktemp()
        factory = self.factory(logPath=logPath)
        factory._reactor = reactor
        factory.startFactory()
        try:
            factory.log(DummyRequestForLogTest(factory))
        finally:
            factory.stopFactory()
        self.assertEqual(
            # Client IP
            b'"1.2.3.4" '
            # Some blanks we never fill in
            b'- - '
            # The current time (circa 1234567890)
            b'[13/Feb/2009:23:31:30 +0000] '
            # Method, URI, version
            b'"GET /dummy HTTP/1.0" '
            # Response code
            b'123 '
            # Response length
            b'- '
            # Value of the "Referer" header.  Probably incorrectly quoted.
            b'"-" '
            # Value of the "User-Agent" header.  Probably incorrectly quoted.
            b'"-"' + self.linesep,
            FilePath(logPath).getContent())
    def test_logFormatOverride(self):
        """
        If the factory is initialized with a custom log formatter then that
        formatter is used to generate lines for the log file.
        """
        def notVeryGoodFormatter(timestamp, request):
            return u"this is a bad log format"
        reactor = Clock()
        reactor.advance(1234567890)
        logPath = self.mktemp()
        factory = self.factory(
            logPath=logPath, logFormatter=notVeryGoodFormatter)
        factory._reactor = reactor
        factory.startFactory()
        try:
            factory.log(DummyRequestForLogTest(factory))
        finally:
            factory.stopFactory()
        self.assertEqual(
            # self.linesep is a sad thing.
            # https://twistedmatrix.com/trac/ticket/6938
            b"this is a bad log format" + self.linesep,
            FilePath(logPath).getContent())
class HTTPFactoryAccessLogTests(AccessLogTestsMixin, unittest.TestCase):
    """
    Tests for L{http.HTTPFactory.log}.
    """
    factory = http.HTTPFactory
    # Expected line terminator in the log file written by this factory.
    linesep = b"\n"
class SiteAccessLogTests(AccessLogTestsMixin, unittest.TestCase):
    """
    Tests for L{server.Site.log}.
    """
    if _PY3:
        skip = "Site not ported to Python 3 yet."
    # Expected line terminator in the log file written by this factory;
    # platform-dependent here, unlike HTTPFactory's.
    linesep = os.linesep
    def factory(self, *args, **kwargs):
        """
        Build a L{server.Site}, the factory subclass under test.
        """
        return server.Site(resource.Resource(), *args, **kwargs)
class CombinedLogFormatterTests(unittest.TestCase):
    """
    Tests for L{twisted.web.http.combinedLogFormatter}.
    """
    def test_interface(self):
        """
        L{combinedLogFormatter} provides L{IAccessLogFormatter}.
        """
        self.assertTrue(verifyObject(
            iweb.IAccessLogFormatter, http.combinedLogFormatter))
    def test_nonASCII(self):
        """
        Bytes in fields of the request which are not part of ASCII are escaped
        in the result.
        """
        reactor = Clock()
        reactor.advance(1234567890)
        timestamp = http.datetimeToLogString(reactor.seconds())
        # Plant non-ASCII bytes in every field that ends up in the log line.
        request = DummyRequestForLogTest(http.HTTPFactory())
        request.client = IPv4Address("TCP", b"evil x-forwarded-for \x80", 12345)
        request.method = b"POS\x81"
        request.protocol = b"HTTP/1.\x82"
        request.headers[b"referer"] = b"evil \x83"
        request.headers[b"user-agent"] = b"evil \x84"
        line = http.combinedLogFormatter(timestamp, request)
        self.assertEqual(
            u'"evil x-forwarded-for \\x80" - - [13/Feb/2009:23:31:30 +0000] '
            u'"POS\\x81 /dummy HTTP/1.0" 123 - "evil \\x83" "evil \\x84"',
            line)
class ProxiedLogFormatterTests(unittest.TestCase):
    """
    Tests for L{twisted.web.http.proxiedLogFormatter}.
    """
    def test_interface(self):
        """
        L{proxiedLogFormatter} provides L{IAccessLogFormatter}.
        """
        self.assertTrue(verifyObject(
            iweb.IAccessLogFormatter, http.proxiedLogFormatter))
    def _xforwardedforTest(self, header):
        """
        Assert that a request with the given value in its I{X-Forwarded-For}
        header is logged by L{proxiedLogFormatter} the same way it would have
        been logged by L{combinedLogFormatter} but with 172.16.1.2 as the
        client address instead of the normal value.
        @param header: An I{X-Forwarded-For} header with left-most address of
            172.16.1.2.
        """
        reactor = Clock()
        reactor.advance(1234567890)
        timestamp = http.datetimeToLogString(reactor.seconds())
        request = DummyRequestForLogTest(http.HTTPFactory())
        # Derive the expected line from the combined formatter's output by
        # substituting the proxied client address.
        expected = http.combinedLogFormatter(timestamp, request).replace(
            u"1.2.3.4", u"172.16.1.2")
        request.requestHeaders.setRawHeaders(b"x-forwarded-for", [header])
        line = http.proxiedLogFormatter(timestamp, request)
        self.assertEqual(expected, line)
    def test_xforwardedfor(self):
        """
        L{proxiedLogFormatter} logs the value of the I{X-Forwarded-For} header
        in place of the client address field.
        """
        self._xforwardedforTest(b"172.16.1.2, 10.0.0.3, 192.168.1.4")
    def test_extraForwardedSpaces(self):
        """
        Any extra spaces around the address in the I{X-Forwarded-For} header
        are stripped and not included in the log string.
        """
        self._xforwardedforTest(b" 172.16.1.2 , 10.0.0.3, 192.168.1.4")
class LogEscapingTests(unittest.TestCase):
    """
    Tests for the escaping of quote characters in the fields of access log
    lines written via L{http.HTTPFactory.log}.
    """
    def setUp(self):
        """
        Create a started L{http.HTTPFactory} logging to a temporary path and
        a dummy request to log.
        """
        self.logPath = self.mktemp()
        self.site = http.HTTPFactory(self.logPath)
        self.site.startFactory()
        self.request = DummyRequestForLogTest(self.site, False)
    def assertLogs(self, line):
        """
        Assert that if C{self.request} is logged using C{self.site} then
        C{line} is written to the site's access log file.
        @param line: The expected line.
        @type line: L{bytes}
        @raise self.failureException: If the log file contains something other
            than the expected line.
        """
        try:
            self.site.log(self.request)
        finally:
            self.site.stopFactory()
        logged = FilePath(self.logPath).getContent()
        self.assertEqual(line, logged)
    def test_simple(self):
        """
        A I{GET} request is logged with no extra escapes.
        """
        # Pin the log timestamp so the expected line is deterministic.
        self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.assertLogs(
            b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
            b'"GET /dummy HTTP/1.0" 123 - "-" "-"\n')
    def test_methodQuote(self):
        """
        If the HTTP request method includes a quote, the quote is escaped.
        """
        self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.method = b'G"T'
        self.assertLogs(
            b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
            b'"G\\"T /dummy HTTP/1.0" 123 - "-" "-"\n')
    def test_requestQuote(self):
        """
        If the HTTP request path includes a quote, the quote is escaped.
        """
        self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.uri = b'/dummy"withquote'
        self.assertLogs(
            b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
            b'"GET /dummy\\"withquote HTTP/1.0" 123 - "-" "-"\n')
    def test_protoQuote(self):
        """
        If the HTTP request version includes a quote, the quote is escaped.
        """
        self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.clientproto = b'HT"P/1.0'
        self.assertLogs(
            b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
            b'"GET /dummy HT\\"P/1.0" 123 - "-" "-"\n')
    def test_refererQuote(self):
        """
        If the value of the I{Referer} header contains a quote, the quote is
        escaped.
        """
        self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.headers[b'referer'] = (
            b'http://malicious" ".website.invalid')
        self.assertLogs(
            b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
            b'"GET /dummy HTTP/1.0" 123 - '
            b'"http://malicious\\" \\".website.invalid" "-"\n')
    def test_userAgentQuote(self):
        """
        If the value of the I{User-Agent} header contains a quote, the quote is
        escaped.
        """
        self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.headers[b'user-agent'] = b'Malicious Web" Evil'
        self.assertLogs(
            b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
            b'"GET /dummy HTTP/1.0" 123 - "-" "Malicious Web\\" Evil"\n')
class ServerAttributesTests(unittest.TestCase):
    """
    Tests that deprecated twisted.web.server attributes raise the appropriate
    deprecation warnings when used.
    """
    def test_deprecatedAttributeDateTimeString(self):
        """
        twisted.web.server.date_time_string should not be used; instead use
        twisted.web.http.datetimeToString directly
        """
        # Merely accessing the attribute should emit the warning.
        server.date_time_string
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_deprecatedAttributeDateTimeString])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(
            warnings[0]['message'],
            ("twisted.web.server.date_time_string was deprecated in Twisted "
             "12.1.0: Please use twisted.web.http.datetimeToString instead"))
    def test_deprecatedAttributeStringDateTime(self):
        """
        twisted.web.server.string_date_time should not be used; instead use
        twisted.web.http.stringToDatetime directly
        """
        # Merely accessing the attribute should emit the warning.
        server.string_date_time
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_deprecatedAttributeStringDateTime])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(
            warnings[0]['message'],
            ("twisted.web.server.string_date_time was deprecated in Twisted "
             "12.1.0: Please use twisted.web.http.stringToDatetime instead"))
import re
import itertools
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import (
ManyRelatedObjectsDescriptor,
RelatedObject,
)
from django.core.exceptions import ImproperlyConfigured
from drf_nested_resource.compat import singular_noun, GenericForeignKey, GenericRelation, GenericRel
def is_generic_relationship_pair(parent_field, child_field):
    """
    Return True when ``parent_field`` (a ``GenericRelation`` on the parent
    model) and ``child_field`` (a ``GenericForeignKey`` on the child model)
    describe the same generic relationship.
    """
    # Both sides must be the generic relation field types.
    if not (isinstance(child_field, GenericForeignKey)
            and isinstance(parent_field, GenericRelation)):
        return False

    child_meta = child_field.model._meta
    # Fields the GenericForeignKey actually points at.
    actual = (
        child_meta.get_field(child_field.ct_field),
        child_meta.get_field(child_field.fk_field),
    )
    # Fields the parent's GenericRelation expects to be pointed at.
    expected = (
        child_meta.get_field(parent_field.content_type_field_name),
        child_meta.get_field(parent_field.object_id_field_name),
    )
    return actual == expected
def find_child_to_parent_accessor_name(parent_model, child_model):
    """
    Return the name of the attribute on ``child_model`` that references
    ``parent_model``, trying each supported relationship type in turn.

    Raises ``ImproperlyConfigured`` when no relationship is found.
    """
    # 1) ForeignKey declared on the child model.
    for candidate in child_model._meta.fields:
        if isinstance(candidate, models.ForeignKey) and candidate.rel.to is parent_model:
            return candidate.name

    # 2) ManyToMany declared on the child model.
    for candidate in child_model._meta.many_to_many:
        if candidate.rel.to is parent_model:
            return candidate.attname

    # 3) ManyToMany declared on the parent model; use its reverse name.
    for candidate in parent_model._meta.many_to_many:
        if candidate.rel.to is child_model:
            return candidate.rel.related_name

    # 4) GenericForeignKey / GenericRelation pair across the two models.
    virtual_pairs = itertools.product(
        parent_model._meta.virtual_fields,
        child_model._meta.virtual_fields,
    )
    for parent_field, child_field in virtual_pairs:
        if is_generic_relationship_pair(parent_field, child_field):
            return child_field.name

    raise ImproperlyConfigured(
        "Parent model '{0}' cannot be found for model '{1}'.".format(
            parent_model._meta.model_name, child_model._meta.model_name)
    )
def find_child_to_parent_serializer_field(serializer_class, parent_accessor_name):
    """
    Given a serializer class (for the child model) and the name of the
    attribute on the child model which references the parent model, find the
    name of the serializer field that *likely* references the parent model.

    Raises ``ImproperlyConfigured`` when no candidate field is found.
    """
    serializer = serializer_class()

    def is_serializer_field(name):
        return name in serializer.get_fields()

    # keep track of the values that were checked for nice error message reporting.
    checked = [parent_accessor_name]

    # there may be a serializer field by the exact name of the child to
    # parent accessor attribute.
    if is_serializer_field(parent_accessor_name):
        return parent_accessor_name

    # it may be something like a ForeignKey which has the `_id` suffix.
    child_model = serializer.Meta.model
    try:
        field = child_model._meta.get_field(parent_accessor_name)
        checked.append(field.attname)
        if is_serializer_field(field.attname):
            return field.attname
    except FieldDoesNotExist:
        pass

    # BUG FIX: the previous message read "find field on serializer that
    # parent model", which was garbled English.
    raise ImproperlyConfigured(
        "Unable to find a field on the serializer that references the parent "
        "model. Checked {0}. "
        "You may need to declare `parent_serializer_field` on your view if the "
        "field is not in one of these locations.".format(
            checked,
        )
    )
def get_virtual_field(model, field_name):
    """
    Return the virtual field on ``model`` named ``field_name``.

    Raises ``FieldDoesNotExist`` when no (or more than one) such virtual
    field is declared.
    """
    # BUG FIX: under Python 3 ``filter()`` returns a lazy iterator, which
    # has no ``len()`` and cannot be indexed; materialise the matches.
    matched_fields = [
        f for f in model._meta.virtual_fields if f.name == field_name
    ]
    if len(matched_fields) == 1:
        return matched_fields[0]
    raise FieldDoesNotExist(
        "{!r} has no virtual field named {!r}".format(
            model,
            field_name,
        )
    )
def camel_case_to_snake_case(value):
    """
    Convert a CamelCase string to snake_case.

    source: http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-camel-case
    """
    # First break "XxxYyy" word boundaries, then "xX"/"9X" boundaries,
    # and lowercase the result.
    partially_split = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', value)
    fully_split = re.sub('([a-z0-9])([A-Z])', r'\1_\2', partially_split)
    return fully_split.lower()
def compute_default_url_kwarg_for_parent(parent_model, child_model):
    """
    Given a `parent_model` and a `child_model`, compute the default URL
    keyword argument name (e.g. ``parent_pk``) used to look up the parent
    from a nested route.

    Strategies are tried in order: FK/M2M field on the child, generic
    relation, then a reverse M2M descriptor.
    """
    parent_accessor_name = find_child_to_parent_accessor_name(
        parent_model=parent_model,
        child_model=child_model,
    )
    # ForeignKey and ManyToManyField strategies
    try:
        field = child_model._meta.get_field(parent_accessor_name)
    except FieldDoesNotExist:
        pass
    else:
        if isinstance(field, models.ManyToManyField):
            # this isn't generic enough as it only accounts for fields that
            # are named with the pluralization of the other model that ends
            # with an `s`.
            return '{0}_{1}'.format(singular_noun(field.name), 'pk')
        elif isinstance(field, models.ForeignKey):
            return '{0}_{1}'.format(field.name, 'pk')
        else:
            raise ImproperlyConfigured(
                "No known strategy for computing url kwarg for field of type {!r}".format(
                    type(field),
                )
            )
    # GenericForeignKey strategy
    try:
        field = get_virtual_field(child_model, parent_accessor_name)
    except FieldDoesNotExist:
        pass
    else:
        # NOTE(review): the virtual field itself is unused here; the kwarg
        # is derived from the parent model's class name instead.
        return '{0}_{1}'.format(
            camel_case_to_snake_case(parent_model._meta.object_name),
            'pk',
        )
    # ManyToManyField from *other* side of the relationship.
    child_to_parent_accessor = getattr(child_model, parent_accessor_name, None)
    if isinstance(child_to_parent_accessor, ManyRelatedObjectsDescriptor):
        return '{0}_{1}'.format(
            singular_noun(parent_accessor_name),
            'pk',
        )
    raise ImproperlyConfigured(
        "No known strategy found for computing the url parameter for model "
        "{!r}. You may need to declare `parent_url_kwarg` on your "
        "view.".format(
            parent_model,
        )
    )
def get_all_virtual_relations(model):
    """
    Return the ``rel`` objects for every ``GenericRelation`` declared as a
    virtual field on ``model``.
    """
    return [
        field.rel
        for field in model._meta.virtual_fields
        if isinstance(field, GenericRelation)
    ]
def find_parent_to_child_manager(parent_obj, child_model):
    """
    Return the related manager on ``parent_obj`` that yields ``child_model``
    instances, searching ForeignKey, GenericForeignKey and ManyToMany
    relations (from either side).

    Raises ``ImproperlyConfigured`` when zero or multiple candidate
    relations are found.
    """
    def is_relation_to_child_model(rel):
        if isinstance(rel, GenericRel):
            return issubclass(rel.to, child_model)
        if isinstance(rel, RelatedObject):
            if isinstance(parent_obj, rel.parent_model) and issubclass(rel.model, child_model):
                return True
            # reverse
            if issubclass(rel.parent_model, child_model):
                if isinstance(parent_obj, rel.model):
                    return True
        else:
            assert False, "This code path should not be possible"

    # BUG FIX: under Python 3 ``filter()`` returns a one-shot iterator;
    # the code below consumes it multiple times (``set(...)`` twice,
    # then indexing), so materialise the matches as a list.
    related_objects = list(filter(
        is_relation_to_child_model,
        itertools.chain(
            # ForeignKey relations
            parent_obj._meta.get_all_related_objects(),
            # GenericForeignKey relations
            get_all_virtual_relations(parent_obj),
            # ManyToMany relations
            parent_obj._meta.get_all_related_many_to_many_objects(),
            # ManyToMany relations (from the other model)
            child_model._meta.get_all_related_many_to_many_objects(),
        )
    ))
    if len(set(related_objects)) < 1:
        raise ImproperlyConfigured(
            "Unable to find manager from {!r} to {!r}. You may need to declare "
            "`parent_to_child_manager_attr` on your view if the manager is in a "
            "custom location.".format(
                parent_obj.__class__, child_model,
            )
        )
    elif len(set(related_objects)) > 1:
        raise ImproperlyConfigured(
            "Found multiple valid related objects while trying to find manager "
            "from {!r} to {!r}. You may need to declare "
            "`parent_to_child_manager_attr` on your view. Found related objects "
            "{!r}".format(
                parent_obj.__class__, child_model, related_objects,
            )
        )
    rel = related_objects[0]
    if isinstance(rel, GenericRel):
        return getattr(parent_obj, rel.field.attname)
    elif issubclass(rel.model, child_model):
        if rel.model == rel.parent_model:
            # Self referencing ManyToManyField
            return getattr(parent_obj, rel.field.attname)
        else:
            return getattr(parent_obj, rel.get_accessor_name())
    elif isinstance(parent_obj, rel.model):
        return getattr(parent_obj, rel.field.attname)
# Official Colors
The Kubernetes logo has an official blue color. When reproducing the logo, please use the official color when possible.
## Pantone
When possible, the Pantone color is preferred for print material. The official Pantone color is *285C*.
## RGB
When used digitally, the official RGB color code is *#326CE5*. | unknown | github | https://github.com/kubernetes/kubernetes | logo/colors.md |
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cache.caffeine;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import java.util.function.Supplier;
import com.github.benmanes.caffeine.cache.AsyncCache;
import com.github.benmanes.caffeine.cache.LoadingCache;
import org.jspecify.annotations.Nullable;
import org.springframework.cache.support.AbstractValueAdaptingCache;
import org.springframework.util.Assert;
/**
* Spring {@link org.springframework.cache.Cache} adapter implementation
* on top of a Caffeine {@link com.github.benmanes.caffeine.cache.Cache} instance.
*
* <p>Supports the {@link #retrieve(Object)} and {@link #retrieve(Object, Supplier)}
* operations through Caffeine's {@link AsyncCache}, when provided via the
* {@link #CaffeineCache(String, AsyncCache, boolean)} constructor.
*
* <p>Requires Caffeine 3.0 or higher.
*
* @author Ben Manes
* @author Juergen Hoeller
* @author Stephane Nicoll
* @since 4.3
* @see CaffeineCacheManager
*/
public class CaffeineCache extends AbstractValueAdaptingCache {

	private final String name;

	/** Synchronous cache view; for async-constructed instances this wraps {@link #asyncCache}. */
	private final com.github.benmanes.caffeine.cache.Cache<Object, Object> cache;

	/** Only non-null when constructed via {@link #CaffeineCache(String, AsyncCache, boolean)}. */
	private @Nullable AsyncCache<Object, Object> asyncCache;


	/**
	 * Create a {@link CaffeineCache} instance with the specified name and the
	 * given internal {@link com.github.benmanes.caffeine.cache.Cache} to use.
	 * @param name the name of the cache
	 * @param cache the backing Caffeine Cache instance
	 */
	public CaffeineCache(String name, com.github.benmanes.caffeine.cache.Cache<Object, Object> cache) {
		this(name, cache, true);
	}

	/**
	 * Create a {@link CaffeineCache} instance with the specified name and the
	 * given internal {@link com.github.benmanes.caffeine.cache.Cache} to use.
	 * @param name the name of the cache
	 * @param cache the backing Caffeine Cache instance
	 * @param allowNullValues whether to accept and convert {@code null} values
	 * for this cache
	 */
	public CaffeineCache(String name, com.github.benmanes.caffeine.cache.Cache<Object, Object> cache,
			boolean allowNullValues) {

		super(allowNullValues);
		Assert.notNull(name, "Name must not be null");
		Assert.notNull(cache, "Cache must not be null");
		this.name = name;
		this.cache = cache;
	}

	/**
	 * Create a {@link CaffeineCache} instance with the specified name and the
	 * given internal {@link AsyncCache} to use.
	 * @param name the name of the cache
	 * @param cache the backing Caffeine AsyncCache instance
	 * @param allowNullValues whether to accept and convert {@code null} values
	 * for this cache
	 * @since 6.1
	 */
	public CaffeineCache(String name, AsyncCache<Object, Object> cache, boolean allowNullValues) {
		super(allowNullValues);
		Assert.notNull(name, "Name must not be null");
		Assert.notNull(cache, "Cache must not be null");
		this.name = name;
		// Keep both views: the synchronous view serves the regular Cache API,
		// the async view serves the retrieve(..) operations.
		this.cache = cache.synchronous();
		this.asyncCache = cache;
	}


	@Override
	public final String getName() {
		return this.name;
	}

	/**
	 * Return the internal Caffeine Cache
	 * (possibly an adapter on top of an {@link #getAsyncCache()}).
	 */
	@Override
	public final com.github.benmanes.caffeine.cache.Cache<Object, Object> getNativeCache() {
		return this.cache;
	}

	/**
	 * Return the internal Caffeine AsyncCache.
	 * @throws IllegalStateException if no AsyncCache is available
	 * @since 6.1
	 * @see #CaffeineCache(String, AsyncCache, boolean)
	 * @see CaffeineCacheManager#setAsyncCacheMode
	 */
	public final AsyncCache<Object, Object> getAsyncCache() {
		Assert.state(this.asyncCache != null,
				"No Caffeine AsyncCache available: set CaffeineCacheManager.setAsyncCacheMode(true)");
		return this.asyncCache;
	}

	@SuppressWarnings("unchecked")
	@Override
	public <T> @Nullable T get(Object key, Callable<T> valueLoader) {
		// LoadFunction adapts the Callable and wraps checked exceptions.
		return (T) fromStoreValue(this.cache.get(key, new LoadFunction(valueLoader)));
	}

	@Override
	public @Nullable CompletableFuture<?> retrieve(Object key) {
		CompletableFuture<?> result = getAsyncCache().getIfPresent(key);
		if (result != null && isAllowNullValues()) {
			// Unwrap the internal null holder into a ValueWrapper for callers.
			result = result.thenApply(this::toValueWrapper);
		}
		return result;
	}

	@SuppressWarnings("unchecked")
	@Override
	public <T> CompletableFuture<T> retrieve(Object key, Supplier<CompletableFuture<T>> valueLoader) {
		if (isAllowNullValues()) {
			// Store-value conversion is needed both ways when nulls are allowed.
			return (CompletableFuture<T>) getAsyncCache()
					.get(key, (k, e) -> valueLoader.get().thenApply(this::toStoreValue))
					.thenApply(this::fromStoreValue);
		}
		else {
			return (CompletableFuture<T>) getAsyncCache().get(key, (k, e) -> valueLoader.get());
		}
	}

	@Override
	protected @Nullable Object lookup(Object key) {
		// A LoadingCache can compute missing entries on lookup.
		if (this.cache instanceof LoadingCache<Object, Object> loadingCache) {
			return loadingCache.get(key);
		}
		return this.cache.getIfPresent(key);
	}

	@Override
	public void put(Object key, @Nullable Object value) {
		this.cache.put(key, toStoreValue(value));
	}

	@Override
	public @Nullable ValueWrapper putIfAbsent(Object key, @Nullable Object value) {
		// The mapping function only runs when the key is absent, so "called"
		// tells us whether our value was inserted (return null) or an
		// existing value won (return it wrapped).
		PutIfAbsentFunction callable = new PutIfAbsentFunction(value);
		Object result = this.cache.get(key, callable);
		return (callable.called ? null : toValueWrapper(result));
	}

	@Override
	public void evict(Object key) {
		this.cache.invalidate(key);
	}

	@Override
	public boolean evictIfPresent(Object key) {
		return (this.cache.asMap().remove(key) != null);
	}

	@Override
	public void clear() {
		this.cache.invalidateAll();
	}

	@Override
	public boolean invalidate() {
		// Report whether there was anything to remove.
		boolean notEmpty = !this.cache.asMap().isEmpty();
		this.cache.invalidateAll();
		return notEmpty;
	}


	/**
	 * Mapping function that records whether it was invoked, allowing
	 * {@link #putIfAbsent} to distinguish insertion from a pre-existing entry.
	 */
	private class PutIfAbsentFunction implements Function<Object, Object> {

		private final @Nullable Object value;

		boolean called;

		public PutIfAbsentFunction(@Nullable Object value) {
			this.value = value;
		}

		@Override
		public Object apply(Object key) {
			this.called = true;
			return toStoreValue(this.value);
		}
	}


	/**
	 * Adapter from a {@link Callable} value loader to Caffeine's mapping
	 * function, translating loader failures into {@link ValueRetrievalException}.
	 */
	private class LoadFunction implements Function<Object, Object> {

		private final Callable<?> valueLoader;

		public LoadFunction(Callable<?> valueLoader) {
			Assert.notNull(valueLoader, "Callable must not be null");
			this.valueLoader = valueLoader;
		}

		@Override
		public Object apply(Object key) {
			try {
				return toStoreValue(this.valueLoader.call());
			}
			catch (Exception ex) {
				throw new ValueRetrievalException(key, this.valueLoader, ex);
			}
		}
	}

}
# RBAC for the cluster-autoscaler addon: grants the `cluster-autoscaler`
# user leader-election rights, node/pod management, and read-only access
# to the cluster state it needs for scaling decisions.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cluster-autoscaler
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  # leader election
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["create"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    resourceNames: ["cluster-autoscaler"]
    verbs: ["get", "update", "patch", "delete"]
  # accessing & modifying cluster state (nodes & pods)
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["pods/eviction"]
    verbs: ["create"]
  # read-only access to cluster state
  - apiGroups: [""]
    resources: ["services", "replicationcontrollers", "persistentvolumes", "persistentvolumeclaims"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources: ["daemonsets", "replicasets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources: ["statefulsets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["batch"]
    resources: ["jobs"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["policy"]
    resources: ["poddisruptionbudgets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses", "csinodes"]
    verbs: ["get", "list", "watch"]
  # misc access
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["create"]
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["cluster-autoscaler-status"]
    verbs: ["get", "update", "patch", "delete"]
---
# Bind the role above to the `cluster-autoscaler` user.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cluster-autoscaler
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: User
    name: cluster-autoscaler
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-autoscaler
  apiGroup: rbac.authorization.k8s.io
"""
homeassistant.components.sensor.neurio_energy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Monitors home energy use as measured by an neurio hub using its official API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.neurio_energy/
"""
import logging
import requests.exceptions
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['neurio==0.2.10']
_LOGGER = logging.getLogger(__name__)
ICON = 'mdi:flash'
def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Sets up the Neurio sensor.

    Requires `api_key` and `api_secret` in the platform configuration; if
    `sensor_id` is omitted it is auto-detected from the Neurio account.
    """
    api_key = config.get("api_key")
    api_secret = config.get("api_secret")
    sensor_id = config.get("sensor_id")

    # BUG FIX: both credentials are required, so bail out if *either* is
    # missing. The previous `and` only rejected the case where both were
    # absent, letting a half-configured platform crash later in the client.
    # Also add the missing separator between the concatenated string parts.
    if not api_key or not api_secret:
        _LOGGER.error(
            "Configuration Error: "
            "Please make sure you have configured your api key and api secret")
        return None

    if not sensor_id:
        import neurio

        # Auto-detect the sensor id from the first sensor of the first
        # location on the account, and tell the user how to pin it.
        neurio_tp = neurio.TokenProvider(key=api_key, secret=api_secret)
        neurio_client = neurio.Client(token_provider=neurio_tp)
        user_info = neurio_client.get_user_information()
        _LOGGER.warning('Sensor ID auto-detected, set api_sensor_id: "%s"',
                        user_info["locations"][0]["sensors"][0]["sensorId"])
        sensor_id = user_info["locations"][0]["sensors"][0]["sensorId"]

    dev = []
    dev.append(NeurioEnergy(api_key, api_secret, sensor_id))
    add_devices(dev)
# pylint: disable=too-many-instance-attributes
class NeurioEnergy(Entity):
    """ Represent a Neurio energy-usage sensor. """

    # pylint: disable=too-many-arguments
    def __init__(self, api_key, api_secret, sensor_id):
        self._name = "Energy Usage"
        self.api_key = api_key
        self.api_secret = api_secret
        self.sensor_id = sensor_id
        self._state = None
        self._unit_of_measurement = "W"

    @property
    def name(self):
        """ Name of the sensor. """
        return self._name

    @property
    def state(self):
        """ Current power consumption in watts. """
        return self._state

    @property
    def unit_of_measurement(self):
        """ Unit of measurement of this entity, if any. """
        return self._unit_of_measurement

    @property
    def icon(self):
        """ Icon to use in the frontend, if any. """
        return ICON

    def update(self):
        """ Fetch the latest live sample from the Neurio web service. """
        import neurio
        try:
            token_provider = neurio.TokenProvider(key=self.api_key,
                                                  secret=self.api_secret)
            client = neurio.Client(token_provider=token_provider)
            sample = client.get_samples_live_last(
                sensor_id=self.sensor_id)
            self._state = sample['consumptionPower']
        except (requests.exceptions.RequestException, ValueError):
            _LOGGER.warning('Could not update status for %s', self.name)
/*
* src/bin/pg_dump/pg_backup_tar.h
*
* TAR Header (see "ustar interchange format" in POSIX 1003.1)
*
* Offset Length Contents
* 0 100 bytes File name ('\0' terminated, 99 maximum length)
* 100 8 bytes File mode (in octal ascii)
* 108 8 bytes User ID (in octal ascii)
* 116 8 bytes Group ID (in octal ascii)
* 124 12 bytes File size (in octal ascii)
* 136 12 bytes Modify time (Unix timestamp in octal ascii)
* 148 8 bytes Header checksum (in octal ascii)
* 156 1 bytes Type flag (see below)
* 157 100 bytes Linkname, if symlink ('\0' terminated, 99 maximum length)
* 257 6 bytes Magic ("ustar\0")
* 263 2 bytes Version ("00")
* 265 32 bytes User name ('\0' terminated, 31 maximum length)
* 297 32 bytes Group name ('\0' terminated, 31 maximum length)
* 329 8 bytes Major device ID (in octal ascii)
* 337 8 bytes Minor device ID (in octal ascii)
* 345 155 bytes File name prefix (not used in our implementation)
* 500 12 bytes Padding
*
* 512 (s+p)bytes File contents, padded out to 512-byte boundary
*/
/* The type flag defines the type of file */
#define LF_OLDNORMAL '\0' /* Normal disk file, Unix compatible */
#define LF_NORMAL '0' /* Normal disk file */
#define LF_LINK '1' /* Link to previously dumped file */
#define LF_SYMLINK '2' /* Symbolic link */
#define LF_CHR '3' /* Character special file */
#define LF_BLK '4' /* Block special file */
#define LF_DIR '5' /* Directory */
#define LF_FIFO '6' /* FIFO special file */
#define LF_CONTIG '7' /* Contiguous file */ | c | github | https://github.com/postgres/postgres | src/bin/pg_dump/pg_backup_tar.h |
// NOTE: these specs exercise webpack's *static* analysis of dynamic
// import() — the exact source shape (destructuring patterns, magic
// comments, template literals) is itself what is under test.
it("should load only used exports", async () => {
	await import("../statical-dynamic-import/dir1/a").then(({ default: defaultValue, usedExports }) => {
		expect(defaultValue).toBe(3);
		expect(usedExports).toEqual(["default", "usedExports"]);
	})
});

it("should get warning on using 'webpackExports' with statically analyze-able dynamic import", async () => {
	// The magic comment is redundant here because the import is already
	// statically analyzable; webpack should warn but still work.
	await import(/* webpackExports: ["default"] */"../statical-dynamic-import/dir1/a?2").then(({ a }) => {
		expect(a).toBe(1);
	})
});

it("should not tree-shake default export for exportsType=default module", async () => {
	await import("../statical-dynamic-import/dir2/json/object.json").then(({ default: defaultValue }) => {
		expect(defaultValue).toEqual({ a: 1 });
	});
	await import("../statical-dynamic-import/dir2/json/array.json").then(({ default: defaultValue }) => {
		expect(defaultValue).toEqual(["a"]);
	});
	await import("../statical-dynamic-import/dir2/json/primitive.json").then(({ default: defaultValue }) => {
		expect(defaultValue).toBe("a");
	});
	await import("../statical-dynamic-import/dir2/a").then(({ default: defaultValue }) => {
		expect(defaultValue).toEqual({ a: 1, b: 2 });
	});
});

it("should not tree-shake default export for exportsType=default context module", async () => {
	// Template-literal requests create a context module; same expectations.
	const dir = "json";
	await import(`../statical-dynamic-import/dir3/${dir}/object.json`).then(({ default: defaultValue }) => {
		expect(defaultValue).toEqual({ a: 1 });
	});
	await import(`../statical-dynamic-import/dir3/${dir}/array.json`).then(({ default: defaultValue }) => {
		expect(defaultValue).toEqual(["a"]);
	});
	await import(`../statical-dynamic-import/dir3/${dir}/primitive.json`).then(({ default: defaultValue }) => {
		expect(defaultValue).toBe("a");
	});
	const file = "a";
	await import(`../statical-dynamic-import/dir3/${file}`).then(({ default: defaultValue }) => {
		expect(defaultValue).toEqual({ a: 1, b: 2 });
	});
});

it("should walk with correct order", async () => {
	// The comma expression inside the request must be evaluated before the
	// require it binds is used.
	var r;
	await import(`./dir1/a${r = require, ".js"}`).then(({ a, usedExports }) => {
		expect(r("./required").value).toBe(42);
		expect(a).toBe(1);
		expect(usedExports).toEqual(["a", "usedExports"]);
	});
});

it("should analyze arguments in call member chain", async () => {
	await import("../statical-dynamic-import/dir4/lib?2").then(({ b }) => {
		b.f((async () => {
			await import("../statical-dynamic-import/dir4/a?2").then(({ a, usedExports }) => {
				expect(a).toBe(1);
				expect(usedExports).toEqual(["a", "usedExports"]);
			});
		})());
	});
});

it("should static analyze dynamic import variable destructuring assignment", async () => {
	await import("../statical-dynamic-import/dir1/a?3").then(m => {
		const { default: def, usedExports } = m;
		expect(def).toBe(3);
		expect(usedExports).toEqual(["default", "usedExports"]);
	});
});

it("expect support of \"deep\" tree-shaking for destructuring assignment dynamic import", async () => {
	await import("../statical-dynamic-import-destructuring/lib").then(({ a: { aaa, usedExports: usedExportsA }, b: { bbb, usedExports: usedExportsB } }) => {
		expect(aaa).toBe(1);
		expect(bbb).toBe(2);
		expect(usedExportsA).toEqual(["aaa", "usedExports"]);
		expect(usedExportsB).toEqual(["bbb", "usedExports"]);
	});
	await import("../statical-dynamic-import-destructuring/lib?2").then(m => {
		const { a: { aaa, usedExports: usedExportsA }, b: { bbb, usedExports: usedExportsB } } = m;
		expect(aaa).toBe(1);
		expect(bbb).toBe(2);
		expect(usedExportsA).toEqual(["aaa", "usedExports"]);
		expect(usedExportsB).toEqual(["bbb", "usedExports"]);
	});
});
import warnings
import numpy as np
import pandas as pd
class Methods:
    """Benchmark the built-in aggregations of rolling/expanding windows."""

    params = (
        ["DataFrame", "Series"],
        [("rolling", {"window": 10}), ("rolling", {"window": 1000}), ("expanding", {})],
        ["int", "float"],
        [
            "median",
            "mean",
            "max",
            "min",
            "std",
            "count",
            "skew",
            "kurt",
            "sum",
            "sem",
            "nunique",
        ],
    )
    param_names = ["constructor", "window_kwargs", "dtype", "method"]

    def setup(self, constructor, window_kwargs, dtype, method):
        size = 10**5
        window_attr, window_args = window_kwargs
        values = (100 * np.random.random(size)).astype(dtype)
        container = getattr(pd, constructor)(values)
        self.window = getattr(container, window_attr)(**window_args)

    def time_method(self, constructor, window_kwargs, dtype, method):
        getattr(self.window, method)()

    def peakmem_method(self, constructor, window_kwargs, dtype, method):
        getattr(self.window, method)()
class Apply:
    """Benchmark ``rolling().apply`` with builtin, numpy and lambda callables."""

    params = (
        ["DataFrame", "Series"],
        [3, 300],
        ["int", "float"],
        [sum, np.sum, lambda x: np.sum(x) + 5],
        [True, False],
    )
    param_names = ["constructor", "window", "dtype", "function", "raw"]

    def setup(self, constructor, window, dtype, function, raw):
        size = 10**3
        values = (100 * np.random.random(size)).astype(dtype)
        self.roll = getattr(pd, constructor)(values).rolling(window)

    def time_rolling(self, constructor, window, dtype, function, raw):
        self.roll.apply(function, raw=raw)
class NumbaEngineMethods:
    """Benchmark window aggregations executed through the numba engine."""

    params = (
        ["DataFrame", "Series"],
        ["int", "float"],
        [("rolling", {"window": 10}), ("expanding", {})],
        ["sum", "max", "min", "median", "mean", "var", "std"],
        [True, False],
        [None, 100],
    )
    param_names = [
        "constructor",
        "dtype",
        "window_kwargs",
        "method",
        "parallel",
        "cols",
    ]

    def setup(self, constructor, dtype, window_kwargs, method, parallel, cols):
        size = 10**3
        window_attr, window_args = window_kwargs
        shape = (size, cols) if cols is not None and constructor != "Series" else size
        values = (100 * np.random.random(shape)).astype(dtype)
        frame_or_series = getattr(pd, constructor)(values)
        # Warm the numba compilation cache so timings measure execution only.
        with warnings.catch_warnings(record=True):
            # Catch parallel=True not being applicable e.g. 1D data
            self.window = getattr(frame_or_series, window_attr)(**window_args)
            getattr(self.window, method)(
                engine="numba", engine_kwargs={"parallel": parallel}
            )

    def test_method(self, constructor, dtype, window_kwargs, method, parallel, cols):
        with warnings.catch_warnings(record=True):
            getattr(self.window, method)(
                engine="numba", engine_kwargs={"parallel": parallel}
            )
class NumbaEngineApply:
    """Benchmark ``apply`` executed through the numba engine."""

    params = (
        ["DataFrame", "Series"],
        ["int", "float"],
        [("rolling", {"window": 10}), ("expanding", {})],
        [np.sum, lambda x: np.sum(x) + 5],
        [True, False],
        [None, 100],
    )
    param_names = [
        "constructor",
        "dtype",
        "window_kwargs",
        "function",
        "parallel",
        "cols",
    ]

    def setup(self, constructor, dtype, window_kwargs, function, parallel, cols):
        size = 10**3
        window_attr, window_args = window_kwargs
        shape = (size, cols) if cols is not None and constructor != "Series" else size
        values = (100 * np.random.random(shape)).astype(dtype)
        frame_or_series = getattr(pd, constructor)(values)
        # Warm the numba compilation cache so timings measure execution only.
        with warnings.catch_warnings(record=True):
            # Catch parallel=True not being applicable e.g. 1D data
            self.window = getattr(frame_or_series, window_attr)(**window_args)
            self.window.apply(
                function, raw=True, engine="numba", engine_kwargs={"parallel": parallel}
            )

    def test_method(self, constructor, dtype, window_kwargs, function, parallel, cols):
        with warnings.catch_warnings(record=True):
            self.window.apply(
                function, raw=True, engine="numba", engine_kwargs={"parallel": parallel}
            )
class EWMMethods:
    """Benchmark exponentially-weighted mean/std, including time-based halflife."""

    params = (
        ["DataFrame", "Series"],
        [
            ({"halflife": 10}, "mean"),
            ({"halflife": 10}, "std"),
            ({"halflife": 1000}, "mean"),
            ({"halflife": 1000}, "std"),
            (
                {
                    "halflife": "1 Day",
                    "times": pd.date_range("1900", periods=10**5, freq="23s"),
                },
                "mean",
            ),
        ],
        ["int", "float"],
    )
    param_names = ["constructor", "kwargs_method", "dtype"]

    def setup(self, constructor, kwargs_method, dtype):
        size = 10**5
        ewm_kwargs, method_name = kwargs_method
        values = (100 * np.random.random(size)).astype(dtype)
        self.method = method_name
        self.ewm = getattr(pd, constructor)(values).ewm(**ewm_kwargs)

    def time_ewm(self, constructor, kwargs_method, dtype):
        getattr(self.ewm, self.method)()
class VariableWindowMethods(Methods):
    """Variant of ``Methods`` using time-based (variable) window sizes."""

    params = (
        ["DataFrame", "Series"],
        ["50s", "1h", "1d"],
        ["int", "float"],
        ["median", "mean", "max", "min", "std", "count", "skew", "kurt", "sum", "sem"],
    )
    param_names = ["constructor", "window", "dtype", "method"]

    def setup(self, constructor, window, dtype, method):
        size = 10**5
        values = (100 * np.random.random(size)).astype(dtype)
        stamps = pd.date_range("2017-01-01", periods=size, freq="5s")
        self.window = getattr(pd, constructor)(values, index=stamps).rolling(window)
class Pairwise:
    """Benchmark pairwise corr/cov on plain and grouped windows."""

    params = (
        [({"window": 10}, "rolling"), ({"window": 1000}, "rolling"), ({}, "expanding")],
        ["corr", "cov"],
        [True, False],
    )
    param_names = ["window_kwargs", "method", "pairwise"]

    def setup(self, kwargs_window, method, pairwise):
        size = 10**4
        n_groups = 20
        window_args, window_attr = kwargs_window
        group_labels = [i for _ in range(size // n_groups) for i in range(n_groups)]
        values = np.random.random(size)
        self.df = pd.DataFrame(values)
        self.window = getattr(self.df, window_attr)(**window_args)
        grouped = pd.DataFrame({"A": group_labels, "B": values}).groupby("A")
        self.window_group = getattr(grouped, window_attr)(**window_args)

    def time_pairwise(self, kwargs_window, method, pairwise):
        getattr(self.window, method)(self.df, pairwise=pairwise)

    def time_groupby(self, kwargs_window, method, pairwise):
        getattr(self.window_group, method)(self.df, pairwise=pairwise)
class Quantile:
    """Benchmark ``rolling().quantile`` across interpolation strategies."""

    params = (
        ["DataFrame", "Series"],
        [10, 1000],
        ["int", "float"],
        [0, 0.5, 1],
        ["linear", "nearest", "lower", "higher", "midpoint"],
    )
    # BUG FIX: "interpolation" was missing, leaving param_names one short
    # of the five parameter axes declared above (setup/time take five).
    param_names = ["constructor", "window", "dtype", "percentile", "interpolation"]

    def setup(self, constructor, window, dtype, percentile, interpolation):
        N = 10**5
        arr = np.random.random(N).astype(dtype)
        self.roll = getattr(pd, constructor)(arr).rolling(window)

    def time_quantile(self, constructor, window, dtype, percentile, interpolation):
        self.roll.quantile(percentile, interpolation=interpolation)
class Rank:
    """Benchmark ``rolling().rank`` across tie-breaking methods."""

    params = (
        ["DataFrame", "Series"],
        [10, 1000],
        ["int", "float"],
        [True, False],
        [True, False],
        ["min", "max", "average"],
    )
    param_names = [
        "constructor",
        "window",
        "dtype",
        "percentile",
        "ascending",
        "method",
    ]

    def setup(self, constructor, window, dtype, percentile, ascending, method):
        size = 10**5
        values = np.random.random(size).astype(dtype)
        self.roll = getattr(pd, constructor)(values).rolling(window)

    def time_rank(self, constructor, window, dtype, percentile, ascending, method):
        self.roll.rank(pct=percentile, ascending=ascending, method=method)
class PeakMemFixedWindowMinMax:
    """Track peak memory of repeated fixed-window min/max."""

    params = ["min", "max"]

    def setup(self, operation):
        values = np.random.random(10**6)
        self.roll = pd.Series(values).rolling(2)

    def peakmem_fixed(self, operation):
        # Repeat so any per-call memory growth is visible to asv.
        for _ in range(5):
            getattr(self.roll, operation)()
class ForwardWindowMethods:
    """Benchmark aggregations over forward-looking fixed windows."""

    params = (
        ["DataFrame", "Series"],
        [10, 1000],
        ["int", "float"],
        ["median", "mean", "max", "min", "kurt", "sum"],
    )
    param_names = ["constructor", "window_size", "dtype", "method"]

    def setup(self, constructor, window_size, dtype, method):
        size = 10**5
        values = np.random.random(size).astype(dtype)
        forward_indexer = pd.api.indexers.FixedForwardWindowIndexer(
            window_size=window_size
        )
        self.roll = getattr(pd, constructor)(values).rolling(window=forward_indexer)

    def time_rolling(self, constructor, window_size, dtype, method):
        getattr(self.roll, method)()

    def peakmem_rolling(self, constructor, window_size, dtype, method):
        getattr(self.roll, method)()
class Groupby:
    """Benchmark groupby-rolling/expanding aggregations."""

    # BUG FIX: "sum" appeared twice in the method list, benchmarking the
    # same parameter combination twice under asv.
    params = (
        ["sum", "median", "mean", "max", "min", "kurt"],
        [
            ("rolling", {"window": 2}),
            ("rolling", {"window": "30s"}),
            ("expanding", {}),
        ],
    )
    param_names = ["method", "window_kwargs"]

    def setup(self, method, window_kwargs):
        N = 1000
        window, kwargs = window_kwargs
        df = pd.DataFrame(
            {
                "A": [str(i) for i in range(N)] * 10,
                "B": list(range(N)) * 10,
            }
        )
        # Time-based windows require a DatetimeIndex.
        if isinstance(kwargs.get("window", None), str):
            df.index = pd.date_range(start="1900-01-01", freq="1min", periods=N * 10)
        self.groupby_window = getattr(df.groupby("A"), window)(**kwargs)

    def time_method(self, method, window_kwargs):
        getattr(self.groupby_window, method)()
class GroupbyLargeGroups:
    # https://github.com/pandas-dev/pandas/issues/38038
    # specific example where the rolling operation on a larger dataframe
    # is relatively cheap (few but large groups), but creation of
    # MultiIndex of result can be expensive

    def setup(self):
        size = 100000
        self.df = pd.DataFrame(
            {"A": [1, 2] * (size // 2), "B": np.random.randn(size)}
        )

    def time_rolling_multiindex_creation(self):
        self.df.groupby("A").rolling(3).mean()
class GroupbyEWM:
    """Benchmark second-moment EWM aggregations on a groupby."""

    params = ["var", "std", "cov", "corr"]
    param_names = ["method"]

    def setup(self, method):
        frame = pd.DataFrame({"A": range(50), "B": range(50)})
        self.gb_ewm = frame.groupby("A").ewm(com=1.0)

    def time_groupby_method(self, method):
        getattr(self.gb_ewm, method)()
class GroupbyEWMEngine:
    """Compare the cython and numba execution engines for groupby EWM mean."""

    params = ["cython", "numba"]
    param_names = ["engine"]

    def setup(self, engine):
        frame = pd.DataFrame({"A": range(50), "B": range(50)})
        self.gb_ewm = frame.groupby("A").ewm(com=1.0)

    def time_groupby_mean(self, engine):
        self.gb_ewm.mean(engine=engine)
def table_method_func(x):
    """Rolling ``apply`` UDF: column-wise sum of ``x`` plus one."""
    return 1 + np.sum(x, axis=0)
class TableMethod:
    """Compare method="single" against method="table" execution for
    numba-engined rolling.apply and ewm.mean."""

    params = ["single", "table"]
    param_names = ["method"]

    def setup(self, method):
        values = np.random.randn(10, 1000)
        self.df = pd.DataFrame(values)

    def time_apply(self, method):
        # raw=True hands the UDF plain ndarrays, as the numba engine expects.
        roller = self.df.rolling(2, method=method)
        roller.apply(table_method_func, raw=True, engine="numba")

    def time_ewm_mean(self, method):
        self.df.ewm(1, method=method).mean(engine="numba")
from .pandas_vb_common import setup # noqa: F401 isort:skip | python | github | https://github.com/pandas-dev/pandas | asv_bench/benchmarks/rolling.py |
# Copyright: 2017, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause )
from __future__ import annotations
import collections.abc as c
from ansible.module_utils._internal import _no_six
from ansible.module_utils.common.text.converters import to_text
# Recognized truthy spellings. Note that 1, 1.0 and True compare (and hash)
# equal, so the frozenset stores them as one member; membership still matches
# every spelling.
BOOLEANS_TRUE = frozenset(('y', 'yes', 'on', '1', 'true', 't', 1, 1.0, True))
# Recognized falsy spellings, mirroring BOOLEANS_TRUE.
BOOLEANS_FALSE = frozenset(('n', 'no', 'off', '0', 'false', 'f', 0, 0.0, False))
# All recognized spellings; used to build the error message in boolean().
BOOLEANS = BOOLEANS_TRUE.union(BOOLEANS_FALSE)
def boolean(value, strict=True):
    """Convert *value* to a bool.

    Actual bools pass through unchanged. Strings/bytes are decoded,
    lowercased and stripped before being matched against the recognized
    truthy/falsy spellings. An unrecognized value raises TypeError when
    *strict* is true and is otherwise treated as False.
    """
    if isinstance(value, bool):
        return value

    if isinstance(value, (str, bytes)):
        candidate = to_text(value, errors='surrogate_or_strict').lower().strip()
    elif not isinstance(value, c.Hashable):
        # Unhashable values would blow up the set membership tests below;
        # map them to None so they fall through to strict/non-strict handling.
        candidate = None
    else:
        candidate = value

    if candidate in BOOLEANS_TRUE:
        return True
    if candidate in BOOLEANS_FALSE or not strict:
        return False

    raise TypeError("The value '%s' is not a valid boolean. Valid booleans include: %s" % (to_text(value), ', '.join(repr(i) for i in BOOLEANS)))
def __getattr__(importable_name):
    # Module-level __getattr__ (PEP 562): route lookups of the removed
    # six-era aliases ("binary_type"/"text_type") through the deprecation
    # shim instead of raising AttributeError immediately.
    return _no_six.deprecate(importable_name, __name__, "binary_type", "text_type")
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package vault
import (
"context"
"crypto/hmac"
"encoding/base64"
"errors"
"fmt"
"maps"
"net/textproto"
"os"
paths "path"
"slices"
"strconv"
"strings"
"time"
"github.com/armon/go-metrics"
"github.com/golang/protobuf/proto"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-secure-stdlib/strutil"
"github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/helper/constants"
"github.com/hashicorp/vault/helper/identity"
"github.com/hashicorp/vault/helper/identity/mfa"
"github.com/hashicorp/vault/helper/metricsutil"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/http/priority"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/errutil"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/policyutil"
"github.com/hashicorp/vault/sdk/helper/wrapping"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault/quotas"
"github.com/hashicorp/vault/vault/tokens"
uberAtomic "go.uber.org/atomic"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
const (
	// replTimeout presumably bounds waits on replication state —
	// NOTE(review): confirm at the usage sites (not visible in this chunk).
	replTimeout = 1 * time.Second

	// EnvVaultDisableLocalAuthMountEntities is the environment variable that,
	// per its name, disables entities for local auth mounts (behavior is
	// implemented where it is read).
	EnvVaultDisableLocalAuthMountEntities = "VAULT_DISABLE_LOCAL_AUTH_MOUNT_ENTITIES"

	// base path to store locked users
	coreLockedUsersPath = "core/login/lockedUsers/"
)
var (
	// DefaultMaxRequestDuration is the amount of time we'll wait for a request
	// to complete, unless overridden on a per-handler basis
	DefaultMaxRequestDuration = 90 * time.Second

	// ErrNoApplicablePolicies is a benign sentinel: a group's policies were
	// filtered down to nothing for the token's namespace (see
	// getApplicableGroupPolicies / filterGroupPoliciesByNS).
	ErrNoApplicablePolicies = errors.New("no applicable policies")
	// ErrPolicyNotExistInTypeMap is returned when a policy has no entry in
	// the policy type map; getApplicableGroupPolicies logs and skips it.
	ErrPolicyNotExistInTypeMap = errors.New("policy does not exist in type map")

	// egpDebugLogging toggles EGP debug logging — NOTE(review): its writer
	// is not visible in this file chunk.
	egpDebugLogging bool
)
// HandlerProperties is used to seed configuration into a vaulthttp.Handler.
// It's in this package to avoid a circular dependency
type HandlerProperties struct {
	Core                  *Core
	ListenerConfig        *configutil.Listener
	DisablePrintableCheck bool
	// RecoveryMode/RecoveryToken support running the handler in recovery
	// mode — NOTE(review): consumed by the HTTP layer, not visible here.
	RecoveryMode  bool
	RecoveryToken *uberAtomic.String
	// RequestIDGenerator is primary used for testing purposes to allow tests to
	// control the request IDs deterministically. In production code (i.e. if this
	// is nil) the handler will generate UUIDs.
	RequestIDGenerator func() (string, error)
}
// fetchEntityAndDerivedPolicies returns the entity object for the given entity
// ID. If the entity is merged into a different entity object, the entity into
// which the given entity ID is merged into will be returned. This function
// also returns the cumulative list of policies that the entity is entitled to
// if skipDeriveEntityPolicies is set to false. This list includes the policies from the
// entity itself and from all the groups in which the given entity ID is a member of.
// The returned policy map is keyed by namespace ID.
func (c *Core) fetchEntityAndDerivedPolicies(ctx context.Context, tokenNS *namespace.Namespace, entityID string, skipDeriveEntityPolicies bool) (*identity.Entity, map[string][]string, error) {
	// No entity ID or no identity store: nothing to resolve; all-nil return
	// is the "no entity" signal to callers.
	if entityID == "" || c.identityStore == nil {
		return nil, nil, nil
	}

	entity, err := c.fetchEntity(entityID, false)
	if entity == nil && err == nil {
		return nil, nil, nil
	}
	if err != nil {
		return nil, nil, err
	}

	policies := make(map[string][]string)
	if !skipDeriveEntityPolicies {
		// c.logger.Debug("entity successfully fetched; adding entity policies to token's policies to create ACL")

		// Attach the policies on the entity
		if len(entity.Policies) != 0 {
			policies[entity.NamespaceID] = append(policies[entity.NamespaceID], entity.Policies...)
		}

		// Policies inherited via group membership, filtered below by the
		// namespace relationship between the token and each group policy.
		groupPolicies, err := c.identityStore.groupPoliciesByEntityID(entity.ID)
		if err != nil {
			c.logger.Error("failed to fetch group policies", "error", err)
			return nil, nil, err
		}

		policiesByNS, err := c.filterGroupPoliciesByNS(ctx, tokenNS, groupPolicies)
		if err != nil {
			return nil, nil, err
		}

		for nsID, pss := range policiesByNS {
			policies[nsID] = append(policies[nsID], pss...)
		}
	}

	return entity, policies, err
}
// fetchEntity returns the entity object for the given entity
// ID. If the entity is merged into a different entity object, the entity into
// which the given entity ID is merged into will be returned.
// clone presumably requests a defensive copy — confirm in fetchEntityInTxn.
func (c *Core) fetchEntity(entityID string, clone bool) (*identity.Entity, error) {
	if entityID == "" || c.identityStore == nil {
		return nil, nil
	}

	// Read-only MemDB transaction is sufficient for a lookup.
	txn := c.identityStore.db.Txn(false)

	return c.identityStore.fetchEntityInTxn(txn, entityID, clone)
}
// filterGroupPoliciesByNS filters a map of namespace ID -> group policy names
// down to the policies that should actually apply to a token living in
// tokenNS, based on the configured group policy application mode and the
// namespace relationship between the token and each policy.
func (c *Core) filterGroupPoliciesByNS(ctx context.Context, tokenNS *namespace.Namespace, groupPolicies map[string][]string) (map[string][]string, error) {
	mode, err := c.GetGroupPolicyApplicationMode(ctx)
	if err != nil {
		return nil, err
	}

	filtered := make(map[string][]string)
	for nsID, candidates := range groupPolicies {
		applicable, err := c.getApplicableGroupPolicies(ctx, tokenNS, nsID, candidates, mode)
		// ErrNoApplicablePolicies is benign: nothing applies for this namespace.
		if err != nil && err != ErrNoApplicablePolicies {
			return nil, err
		}
		applicable = strutil.RemoveDuplicates(applicable, false)
		if len(applicable) != 0 {
			filtered[nsID] = append(filtered[nsID], applicable...)
		}
	}

	return filtered, nil
}
// getApplicableGroupPolicies returns the subset of nsPolicies (living in the
// namespace identified by nsID) that should apply to a token in tokenNS,
// based on the group policy application mode and the relationship between
// the two namespaces. It returns ErrNoApplicablePolicies when nothing
// applies; callers treat that sentinel as benign rather than as a failure.
func (c *Core) getApplicableGroupPolicies(ctx context.Context, tokenNS *namespace.Namespace, nsID string, nsPolicies []string, policyApplicationMode string) ([]string, error) {
	policyNS, err := NamespaceByID(ctx, nsID, c)
	if err != nil {
		return nil, err
	}
	if policyNS == nil {
		return nil, namespace.ErrNoNamespace
	}

	// Same namespace as the token: every policy applies unconditionally.
	// (Idiom fix: copy via variadic append instead of an element-wise loop.)
	if tokenNS.Path == policyNS.Path {
		return append([]string(nil), nsPolicies...), nil
	}

	var filteredPolicies []string
	for _, policyName := range nsPolicies {
		t, err := c.policyStore.GetNonEGPPolicyType(policyNS.ID, policyName)
		// errors.Is(nil, target) is false, so the previous explicit
		// err != nil guard was redundant and has been dropped.
		if errors.Is(err, ErrPolicyNotExistInTypeMap) {
			// When we attempt to get a non-EGP policy type, and receive an
			// explicit error that it doesn't exist (in the type map) we log the
			// ns/policy and continue without error.
			c.Logger().Debug(fmt.Errorf("%w: %v/%v", err, policyNS.ID, policyName).Error())
			continue
		}
		if err != nil || t == nil {
			return nil, fmt.Errorf("failed to look up type of policy: %w", err)
		}
		switch *t {
		case PolicyTypeRGP:
			// RGPs apply when the token's namespace descends from the
			// policy's namespace.
			if tokenNS.HasParent(policyNS) {
				filteredPolicies = append(filteredPolicies, policyName)
			}
		case PolicyTypeACL:
			if policyApplicationMode != groupPolicyApplicationModeWithinNamespaceHierarchy {
				// Group policy application mode isn't set to enforce
				// the namespace hierarchy, so apply all the ACLs,
				// regardless of their namespaces.
				filteredPolicies = append(filteredPolicies, policyName)
				continue
			}
			// Hierarchy mode: ACLs apply when the policy's namespace
			// descends from the token's namespace.
			if policyNS.HasParent(tokenNS) {
				filteredPolicies = append(filteredPolicies, policyName)
			}
		default:
			return nil, fmt.Errorf("unexpected policy type: %v", t)
		}
	}

	if len(filteredPolicies) == 0 {
		return nil, ErrNoApplicablePolicies
	}

	return filteredPolicies, nil
}
// fetchACLTokenEntryAndEntity resolves the request's client token into its
// token entry, the associated identity entity, the identity policies derived
// for that entity (keyed by namespace ID), and the ACL built from token +
// identity + inline policies. It returns logical.ErrPermissionDenied for
// missing/invalid tokens or failed CIDR binding, and ErrInternalError for
// infrastructure failures.
func (c *Core) fetchACLTokenEntryAndEntity(ctx context.Context, req *logical.Request) (*ACL, *logical.TokenEntry, *identity.Entity, map[string][]string, error) {
	defer metrics.MeasureSince([]string{"core", "fetch_acl_and_token"}, time.Now())

	// Ensure there is a client token
	if req.ClientToken == "" {
		return nil, nil, nil, nil, logical.ErrPermissionDenied
	}

	if c.tokenStore == nil {
		c.logger.Error("token store is unavailable")
		return nil, nil, nil, nil, ErrInternalError
	}

	// Resolve the token policy
	var te *logical.TokenEntry
	switch req.TokenEntry() {
	case nil:
		var err error
		te, err = c.tokenStore.Lookup(ctx, req.ClientToken)
		if err != nil {
			c.logger.Error("failed to lookup acl token", "error", err)
			return nil, nil, nil, nil, ErrInternalError
		}
		// Set the token entry here since it has not been cached yet
		req.SetTokenEntry(te)
	default:
		te = req.TokenEntry()
	}

	// Ensure the token is valid
	if te == nil {
		return nil, nil, nil, nil, multierror.Append(logical.ErrPermissionDenied, logical.ErrInvalidToken)
	}

	// CIDR checks bind all tokens except non-expiring root tokens
	if te.TTL != 0 && len(te.BoundCIDRs) > 0 {
		var valid bool
		remoteSockAddr, err := sockaddr.NewSockAddr(req.Connection.RemoteAddr)
		if err != nil {
			if c.Logger().IsDebug() {
				c.Logger().Debug("could not parse remote addr into sockaddr", "error", err, "remote_addr", req.Connection.RemoteAddr)
			}
			return nil, nil, nil, nil, logical.ErrPermissionDenied
		}
		// The request is allowed if any bound CIDR contains the caller.
		for _, cidr := range te.BoundCIDRs {
			if cidr.Contains(remoteSockAddr) {
				valid = true
				break
			}
		}
		if !valid {
			return nil, nil, nil, nil, logical.ErrPermissionDenied
		}
	}

	policyNames := make(map[string][]string)
	// Add tokens policies
	policyNames[te.NamespaceID] = append(policyNames[te.NamespaceID], te.Policies...)

	tokenNS, err := NamespaceByID(ctx, te.NamespaceID, c)
	if err != nil {
		c.logger.Error("failed to fetch token namespace", "error", err)
		return nil, nil, nil, nil, ErrInternalError
	}
	if tokenNS == nil {
		c.logger.Error("failed to fetch token namespace", "error", namespace.ErrNoNamespace)
		return nil, nil, nil, nil, ErrInternalError
	}

	// Add identity policies from all the namespaces
	entity, identityPolicies, err := c.fetchEntityAndDerivedPolicies(ctx, tokenNS, te.EntityID, te.NoIdentityPolicies)
	if err != nil {
		return nil, nil, nil, nil, ErrInternalError
	}
	for nsID, nsPolicies := range identityPolicies {
		policyNames[nsID] = policyutil.SanitizePolicies(append(policyNames[nsID], nsPolicies...), false)
	}

	// Attach token's namespace information to the context. Wrapping tokens by
	// should be able to be used anywhere, so we also special case behavior.
	var tokenCtx context.Context
	if len(policyNames) == 1 &&
		len(policyNames[te.NamespaceID]) == 1 &&
		(policyNames[te.NamespaceID][0] == responseWrappingPolicyName ||
			policyNames[te.NamespaceID][0] == controlGroupPolicyName) &&
		(strings.HasSuffix(req.Path, "sys/wrapping/unwrap") ||
			strings.HasSuffix(req.Path, "sys/wrapping/lookup") ||
			strings.HasSuffix(req.Path, "sys/wrapping/rewrap")) {
		// Use the request namespace; will find the copy of the policy for the
		// local namespace
		tokenCtx = ctx
	} else {
		// Use the token's namespace for looking up policy
		tokenCtx = namespace.ContextWithNamespace(ctx, tokenNS)
	}

	// Add the inline policy if it's set
	policies := make([]*Policy, 0)
	if te.InlinePolicy != "" {
		// TODO (HCL_DUP_KEYS_DEPRECATION): return to ParseACLPolicy once the deprecation is done
		inlinePolicy, duplicate, err := ParseACLPolicyCheckDuplicates(tokenNS, te.InlinePolicy)
		if err != nil {
			return nil, nil, nil, nil, ErrInternalError
		}
		if duplicate {
			c.logger.Warn("HCL inline policy contains duplicate attributes, which will no longer be supported in a future version", "namespace", tokenNS.Path)
		}
		policies = append(policies, inlinePolicy)
	}

	// Construct the corresponding ACL object. ACL construction should be
	// performed on the token's namespace.
	acl, err := c.policyStore.ACL(tokenCtx, entity, policyNames, policies...)
	if err != nil {
		c.logger.Error("failed to construct ACL", "error", err)
		return nil, nil, nil, nil, ErrInternalError
	}

	return acl, te, entity, identityPolicies, nil
}
// CheckTokenWithLock is a shutdown-aware wrapper around CheckToken: it holds
// the core state read lock for the duration of the check and refuses to run
// when the core is sealed or the active context has already been canceled.
func (c *Core) CheckTokenWithLock(ctx context.Context, req *logical.Request) (*logical.Auth, *logical.TokenEntry, error) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()

	// Bail out early if we are sealed or in the middle of shutting down.
	if c.Sealed() {
		return nil, nil, errors.New("core is sealed")
	}
	if active := c.activeContext; active != nil && active.Err() != nil {
		return nil, nil, active.Err()
	}

	return c.CheckToken(ctx, req, false)
}
// existenceCheck routes the request's existence check and maps the outcome to
// the operation the request should actually perform: UpdateOperation when the
// backend has no existence check or the resource already exists, and
// CreateOperation when the resource does not exist yet.
func (c *Core) existenceCheck(ctx context.Context, req *logical.Request) (*logical.Operation, error) {
	existsResp, checkExists, resourceExists, err := c.router.RouteExistenceCheck(ctx, req)
	switch err {
	case nil:
		if existsResp != nil && existsResp.IsError() {
			return nil, existsResp.Error()
		}
		// Otherwise, continue on
	case logical.ErrUnsupportedPath:
		// fail later via bad path to avoid confusing items in the log
		checkExists = false
	case logical.ErrRelativePath:
		return nil, errutil.UserError{Err: err.Error()}
	default:
		c.logger.Error("failed to run existence check", "error", err)
		if _, ok := err.(errutil.UserError); ok {
			return nil, err
		}
		return nil, ErrInternalError
	}

	// No existence check means update (pre-0.5 behavior); with a check, an
	// existing resource forces update and a missing one forces create.
	existenceCheckOp := logical.UpdateOperation
	if checkExists && !resourceExists {
		existenceCheckOp = logical.CreateOperation
	}

	return &existenceCheckOp, nil
}
// CheckToken returns information about the state of authentication for a request.
// The unauth flag should be set true for "login" requests; these are requests to
// logical.Paths.Unauthenticated endpoints that are not deleted auth requests,
// though note that despite being called login requests, not all of them are actually
// login requests, as in attempts to authenticate and get back a token.
//
// Generally speaking, if CheckToken return an error, the request should fail,
// but there are exceptions, e.g. ErrPerfStandbyPleaseForward may just result in
// the request being forwarded to the active node. A returned error does not
// necessarily mean that the other return values will be nil; often they are
// useful e.g. for auditing failing requests.
//
// When unauth is true, fail root path requests, don't worry about regular
// ACL policy checks (though sentinel checks may still apply), and don't do
// client counting. When unauth is false, an invalid token or a lack of ACL
// perms will result in an error, and client counting applies.
func (c *Core) CheckToken(ctx context.Context, req *logical.Request, unauth bool) (*logical.Auth, *logical.TokenEntry, error) {
	defer metrics.MeasureSince([]string{"core", "check_token"}, time.Now())

	var acl *ACL
	var te *logical.TokenEntry
	var entity *identity.Entity
	var identityPolicies map[string][]string

	// Even if unauth, if a token is provided, there's little reason not to
	// gather as much info as possible for the audit log and to e.g. control
	// trace mode for EGPs.
	if !unauth || (unauth && req.ClientToken != "") {
		var err error
		acl, te, entity, identityPolicies, err = c.fetchACLTokenEntryAndEntity(ctx, req)
		// In the unauth case we don't want to fail the command, since it's
		// unauth, we just have no information to attach to the request, so
		// ignore errors...this was best-effort anyways
		if err != nil && !unauth {
			return nil, te, err
		}
	}

	if entity != nil && entity.Disabled {
		c.logger.Warn("permission denied as the entity on the token is disabled")
		return nil, te, logical.ErrPermissionDenied
	}
	if te != nil && te.EntityID != "" && entity == nil {
		// The token references an entity we could not resolve; a perf standby
		// may simply be behind, so ask the caller to forward to the active node.
		if c.perfStandby {
			return nil, nil, logical.ErrPerfStandbyPleaseForward
		}
		c.logger.Warn("permission denied as the entity on the token is invalid")
		return nil, te, logical.ErrPermissionDenied
	}

	// Check if this is a root protected path
	rootPath := c.router.RootPath(ctx, req.Path)

	if rootPath && unauth {
		return nil, nil, errors.New("cannot access root path in unauthenticated request")
	}

	// At this point we won't be forwarding a raw request; we should delete
	// authorization headers as appropriate. Don't delete Authorization headers
	// if this is an unauth (login) request, as that precludes things like spiffe
	// auth handling login requests with the payload in the Authorization header.
	// However, if we did find a valid vault token in the header, we'll still
	// suppress the header even if unauth is true, on the principle that we
	// don't want to expose vault tokens to plugins which might do something
	// nefarious with them.
	switch {
	case req.ClientTokenSource == logical.ClientTokenFromVaultHeader:
		delete(req.Headers, consts.AuthHeaderName)
	case req.ClientTokenSource == logical.ClientTokenFromAuthzHeader && !unauth && te == nil:
		if headers, ok := req.Headers["Authorization"]; ok {
			// Strip only "Bearer ..." values; other Authorization schemes pass through.
			retHeaders := make([]string, 0, len(headers))
			for _, v := range headers {
				if strings.HasPrefix(v, "Bearer ") {
					continue
				}
				retHeaders = append(retHeaders, v)
			}
			req.Headers["Authorization"] = retHeaders
		}
	}

	// When we receive a write of either type, rather than require clients to
	// PUT/POST and trust the operation, we ask the backend to give us the real
	// skinny -- if the backend implements an existence check, it can tell us
	// whether a particular resource exists. Then we can mark it as an update
	// or creation as appropriate.
	if req.Operation == logical.CreateOperation || req.Operation == logical.UpdateOperation {
		op, err := c.existenceCheck(ctx, req)
		if err != nil {
			return nil, te, err
		}
		req.Operation = *op
	}

	// Create the auth response
	auth := &logical.Auth{
		ClientToken: req.ClientToken,
		Accessor:    req.ClientTokenAccessor,
	}

	var clientID string
	var isTWE bool
	if te != nil {
		auth.IdentityPolicies = identityPolicies[te.NamespaceID]
		auth.TokenPolicies = te.Policies
		auth.Policies = policyutil.SanitizePolicies(append(te.Policies, identityPolicies[te.NamespaceID]...), false)
		auth.Metadata = te.Meta
		auth.DisplayName = te.DisplayName
		auth.EntityID = te.EntityID
		// Policies outside the token's own namespace are reported separately.
		delete(identityPolicies, te.NamespaceID)
		auth.ExternalNamespacePolicies = identityPolicies
		// Store the entity ID in the request object
		req.EntityID = te.EntityID
		auth.TokenType = te.Type
		auth.TTL = te.TTL
		if te.CreationTime > 0 {
			auth.IssueTime = time.Unix(te.CreationTime, 0)
		}
		// clientID/isTWE feed activity-log client counting below.
		clientID, isTWE = te.CreateClientID()
		req.ClientID = clientID
	}

	// A two-step recover targets a different source path than the request path.
	twoStepRecover := req.Operation == logical.RecoverOperation && req.RecoverSourcePath != "" && req.RecoverSourcePath != req.Path
	var alternateRecoverCapability *logical.Operation
	if twoStepRecover {
		// An existence check call requires the operation to be set to either
		// create or update. We set it to create here, then switch it back once
		// the existence check is done.
		req.Operation = logical.CreateOperation
		op, err := c.existenceCheck(ctx, req)
		req.Operation = logical.RecoverOperation
		if err != nil {
			return nil, te, err
		}
		alternateRecoverCapability = op
	}

	// Check the standard non-root ACLs. Return the token entry if it's not
	// allowed so we can decrement the use count.
	authResults := c.performPolicyChecks(ctx, acl, te, req, entity, &PolicyCheckOpts{
		Unauth:                     unauth,
		RootPrivsRequired:          rootPath,
		CheckSourcePath:            twoStepRecover,
		RecoverAlternateCapability: alternateRecoverCapability,
	})

	// Assign the sudo path priority if the request is issued against a sudo path.
	if rootPath {
		pri := uint8(priority.NeverDrop)
		auth.HTTPRequestPriority = &pri
	}

	auth.PolicyResults = &logical.PolicyResults{
		Allowed: authResults.Allowed,
	}

	if !authResults.Allowed {
		retErr := authResults.Error

		// If we get a control group error and we are a performance standby,
		// restore the client token information to the request so that we can
		// forward this request properly to the active node.
		if retErr.ErrorOrNil() != nil && checkErrControlGroupTokenNeedsCreated(retErr) &&
			c.perfStandby && len(req.ClientToken) != 0 {
			switch req.ClientTokenSource {
			case logical.ClientTokenFromVaultHeader:
				req.Headers[consts.AuthHeaderName] = []string{req.ClientToken}
			case logical.ClientTokenFromAuthzHeader:
				req.Headers["Authorization"] = append(req.Headers["Authorization"], fmt.Sprintf("Bearer %s", req.ClientToken))
			}
			// We also return the appropriate error so that the caller can forward the
			// request to the active node
			return auth, te, logical.ErrPerfStandbyPleaseForward
		}
		if authResults.Error.ErrorOrNil() == nil || authResults.DeniedError {
			retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
		}
		return auth, te, retErr
	}

	if authResults.ACLResults != nil && len(authResults.ACLResults.GrantingPolicies) > 0 {
		auth.PolicyResults.GrantingPolicies = authResults.ACLResults.GrantingPolicies
	}

	if authResults.SentinelResults != nil && len(authResults.SentinelResults.GrantingPolicies) > 0 {
		auth.PolicyResults.GrantingPolicies = append(auth.PolicyResults.GrantingPolicies, authResults.SentinelResults.GrantingPolicies...)
	}

	c.activityLogLock.RLock()
	activityLog := c.activityLog
	c.activityLogLock.RUnlock()
	// If it is an authenticated ( i.e. with vault token ) request, increment client count
	if !unauth && activityLog != nil {
		err := activityLog.HandleTokenUsage(ctx, te, clientID, isTWE)
		if err != nil {
			return auth, te, err
		}
	}
	return auth, te, nil
}
// HandleRequest is used to handle a new incoming request
// It delegates with doLocking=true so the core state lock is acquired for
// the duration of the request.
func (c *Core) HandleRequest(httpCtx context.Context, req *logical.Request) (resp *logical.Response, err error) {
	return c.switchedLockHandleRequest(httpCtx, req, true)
}
// switchedLockHandleRequest is the common request entry point; when doLocking
// is true it holds the core state read lock for the whole request. It rejects
// requests while sealed or (non-perf) standby, derives a cancelable request
// context from the active context, mirrors selected values from the HTTP
// context onto it, and dispatches to handleCancelableRequest.
func (c *Core) switchedLockHandleRequest(httpCtx context.Context, req *logical.Request, doLocking bool) (resp *logical.Response, err error) {
	if doLocking {
		c.stateLock.RLock()
		defer c.stateLock.RUnlock()
	}
	if c.Sealed() {
		return nil, consts.ErrSealed
	}
	if c.standby && !c.perfStandby {
		return nil, consts.ErrStandby
	}

	if c.activeContext == nil || c.activeContext.Err() != nil {
		return nil, errors.New("active context canceled after getting state lock")
	}

	ctx, cancel := context.WithCancel(c.activeContext)

	// Propagate cancellation of the incoming HTTP context to the derived
	// request context; the goroutine exits when either context is done.
	go func(ctx context.Context, httpCtx context.Context) {
		select {
		case <-ctx.Done():
		case <-httpCtx.Done():
			cancel()
		}
	}(ctx, httpCtx)

	ns, err := namespace.FromContext(httpCtx)
	if err != nil {
		cancel()
		return nil, fmt.Errorf("could not parse namespace from http context: %w", err)
	}
	ctx = namespace.ContextWithNamespace(ctx, ns)

	// Carry request-scoped metadata from the HTTP context over to the new
	// context so downstream handlers can see it.
	inFlightReqID, ok := httpCtx.Value(logical.CtxKeyInFlightRequestID{}).(string)
	if ok {
		ctx = context.WithValue(ctx, logical.CtxKeyInFlightRequestID{}, inFlightReqID)
	}

	requestRole, ok := httpCtx.Value(logical.CtxKeyRequestRole{}).(string)
	if ok {
		ctx = context.WithValue(ctx, logical.CtxKeyRequestRole{}, requestRole)
	}

	if disable_repl_status, ok := logical.ContextDisableReplicationStatusEndpointsValue(httpCtx); ok {
		ctx = logical.CreateContextDisableReplicationStatusEndpoints(ctx, disable_repl_status)
	}

	body, ok := logical.ContextOriginalBodyValue(httpCtx)
	if ok {
		ctx = logical.CreateContextOriginalBody(ctx, body)
	}

	redactVersion, redactAddresses, redactClusterName, ok := logical.CtxRedactionSettingsValue(httpCtx)
	if ok {
		ctx = logical.CreateContextRedactionSettings(ctx, redactVersion, redactAddresses, redactClusterName)
	}

	inFlightRequestPriority, ok := httpCtx.Value(logical.CtxKeyInFlightRequestPriority{}).(priority.AOPWritePriority)
	if ok {
		ctx = context.WithValue(ctx, logical.CtxKeyInFlightRequestPriority{}, inFlightRequestPriority)
	}

	resp, err = c.handleCancelableRequest(ctx, req)

	// Drop the cached token entry so it cannot leak across requests.
	req.SetTokenEntry(nil)
	cancel()
	return resp, err
}
func (c *Core) handleCancelableRequest(ctx context.Context, req *logical.Request) (resp *logical.Response, err error) {
waitGroup, err := waitForReplicationState(ctx, c, req)
if err != nil {
return nil, err
}
// Decrement the wait group when our request is done
if waitGroup != nil {
defer waitGroup.Done()
}
if c.MissingRequiredState(req.RequiredState(), c.perfStandby) {
return nil, logical.ErrMissingRequiredState
}
// Ensure the req contains a MountPoint as it is depended on by some
// functionality (e.g. quotas)
var entry *MountEntry
req.MountPoint, entry = c.router.MatchingMountAndEntry(ctx, req.Path)
if entry != nil && entry.Table == mountTableType && !c.IsMountTypeAllowed(entry.Type) {
return logical.ErrorResponse("mounts of type %q aren't supported by license", entry.Type), logical.ErrInvalidRequest
}
// If the request requires a snapshot ID, we need to perform checks to
// ensure the request is valid and lock the snapshot, so it doesn't get
// unloaded while the request is being processed.
if req.RequiresSnapshotID != "" {
if c.perfStandby {
return nil, logical.ErrPerfStandbyPleaseForward
}
unlockSnapshot, err := c.lockSnapshotForRequest(ctx, req, entry)
if err != nil {
return logical.ErrorResponse("unable to lock snapshot: " + err.Error()), err
}
defer unlockSnapshot()
}
// Allowing writing to a path ending in / makes it extremely difficult to
// understand user intent for the filesystem-like backends (kv,
// cubbyhole) -- did they want a key named foo/ or did they want to write
// to a directory foo/ with no (or forgotten) key, or...? It also affects
// lookup, because paths ending in / are considered prefixes by some
// backends. Basically, it's all just terrible, so don't allow it.
if strings.HasSuffix(req.Path, "/") &&
(req.Operation == logical.UpdateOperation ||
req.Operation == logical.CreateOperation ||
req.Operation == logical.PatchOperation ||
req.Operation == logical.RecoverOperation) {
if entry == nil || !entry.Config.TrimRequestTrailingSlashes {
return logical.ErrorResponse("cannot write to a path ending in '/'"), nil
} else {
req.Path = strings.TrimSuffix(req.Path, "/")
}
}
err = c.PopulateTokenEntry(ctx, req)
if err != nil {
if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
return nil, multierror.Append(err, logical.ErrInvalidToken)
}
return nil, err
}
// Always forward requests that are using a limited use count token.
if c.perfStandby && req.ClientTokenRemainingUses > 0 {
// Prevent forwarding on local-only requests.
return nil, logical.ErrPerfStandbyPleaseForward
}
ns, err := namespace.FromContext(ctx)
if err != nil {
return nil, fmt.Errorf("could not parse namespace from http context: %w", err)
}
var requestBodyToken string
var returnRequestAuthToken bool
// req.Path will be relative by this point. The prefix check is first
// to fail faster if we're not in this situation since it's a hot path
switch {
case strings.HasPrefix(req.Path, "sys/wrapping/"), strings.HasPrefix(req.Path, "auth/token/"):
// Get the token ns info; if we match the paths below we want to
// swap in the token context (but keep the relative path)
te := req.TokenEntry()
newCtx := ctx
if te != nil {
ns, err := NamespaceByID(ctx, te.NamespaceID, c)
if err != nil {
c.Logger().Warn("error looking up namespace from the token's namespace ID", "error", err)
return nil, err
}
if ns != nil {
newCtx = namespace.ContextWithNamespace(ctx, ns)
}
}
switch req.Path {
// Route the token wrapping request to its respective sys NS
case "sys/wrapping/lookup", "sys/wrapping/rewrap", "sys/wrapping/unwrap":
ctx = newCtx
// A lookup on a token that is about to expire returns nil, which means by the
// time we can validate a wrapping token lookup will return nil since it will
// be revoked after the call. So we have to do the validation here.
valid, err := c.validateWrappingToken(ctx, req)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf("error validating wrapping token: %s", err.Error())), logical.ErrPermissionDenied
}
if !valid {
return nil, consts.ErrInvalidWrappingToken
}
// The -self paths have no meaning outside of the token NS, so
// requests for these paths always go to the token NS
case "auth/token/lookup-self", "auth/token/renew-self", "auth/token/revoke-self":
ctx = newCtx
returnRequestAuthToken = true
// For the following operations, we can set the proper namespace context
// using the token's embedded nsID if a relative path was provided.
// The operation will still be gated by ACLs, which are checked later.
case "auth/token/lookup", "auth/token/renew", "auth/token/revoke", "auth/token/revoke-orphan":
token, ok := req.Data["token"]
// If the token is not present (e.g. a bad request), break out and let the backend
// handle the error
if !ok {
// If this is a token lookup request and if the token is not
// explicitly provided, it will use the client token so we simply set
// the context to the client token's context.
if req.Path == "auth/token/lookup" {
ctx = newCtx
}
break
}
if token == nil {
return logical.ErrorResponse("invalid token"), logical.ErrPermissionDenied
}
// We don't care if the token is a server side consistent token or not. Either way, we're going
// to be returning it for these paths instead of the short token stored in vault.
requestBodyToken = token.(string)
if IsSSCToken(token.(string)) {
token, err = c.CheckSSCToken(ctx, token.(string), c.isLoginRequest(ctx, req), c.perfStandby)
// If we receive an error from CheckSSCToken, we can assume the token is bad somehow, and the client
// should receive a 403 bad token error like they do for all other invalid tokens, unless the error
// specifies that we should forward the request or retry the request.
if err != nil {
if errors.Is(err, logical.ErrPerfStandbyPleaseForward) || errors.Is(err, logical.ErrMissingRequiredState) {
return nil, err
}
return logical.ErrorResponse("bad token"), logical.ErrPermissionDenied
}
req.Data["token"] = token
}
_, nsID := namespace.SplitIDFromString(token.(string))
if nsID != "" {
ns, err := NamespaceByID(ctx, nsID, c)
if err != nil {
c.Logger().Warn("error looking up namespace from the token's namespace ID", "error", err)
return nil, err
}
if ns != nil {
ctx = namespace.ContextWithNamespace(ctx, ns)
}
}
}
// The following relative sys/leases/ paths handles re-routing requests
// to the proper namespace using the lease ID on applicable paths.
case strings.HasPrefix(req.Path, "sys/leases/"):
switch req.Path {
// For the following operations, we can set the proper namespace context
// using the lease's embedded nsID if a relative path was provided.
// The operation will still be gated by ACLs, which are checked later.
case "sys/leases/lookup", "sys/leases/renew", "sys/leases/revoke", "sys/leases/revoke-force":
leaseID, ok := req.Data["lease_id"]
// If lease ID is not present, break out and let the backend handle the error
_, isString := leaseID.(string)
if !ok || !isString {
break
}
_, nsID := namespace.SplitIDFromString(leaseID.(string))
if nsID != "" {
ns, err := NamespaceByID(ctx, nsID, c)
if err != nil {
c.Logger().Warn("error looking up namespace from the lease's namespace ID", "error", err)
return nil, err
}
if ns != nil {
ctx = namespace.ContextWithNamespace(ctx, ns)
}
}
}
// Prevent any metrics requests to be forwarded from a standby node.
// Instead, we return an error since we cannot be sure if we have an
// active token store to validate the provided token.
case strings.HasPrefix(req.Path, "sys/metrics"):
if c.standby && !c.perfStandby {
return nil, ErrCannotForwardLocalOnly
}
}
ns, err = namespace.FromContext(ctx)
if err != nil {
return nil, errwrap.Wrapf("could not parse namespace from http context: {{err}}", err)
}
if !hasNamespaces(c) && ns.Path != "" {
return nil, logical.CodedError(403, "namespaces feature not enabled")
}
walState := &logical.WALState{}
ctx = logical.IndexStateContext(ctx, walState)
var auth *logical.Auth
if c.isLoginRequest(ctx, req) && req.ClientTokenSource != logical.ClientTokenFromInternalAuth {
resp, auth, err = c.handleLoginRequest(ctx, req)
} else {
resp, auth, err = c.handleRequest(ctx, req)
}
if err == nil && c.requestResponseCallback != nil {
c.requestResponseCallback(c.router.MatchingBackend(ctx, req.Path), req, resp)
}
// If we saved the token in the request, we should return it in the response
// data.
if resp != nil && resp.Data != nil {
if _, ok := resp.Data["error"]; !ok {
if requestBodyToken != "" {
resp.Data["id"] = requestBodyToken
} else if returnRequestAuthToken && req.InboundSSCToken != "" {
resp.Data["id"] = req.InboundSSCToken
}
}
}
if resp != nil && resp.Auth != nil && requestBodyToken != "" {
// if a client token has already been set and the request body token's internal token
// is equal to that value, then we can return the original request body token
tok, _ := c.DecodeSSCToken(requestBodyToken)
if resp.Auth.ClientToken == tok {
resp.Auth.ClientToken = requestBodyToken
}
}
// Ensure we don't leak internal data
if resp != nil {
if resp.Secret != nil {
resp.Secret.InternalData = nil
}
if resp.Auth != nil {
resp.Auth.InternalData = nil
}
}
// We are wrapping if there is anything to wrap (not a nil response) and a
// TTL was specified for the token. Errors on a call should be returned to
// the caller, so wrapping is turned off if an error is hit and the error
// is logged to the audit log.
wrapping := resp != nil &&
err == nil &&
!resp.IsError() &&
resp.WrapInfo != nil &&
resp.WrapInfo.TTL != 0 &&
resp.WrapInfo.Token == ""
if wrapping {
cubbyResp, cubbyErr := c.wrapInCubbyhole(ctx, req, resp, auth)
// If not successful, returns either an error response from the
// cubbyhole backend or an error; if either is set, set resp and err to
// those and continue so that that's what we audit log. Otherwise
// finish the wrapping and audit log that.
if cubbyResp != nil || cubbyErr != nil {
resp = cubbyResp
err = cubbyErr
} else {
wrappingResp := &logical.Response{
WrapInfo: resp.WrapInfo,
Warnings: resp.Warnings,
}
resp = wrappingResp
}
}
auditResp := resp
// When unwrapping we want to log the actual response that will be written
// out. We still want to return the raw value to avoid automatic updating
// to any of it.
if req.Path == "sys/wrapping/unwrap" &&
resp != nil &&
resp.Data != nil &&
resp.Data[logical.HTTPRawBody] != nil {
// Decode the JSON
if resp.Data[logical.HTTPRawBodyAlreadyJSONDecoded] != nil {
delete(resp.Data, logical.HTTPRawBodyAlreadyJSONDecoded)
} else {
httpResp := &logical.HTTPResponse{}
err := jsonutil.DecodeJSON(resp.Data[logical.HTTPRawBody].([]byte), httpResp)
if err != nil {
c.logger.Error("failed to unmarshal wrapped HTTP response for audit logging", "error", err)
return nil, ErrInternalError
}
auditResp = logical.HTTPResponseToLogicalResponse(httpResp)
}
}
var nonHMACReqDataKeys []string
var nonHMACRespDataKeys []string
if entry != nil {
// Get and set ignored HMAC'd value. Reset those back to empty afterwards.
if rawVals, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_request_keys"); ok {
nonHMACReqDataKeys = rawVals.([]string)
}
// Get and set ignored HMAC'd value. Reset those back to empty afterwards.
if auditResp != nil {
if rawVals, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_response_keys"); ok {
nonHMACRespDataKeys = rawVals.([]string)
}
}
}
// Create an audit trail of the response
if !isControlGroupRun(req) {
switch req.Path {
case "sys/replication/dr/status", "sys/replication/performance/status", "sys/replication/status":
default:
logInput := &logical.LogInput{
Auth: auth,
Request: req,
Response: auditResp,
OuterErr: err,
NonHMACReqDataKeys: nonHMACReqDataKeys,
NonHMACRespDataKeys: nonHMACRespDataKeys,
}
if auditErr := c.auditBroker.LogResponse(ctx, logInput); auditErr != nil {
c.logger.Error("failed to audit response", "request_path", req.Path, "error", auditErr)
return nil, ErrInternalError
}
}
}
if walState.LocalIndex != 0 || walState.ReplicatedIndex != 0 {
walState.ClusterID = c.ClusterID()
if walState.LocalIndex == 0 {
if c.perfStandby {
walState.LocalIndex = c.EntLastRemoteWAL()
} else {
walState.LocalIndex = c.EntLastWAL()
}
}
if walState.ReplicatedIndex == 0 {
if c.perfStandby {
walState.ReplicatedIndex = c.entLastRemoteUpstreamWAL()
} else {
walState.ReplicatedIndex = c.EntLastRemoteWAL()
}
}
req.SetResponseState(walState)
}
return
}
// isControlGroupRun reports whether this request is executing as part of an
// approved control group workflow, which is the case exactly when its
// ControlGroup metadata has been populated.
func isControlGroupRun(req *logical.Request) bool {
	if req.ControlGroup != nil {
		return true
	}
	return false
}
// doRouting sends the request through the router. If the local attempt fails
// with a read-only error while replicating, the request is transparently
// forwarded to the primary, and the two errors are combined when both the
// local and forwarded attempts fail with useful information.
func (c *Core) doRouting(ctx context.Context, req *logical.Request) (*logical.Response, error) {
	localResp, localErr := c.router.Route(ctx, req)
	if !shouldForward(c, localResp, localErr) {
		return localResp, localErr
	}

	fwdResp, fwdErr := forward(ctx, c, req)
	if fwdErr == nil || localErr == logical.ErrReadOnly {
		// Either forwarding succeeded (so no additional context is needed),
		// or the local failure was the bare ErrReadOnly sentinel — in which
		// case the plugin author most likely intended the request to be
		// forwarded anyway, and the local error adds nothing useful.
		return fwdResp, fwdErr
	}

	// Handling the request locally produced an error that contained
	// ErrReadOnly but carried additional information, and forwarding produced
	// _another_ error. Tell the caller about both, so they know about both.
	if repErr, ok := fwdErr.(*logical.ReplicationCodedError); ok {
		return fwdResp, &logical.ReplicationCodedError{
			Msg:  fmt.Sprintf("errors from both primary and secondary; primary error was %s; secondary errors follow: %s", repErr.Error(), localErr.Error()),
			Code: repErr.Code,
		}
	}
	return fwdResp, multierror.Append(fwdErr, localErr)
}
// isLoginRequest reports whether the request targets an unauthenticated
// login path, as determined by the router's mount configuration.
func (c *Core) isLoginRequest(ctx context.Context, req *logical.Request) bool {
	isLogin := c.router.LoginPath(ctx, req.Path)
	return isLogin
}
// handleRequest services an authenticated (non-login) request. It validates
// the client token, decrements the token's use count, audits the request,
// enforces lease-count quotas, routes the request to the backend, and
// post-processes the response: response wrapping info, lease registration
// with the expiration manager, and sanitization of any returned auth block.
// Errors are accumulated into retErr (a multierror) so deferred handlers can
// append to it; note the deferred closures below deliberately mutate the
// named return values.
func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp *logical.Response, retAuth *logical.Auth, retErr error) {
	defer metrics.MeasureSince([]string{"core", "handle_request"}, time.Now())

	var nonHMACReqDataKeys []string
	entry := c.router.MatchingMountEntry(ctx, req.Path)
	if entry != nil {
		// Set here so the audit log has it even if authorization fails
		req.MountType = entry.Type
		req.SetMountRunningSha256(entry.RunningSha256)
		req.SetMountRunningVersion(entry.RunningVersion)
		req.SetMountIsExternalPlugin(entry.IsExternalPlugin())
		req.SetMountClass(entry.MountClass())
		// Get and set ignored HMAC'd value.
		if rawVals, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_request_keys"); ok {
			nonHMACReqDataKeys = rawVals.([]string)
		}
	}

	ns, err := namespace.FromContext(ctx)
	if err != nil {
		c.logger.Error("failed to get namespace from context", "error", err)
		retErr = multierror.Append(retErr, ErrInternalError)
		return
	}

	// Validate the token
	auth, te, ctErr := c.CheckToken(ctx, req, false)
	if ctErr == logical.ErrRelativePath {
		return logical.ErrorResponse(ctErr.Error()), nil, ctErr
	}
	if ctErr == logical.ErrPerfStandbyPleaseForward {
		return nil, nil, ctErr
	}

	// See if the call to CheckToken set any request priority. We push the
	// processing down into CheckToken so we only have to do a router lookup
	// once.
	if auth != nil && auth.HTTPRequestPriority != nil {
		ctx = context.WithValue(ctx, logical.CtxKeyInFlightRequestPriority{}, *auth.HTTPRequestPriority)
	}

	// Updating in-flight request data with client/entity ID
	inFlightReqID, ok := ctx.Value(logical.CtxKeyInFlightRequestID{}).(string)
	if ok && req.ClientID != "" {
		c.UpdateInFlightReqData(inFlightReqID, req.ClientID)
	}

	// We run this logic first because we want to decrement the use count even
	// in the case of an error (assuming we can successfully look up; if we
	// need to forward, we exit before now)
	if te != nil && !isControlGroupRun(req) {
		// Attempt to use the token (decrement NumUses)
		var err error
		te, err = c.tokenStore.UseToken(ctx, te)
		if err != nil {
			c.logger.Error("failed to use token", "error", err)
			retErr = multierror.Append(retErr, ErrInternalError)
			return nil, nil, retErr
		}
		if te == nil {
			// Token has been revoked by this point
			retErr = multierror.Append(retErr, logical.ErrPermissionDenied, logical.ErrInvalidToken)
			return nil, nil, retErr
		}
		if te.NumUses == tokenRevocationPending {
			// We defer a revocation until after logic has run, since this is a
			// valid request (this is the token's final use). We pass the ID in
			// directly just to be safe in case something else modifies te later.
			defer func(id string) {
				nsActiveCtx := namespace.ContextWithNamespace(c.activeContext, ns)
				leaseID, err := c.expiration.CreateOrFetchRevocationLeaseByToken(nsActiveCtx, te)
				if err == nil {
					err = c.expiration.LazyRevoke(ctx, leaseID)
				}
				if err != nil {
					c.logger.Error("failed to revoke token", "error", err)
					// Revocation failed: clobber the named returns so the
					// caller sees an internal error, not a partial response.
					retResp = nil
					retAuth = nil
					retErr = multierror.Append(retErr, ErrInternalError)
				}
				if retResp != nil && retResp.Secret != nil &&
					// Some backends return a TTL even without a Lease ID
					retResp.Secret.LeaseID != "" {
					retResp = logical.ErrorResponse("Secret cannot be returned; token had one use left, so leased credentials were immediately revoked.")
					return
				}
			}(te.ID)
		}
	}

	if ctErr != nil {
		newCtErr, cgResp, cgAuth, cgRetErr := checkNeedsCG(ctx, c, req, auth, ctErr, nonHMACReqDataKeys)
		switch {
		case newCtErr != nil:
			ctErr = newCtErr
		case cgResp != nil || cgAuth != nil:
			if cgRetErr != nil {
				retErr = multierror.Append(retErr, cgRetErr)
			}
			return cgResp, cgAuth, retErr
		}

		// If it is an internal error we return that, otherwise we
		// return invalid request so that the status codes can be correct
		switch {
		case ctErr == ErrInternalError,
			errwrap.Contains(ctErr, ErrInternalError.Error()),
			ctErr == logical.ErrPermissionDenied,
			errwrap.Contains(ctErr, logical.ErrPermissionDenied.Error()):
			switch ctErr.(type) {
			case *multierror.Error:
				retErr = ctErr
			default:
				retErr = multierror.Append(retErr, ctErr)
			}
		default:
			retErr = multierror.Append(retErr, logical.ErrInvalidRequest)
		}

		// Audit even the failed token check (unless part of a control group
		// run); an audit failure here is logged but does not mask ctErr.
		if !isControlGroupRun(req) {
			logInput := &logical.LogInput{
				Auth:               auth,
				Request:            req,
				OuterErr:           ctErr,
				NonHMACReqDataKeys: nonHMACReqDataKeys,
			}
			if err := c.auditBroker.LogRequest(ctx, logInput); err != nil {
				c.logger.Error("failed to audit request", "path", req.Path, "error", err)
			}
		}

		if errwrap.Contains(retErr, ErrInternalError.Error()) {
			return nil, auth, retErr
		}
		return logical.ErrorResponse(ctErr.Error()), auth, retErr
	}

	// Attach the display name
	req.DisplayName = auth.DisplayName

	// Create an audit trail of the request
	if !isControlGroupRun(req) {
		logInput := &logical.LogInput{
			Auth:               auth,
			Request:            req,
			NonHMACReqDataKeys: nonHMACReqDataKeys,
		}
		if err := c.auditBroker.LogRequest(ctx, logInput); err != nil {
			c.logger.Error("failed to audit request", "path", req.Path, "error", err)
			retErr = multierror.Append(retErr, ErrInternalError)
			return nil, auth, retErr
		}
	}

	if err := c.entBlockRequestIfError(ns.Path, req.Path); err != nil {
		return nil, nil, multierror.Append(retErr, err)
	}

	// Lease count quota check; leaseGenerated is flipped later if a lease or
	// service token is actually created, and the deferred ack reports that
	// outcome back to the quota manager.
	leaseGenerated := false
	quotaResp, quotaErr := c.applyLeaseCountQuota(ctx, &quotas.Request{
		Path:          req.Path,
		MountPath:     strings.TrimPrefix(req.MountPoint, ns.Path),
		NamespacePath: ns.Path,
	})
	if quotaErr != nil {
		c.logger.Error("failed to apply quota", "path", req.Path, "error", quotaErr)
		retErr = multierror.Append(retErr, quotaErr)
		return nil, auth, retErr
	}

	if !quotaResp.Allowed {
		if c.logger.IsTrace() {
			c.logger.Trace("request rejected due to lease count quota violation", "request_path", req.Path)
		}
		retErr = multierror.Append(retErr, fmt.Errorf("request path %q: %w", req.Path, quotas.ErrLeaseCountQuotaExceeded))
		return nil, auth, retErr
	}

	defer func() {
		if quotaResp.Access != nil {
			quotaAckErr := c.ackLeaseQuota(quotaResp.Access, leaseGenerated)
			if quotaAckErr != nil {
				retErr = multierror.Append(retErr, quotaAckErr)
			}
		}
	}()

	// This context value will be empty if it's a request that doesn't require a
	// snapshot. This is done on purpose and handled in the
	// SnapshotStorageRouter
	ctx = logical.CreateContextWithSnapshotID(ctx, req.RequiresSnapshotID)

	// recover operations require 2 steps
	if req.Operation == logical.RecoverOperation {
		// first do a read operation
		// this will use the snapshot's storage
		originalPath := req.Path
		req.Operation = logical.ReadOperation
		if req.RecoverSourcePath != "" {
			req.Path = req.RecoverSourcePath
		}
		resp, err := c.doRouting(ctx, req)
		if err != nil {
			return nil, auth, err
		}
		if resp == nil {
			return logical.ErrorResponse("no data in the snapshot"), auth, err
		}
		if resp.IsError() {
			return resp, auth, err
		}
		// use the response as the data in a recover operation
		// set the snapshot ID context value to the empty string to ensure that
		// the write goes to the real storage
		req.Operation = logical.RecoverOperation
		req.Path = originalPath
		req.Data = resp.Data
		ctx = logical.CreateContextWithSnapshotID(ctx, "")
	}

	// Route the request
	resp, routeErr := c.doRouting(ctx, req)
	if resp != nil {
		// Add mount type information to the response
		if entry != nil {
			resp.MountType = entry.Type
		}

		// If wrapping is used, use the shortest between the request and response
		var wrapTTL time.Duration
		var wrapFormat, creationPath string
		var sealWrap bool

		// Ensure no wrap info information is set other than, possibly, the TTL
		if resp.WrapInfo != nil {
			if resp.WrapInfo.TTL > 0 {
				wrapTTL = resp.WrapInfo.TTL
			}
			wrapFormat = resp.WrapInfo.Format
			creationPath = resp.WrapInfo.CreationPath
			sealWrap = resp.WrapInfo.SealWrap
			resp.WrapInfo = nil
		}

		if req.WrapInfo != nil {
			if req.WrapInfo.TTL > 0 {
				switch {
				case wrapTTL == 0:
					wrapTTL = req.WrapInfo.TTL
				case req.WrapInfo.TTL < wrapTTL:
					wrapTTL = req.WrapInfo.TTL
				}
			}
			// If the wrap format hasn't been set by the response, set it to
			// the request format
			if req.WrapInfo.Format != "" && wrapFormat == "" {
				wrapFormat = req.WrapInfo.Format
			}
		}

		if wrapTTL > 0 {
			resp.WrapInfo = &wrapping.ResponseWrapInfo{
				TTL:          wrapTTL,
				Format:       wrapFormat,
				CreationPath: creationPath,
				SealWrap:     sealWrap,
			}
		}
	}

	// If there is a secret, we must register it with the expiration manager.
	// We exclude renewal of a lease, since it does not need to be re-registered
	if resp != nil && resp.Secret != nil && !strings.HasPrefix(req.Path, "sys/renew") &&
		!strings.HasPrefix(req.Path, "sys/leases/renew") {
		// KV mounts should return the TTL but not register
		// for a lease as this provides a massive slowdown
		registerLease := true

		matchingMountEntry := c.router.MatchingMountEntry(ctx, req.Path)
		if matchingMountEntry == nil {
			c.logger.Error("unable to retrieve kv mount entry from router")
			retErr = multierror.Append(retErr, ErrInternalError)
			return nil, auth, retErr
		}

		switch matchingMountEntry.Type {
		case "kv", "generic":
			// If we are kv type, first see if we are an older passthrough
			// backend, and otherwise check the mount entry options.
			matchingBackend := c.router.MatchingBackend(ctx, req.Path)
			if matchingBackend == nil {
				c.logger.Error("unable to retrieve kv backend from router")
				retErr = multierror.Append(retErr, ErrInternalError)
				return nil, auth, retErr
			}
			if ptbe, ok := matchingBackend.(*PassthroughBackend); ok {
				if !ptbe.GeneratesLeases() {
					registerLease = false
					resp.Secret.Renewable = false
				}
			} else if matchingMountEntry.Options == nil || matchingMountEntry.Options["leased_passthrough"] != "true" {
				registerLease = false
				resp.Secret.Renewable = false
			} else if req.IsSnapshotReadOrList() {
				registerLease = false
				resp.Secret.Renewable = false
			}

		case "plugin":
			// If we are a plugin type and the plugin name is "kv" check the
			// mount entry options.
			if matchingMountEntry.Config.PluginName == "kv" && (matchingMountEntry.Options == nil || matchingMountEntry.Options["leased_passthrough"] != "true" || req.IsSnapshotReadOrList()) {
				registerLease = false
				resp.Secret.Renewable = false
			}
		}

		if registerLease {
			if req.IsSnapshotReadOrList() {
				return logical.ErrorResponse("cannot register lease for snapshot read or list"), nil, ErrInternalError
			}
			sysView := c.router.MatchingSystemView(ctx, req.Path)
			if sysView == nil {
				c.logger.Error("unable to look up sys view for login path", "request_path", req.Path)
				return nil, nil, ErrInternalError
			}

			// Clamp the secret TTL against the mount/system defaults and
			// surface any TTL-adjustment warnings on the response.
			ttl, warnings, err := framework.CalculateTTL(sysView, 0, resp.Secret.TTL, 0, resp.Secret.MaxTTL, 0, time.Time{})
			if err != nil {
				return nil, nil, err
			}
			for _, warning := range warnings {
				resp.AddWarning(warning)
			}
			resp.Secret.TTL = ttl

			registerFunc, funcGetErr := getLeaseRegisterFunc(c)
			if funcGetErr != nil {
				retErr = multierror.Append(retErr, funcGetErr)
				return nil, auth, retErr
			}

			leaseID, err := registerFunc(ctx, req, resp, "")
			if err != nil {
				c.logger.Error("failed to register lease", "request_path", req.Path, "error", err)
				retErr = multierror.Append(retErr, ErrInternalError)
				return nil, auth, retErr
			}
			leaseGenerated = true
			resp.Secret.LeaseID = leaseID

			// Count the lease creation
			ttl_label := metricsutil.TTLBucket(resp.Secret.TTL)
			mountPointWithoutNs := ns.TrimmedPath(req.MountPoint)
			c.MetricSink().IncrCounterWithLabels(
				[]string{"secret", "lease", "creation"},
				1,
				[]metrics.Label{
					metricsutil.NamespaceLabel(ns),
					{"secret_engine", req.MountType},
					{"mount_point", mountPointWithoutNs},
					{"creation_ttl", ttl_label},
				},
			)
		}
	}

	// Only the token store is allowed to return an auth block, for any
	// other request this is an internal error.
	if resp != nil && resp.Auth != nil {
		if !strings.HasPrefix(req.Path, "auth/token/") {
			c.logger.Error("unexpected Auth response for non-token backend", "request_path", req.Path)
			retErr = multierror.Append(retErr, ErrInternalError)
			return nil, auth, retErr
		}

		// Fetch the namespace to which the token belongs
		tokenNS, err := NamespaceByID(ctx, te.NamespaceID, c)
		if err != nil {
			c.logger.Error("failed to fetch token's namespace", "error", err)
			retErr = multierror.Append(retErr, err)
			return nil, auth, retErr
		}
		if tokenNS == nil {
			c.logger.Error(namespace.ErrNoNamespace.Error())
			retErr = multierror.Append(retErr, namespace.ErrNoNamespace)
			return nil, auth, retErr
		}

		_, identityPolicies, err := c.fetchEntityAndDerivedPolicies(ctx, tokenNS, resp.Auth.EntityID, false)
		if err != nil {
			// Best-effort clean up on error, so we log the cleanup error as a
			// warning but still return as internal error.
			if err := c.tokenStore.revokeOrphan(ctx, resp.Auth.ClientToken); err != nil {
				c.logger.Warn("failed to clean up token lease from entity and policy lookup failure", "request_path", req.Path, "error", err)
			}
			return nil, nil, ErrInternalError
		}

		// We skip expiration manager registration for token renewal since it
		// does not need to be re-registered
		if strings.HasPrefix(req.Path, "auth/token/renew") {
			// We build the "policies" list to be returned by starting with
			// token policies, and add identity policies right after this
			// conditional
			tok, _ := c.DecodeSSCToken(req.InboundSSCToken)
			if resp.Auth.ClientToken == tok {
				// Hand back the original inbound (server-side consistent)
				// token rather than the short internal token.
				resp.Auth.ClientToken = req.InboundSSCToken
			}
			resp.Auth.Policies = policyutil.SanitizePolicies(resp.Auth.TokenPolicies, policyutil.DoNotAddDefaultPolicy)
		} else {
			resp.Auth.TokenPolicies = policyutil.SanitizePolicies(resp.Auth.Policies, policyutil.DoNotAddDefaultPolicy)

			switch resp.Auth.TokenType {
			case logical.TokenTypeBatch:
				// Batch tokens are not registered with the expiration manager.
			case logical.TokenTypeService:
				if !c.perfStandby {
					registeredTokenEntry := &logical.TokenEntry{
						TTL:         auth.TTL,
						Policies:    auth.TokenPolicies,
						Path:        resp.Auth.CreationPath,
						NamespaceID: ns.ID,
					}
					// Only logins apply to role based quotas, so we can omit the role here, as we are not logging in.
					if err := c.expiration.RegisterAuth(ctx, registeredTokenEntry, resp.Auth, ""); err != nil {
						// Best-effort clean up on error, so we log the cleanup error as
						// a warning but still return as internal error.
						if err := c.tokenStore.revokeOrphan(ctx, resp.Auth.ClientToken); err != nil {
							c.logger.Warn("failed to clean up token lease during auth/token/ request", "request_path", req.Path, "error", err)
						}
						c.logger.Error("failed to register token lease during auth/token/ request", "request_path", req.Path, "error", err)
						retErr = multierror.Append(retErr, ErrInternalError)
						return nil, auth, retErr
					}
					if registeredTokenEntry.ExternalID != "" {
						resp.Auth.ClientToken = registeredTokenEntry.ExternalID
					}
					leaseGenerated = true
				}
			}
		}

		// We do these later since it's not meaningful for backends/expmgr to
		// have what is purely a snapshot of current identity policies, and
		// plugins can be confused if they are checking contents of
		// Auth.Policies instead of Auth.TokenPolicies
		resp.Auth.Policies = policyutil.SanitizePolicies(append(resp.Auth.Policies, identityPolicies[te.NamespaceID]...), policyutil.DoNotAddDefaultPolicy)
		resp.Auth.IdentityPolicies = policyutil.SanitizePolicies(identityPolicies[te.NamespaceID], policyutil.DoNotAddDefaultPolicy)
		delete(identityPolicies, te.NamespaceID)
		resp.Auth.ExternalNamespacePolicies = identityPolicies
	}

	// Warn clients still using the legacy cubbyhole read path for wrapped
	// responses (identified by the response-wrapping policy on the token).
	if resp != nil &&
		req.Path == "cubbyhole/response" &&
		len(te.Policies) == 1 &&
		te.Policies[0] == responseWrappingPolicyName {
		resp.AddWarning("Reading from 'cubbyhole/response' is deprecated. Please use sys/wrapping/unwrap to unwrap responses, as it provides additional security checks and other benefits.")
	}

	// Return the response and error
	if routeErr != nil {
		if _, ok := routeErr.(*logical.RequestDelegatedAuthError); ok {
			routeErr = fmt.Errorf("delegated authentication requested but authentication token present")
		}
		retErr = multierror.Append(retErr, routeErr)
	}

	return resp, auth, retErr
}
// handleLoginRequest is used to handle a login request, which is an
// unauthenticated request to the backend.
func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (retResp *logical.Response, retAuth *logical.Auth, retErr error) {
defer metrics.MeasureSince([]string{"core", "handle_login_request"}, time.Now())
req.Unauthenticated = true
var nonHMACReqDataKeys []string
entry := c.router.MatchingMountEntry(ctx, req.Path)
if entry != nil {
// Set here so the audit log has it even if authorization fails
req.MountType = entry.Type
req.SetMountRunningSha256(entry.RunningSha256)
req.SetMountRunningVersion(entry.RunningVersion)
req.SetMountIsExternalPlugin(entry.IsExternalPlugin())
req.SetMountClass(entry.MountClass())
// Get and set ignored HMAC'd value.
if rawVals, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_request_keys"); ok {
nonHMACReqDataKeys = rawVals.([]string)
}
}
// Do an unauth check. This will cause EGP policies to be checked
var auth *logical.Auth
var ctErr error
auth, _, ctErr = c.CheckToken(ctx, req, true)
if ctErr == logical.ErrPerfStandbyPleaseForward {
return nil, nil, ctErr
}
// Updating in-flight request data with client/entity ID
inFlightReqID, ok := ctx.Value(logical.CtxKeyInFlightRequestID{}).(string)
if ok && req.ClientID != "" {
c.UpdateInFlightReqData(inFlightReqID, req.ClientID)
}
if ctErr != nil {
// If it is an internal error we return that, otherwise we
// return invalid request so that the status codes can be correct
var errType error
switch ctErr {
case ErrInternalError, logical.ErrPermissionDenied:
errType = ctErr
default:
errType = logical.ErrInvalidRequest
}
logInput := &logical.LogInput{
Auth: auth,
Request: req,
OuterErr: ctErr,
NonHMACReqDataKeys: nonHMACReqDataKeys,
}
if err := c.auditBroker.LogRequest(ctx, logInput); err != nil {
c.logger.Error("failed to audit request", "path", req.Path, "error", err)
return nil, nil, ErrInternalError
}
if errType != nil {
retErr = multierror.Append(retErr, errType)
}
if ctErr == ErrInternalError {
return nil, auth, retErr
}
return logical.ErrorResponse(ctErr.Error()), auth, retErr
}
switch req.Path {
case "sys/replication/dr/status", "sys/replication/performance/status", "sys/replication/status":
default:
// Create an audit trail of the request. Attach auth if it was returned,
// e.g. if a token was provided.
logInput := &logical.LogInput{
Auth: auth,
Request: req,
NonHMACReqDataKeys: nonHMACReqDataKeys,
}
if err := c.auditBroker.LogRequest(ctx, logInput); err != nil {
c.logger.Error("failed to audit request", "path", req.Path, "error", err)
return nil, nil, ErrInternalError
}
}
// The token store uses authentication even when creating a new token,
// so it's handled in handleRequest. It should not be reached here.
if strings.HasPrefix(req.Path, "auth/token/") {
c.logger.Error("unexpected login request for token backend", "request_path", req.Path)
return nil, nil, ErrInternalError
}
// check if user lockout feature is disabled
isUserLockoutDisabled, err := c.isUserLockoutDisabled(entry)
if err != nil {
return nil, nil, err
}
// if user lockout feature is not disabled, check if the user is locked
if !isUserLockoutDisabled {
isloginUserLocked, err := c.isUserLocked(ctx, entry, req)
if err != nil {
return nil, nil, err
}
if isloginUserLocked {
c.logger.Error("login attempts exceeded, user is locked out", "request_path", req.Path)
return nil, nil, logical.ErrPermissionDenied
}
}
// Route the request
resp, routeErr := c.doRouting(ctx, req)
handleInvalidCreds := func(err error) (*logical.Response, *logical.Auth, error) {
if !isUserLockoutDisabled {
err := c.failedUserLoginProcess(ctx, entry, req)
if err != nil {
return nil, nil, err
}
}
return resp, nil, err
}
if routeErr != nil {
// if routeErr has invalid credentials error, update the userFailedLoginMap
if routeErr == logical.ErrInvalidCredentials {
return handleInvalidCreds(routeErr)
} else if da, ok := routeErr.(*logical.RequestDelegatedAuthError); ok {
return c.handleDelegatedAuth(ctx, req, da, entry, handleInvalidCreds)
}
}
if resp != nil {
// If wrapping is used, use the shortest between the request and response
var wrapTTL time.Duration
var wrapFormat, creationPath string
var sealWrap bool
// Ensure no wrap info information is set other than, possibly, the TTL
if resp.WrapInfo != nil {
if resp.WrapInfo.TTL > 0 {
wrapTTL = resp.WrapInfo.TTL
}
wrapFormat = resp.WrapInfo.Format
creationPath = resp.WrapInfo.CreationPath
sealWrap = resp.WrapInfo.SealWrap
resp.WrapInfo = nil
}
if req.WrapInfo != nil {
if req.WrapInfo.TTL > 0 {
switch {
case wrapTTL == 0:
wrapTTL = req.WrapInfo.TTL
case req.WrapInfo.TTL < wrapTTL:
wrapTTL = req.WrapInfo.TTL
}
}
if req.WrapInfo.Format != "" && wrapFormat == "" {
wrapFormat = req.WrapInfo.Format
}
}
if wrapTTL > 0 {
resp.WrapInfo = &wrapping.ResponseWrapInfo{
TTL: wrapTTL,
Format: wrapFormat,
CreationPath: creationPath,
SealWrap: sealWrap,
}
}
}
// A login request should never return a secret!
if resp != nil && resp.Secret != nil {
c.logger.Error("unexpected Secret response for login path", "request_path", req.Path)
return nil, nil, ErrInternalError
}
ns, err := namespace.FromContext(ctx)
if err != nil {
c.logger.Error("failed to get namespace from context", "error", err)
retErr = multierror.Append(retErr, ErrInternalError)
return
}
// If the response generated an authentication, then generate the token
if resp != nil && resp.Auth != nil && req.Path != "sys/mfa/validate" {
leaseGenerated := false
// by placing this after the authorization check, we don't leak
// information about locked namespaces to unauthenticated clients.
if err := c.entBlockRequestIfError(ns.Path, req.Path); err != nil {
retErr = multierror.Append(retErr, err)
return
}
// Check for request role in context to role based quotas
var role string
reqRole := ctx.Value(logical.CtxKeyRequestRole{})
if reqRole != nil {
role = reqRole.(string)
}
// The request successfully authenticated itself. Run the quota checks
// before creating lease.
quotaResp, quotaErr := c.applyLeaseCountQuota(ctx, "as.Request{
Path: req.Path,
MountPath: strings.TrimPrefix(req.MountPoint, ns.Path),
Role: role,
NamespacePath: ns.Path,
})
if quotaErr != nil {
c.logger.Error("failed to apply quota", "path", req.Path, "error", quotaErr)
retErr = multierror.Append(retErr, quotaErr)
return
}
if !quotaResp.Allowed {
if c.logger.IsTrace() {
c.logger.Trace("request rejected due to lease count quota violation", "request_path", req.Path)
}
retErr = multierror.Append(retErr, fmt.Errorf("request path %q: %w", req.Path, quotas.ErrLeaseCountQuotaExceeded))
return
}
defer func() {
if quotaResp.Access != nil {
quotaAckErr := c.ackLeaseQuota(quotaResp.Access, leaseGenerated)
if quotaAckErr != nil {
retErr = multierror.Append(retErr, quotaAckErr)
}
}
}()
var entity *identity.Entity
auth = resp.Auth
mEntry := c.router.MatchingMountEntry(ctx, req.Path)
if auth.Alias != nil &&
mEntry != nil &&
c.identityStore != nil {
if mEntry.Local && os.Getenv(EnvVaultDisableLocalAuthMountEntities) != "" {
goto CREATE_TOKEN
}
// Overwrite the mount type and mount path in the alias
// information
auth.Alias.MountType = req.MountType
auth.Alias.MountAccessor = req.MountAccessor
auth.Alias.Local = mEntry.Local
if auth.Alias.Name == "" {
return nil, nil, fmt.Errorf("missing name in alias")
}
var err error
// Fetch the entity for the alias, or create an entity if one
// doesn't exist.
entity, entityCreated, err := c.identityStore.CreateOrFetchEntity(ctx, auth.Alias)
if err != nil {
switch auth.Alias.Local {
case true:
// Only create a new entity if the error was a readonly error and the creation flag is true
// i.e the entity was in the middle of being created
if entityCreated && errors.Is(err, logical.ErrReadOnly) {
entity, err = registerLocalAlias(ctx, c, auth.Alias)
if err != nil {
if strings.Contains(err.Error(), errCreateEntityUnimplemented) {
resp.AddWarning("primary cluster doesn't yet issue entities for local auth mounts; falling back to not issuing entities for local auth mounts")
goto CREATE_TOKEN
} else {
return nil, nil, err
}
}
}
default:
entity, entityCreated, err = possiblyForwardAliasCreation(ctx, c, err, auth, entity)
if err != nil {
return nil, nil, err
}
}
}
if entity == nil {
return nil, nil, fmt.Errorf("failed to create an entity for the authenticated alias")
}
if entity.Disabled {
return nil, nil, logical.ErrPermissionDenied
}
auth.EntityID = entity.ID
auth.EntityCreated = entityCreated
validAliases, err := c.identityStore.refreshExternalGroupMembershipsByEntityID(ctx, auth.EntityID, auth.GroupAliases, req.MountAccessor)
if err != nil {
return nil, nil, err
}
auth.GroupAliases = validAliases
}
CREATE_TOKEN:
// Determine the source of the login
source := c.router.MatchingMount(ctx, req.Path)
// Login MFA
entity, _, err := c.fetchEntityAndDerivedPolicies(ctx, ns, auth.EntityID, true)
if err != nil {
return nil, nil, ErrInternalError
}
// finding the MFAEnforcementConfig that matches the ns and either of
// entityID, MountAccessor, GroupID, or Auth type.
matchedMfaEnforcementList, err := c.buildMFAEnforcementConfigList(ctx, entity, req.Path)
if err != nil {
return nil, nil, fmt.Errorf("failed to find MFAEnforcement configuration, error: %v", err)
}
// (for the context, a response warning above says: "primary cluster
// doesn't yet issue entities for local auth mounts; falling back
// to not issuing entities for local auth mounts")
// based on the above, if the entity is nil, check if MFAEnforcementConfig
// is configured or not. If not, continue as usual, but if there
// is something, then report an error indicating that the user is not
// allowed to login because there is no entity associated with it.
// This is because an entity is needed to enforce MFA.
if entity == nil && len(matchedMfaEnforcementList) > 0 {
// this logic means that an MFAEnforcementConfig was configured with
// only mount type or mount accessor
return nil, nil, logical.ErrPermissionDenied
}
// The resp.Auth has been populated with the information that is required for MFA validation
// This is why, the MFA check is placed at this point. The resp.Auth is going to be fully cached
// in memory so that it would be used to return to the user upon MFA validation is completed.
if entity != nil {
if len(matchedMfaEnforcementList) == 0 && len(req.MFACreds) > 0 {
resp.AddWarning("Found MFA header but failed to find MFA Enforcement Config")
}
// If X-Vault-MFA header is supplied to the login request,
// run single-phase login MFA check, else run two-phase login MFA check
if len(matchedMfaEnforcementList) > 0 && len(req.MFACreds) > 0 {
for _, eConfig := range matchedMfaEnforcementList {
err = c.validateLoginMFA(ctx, eConfig, entity, req.Connection.RemoteAddr, req.MFACreds, nil)
if err != nil {
return nil, nil, logical.ErrPermissionDenied
}
}
} else if len(matchedMfaEnforcementList) > 0 && len(req.MFACreds) == 0 {
mfaRequestID, err := uuid.GenerateUUID()
if err != nil {
return nil, nil, err
}
// sending back the MFARequirement config
mfaRequirement := &logical.MFARequirement{
MFARequestID: mfaRequestID,
MFAConstraints: make(map[string]*logical.MFAConstraintAny),
}
for _, eConfig := range matchedMfaEnforcementList {
onlyMFAEnforcement := len(matchedMfaEnforcementList) == 1
mfaAny, err := c.buildMfaEnforcementResponse(eConfig, entity, onlyMFAEnforcement)
if err != nil {
return nil, nil, err
}
mfaRequirement.MFAConstraints[eConfig.Name] = mfaAny
}
// for two phased MFA enforcement, we should not return the regular auth
// response. This flag is indicate to store the auth response for later
// and return MFARequirement only
respAuth := &MFACachedAuthResponse{
CachedAuth: resp.Auth,
RequestPath: req.Path,
RequestNSID: ns.ID,
RequestNSPath: ns.Path,
RequestConnRemoteAddr: req.Connection.RemoteAddr, // this is needed for the DUO method
TimeOfStorage: time.Now(),
RequestID: mfaRequestID,
}
err = possiblyForwardSaveCachedAuthResponse(ctx, c, respAuth)
if err != nil {
return nil, nil, err
}
auth = nil
resp.Auth = &logical.Auth{
MFARequirement: mfaRequirement,
}
resp.AddWarning("A login request was issued that is subject to MFA validation. Please make sure to validate the login by sending another request to mfa/validate endpoint.")
// going to return early before generating the token
// the user receives the mfaRequirement, and need to use the
// login MFA validate endpoint to get the token
return resp, auth, nil
}
}
// Attach the display name, might be used by audit backends
req.DisplayName = auth.DisplayName
requiresLease := resp.Auth.TokenType != logical.TokenTypeBatch
// If role was not already determined by http.rateLimitQuotaWrapping
// and a lease will be generated, calculate a role for the leaseEntry.
// We can skip this step if there are no pre-existing role-based quotas
// for this mount and Vault is configured to skip lease role-based lease counting
// until after they're created. This effectively zeroes out the lease count
// for new role-based quotas upon creation, rather than counting old leases toward
// the total.
if reqRole == nil && requiresLease && !c.impreciseLeaseRoleTracking {
role = c.DetermineRoleFromLoginRequest(ctx, req.MountPoint, req.Data, req.Connection, req.Headers)
}
leaseGen, respTokenCreate, errCreateToken := c.LoginCreateToken(ctx, ns, req.Path, source, role, resp)
leaseGenerated = leaseGen
if errCreateToken != nil {
return respTokenCreate, nil, errCreateToken
}
resp = respTokenCreate
}
// Successful login, remove any entry from userFailedLoginInfo map
// if it exists. This is done for batch tokens (for oss & ent)
// For service tokens on oss it is taken care by core RegisterAuth function.
// For service tokens on ent it is taken care by registerAuth RPC calls.
// This update is done as part of registerAuth of RPC calls from standby
// to active node. This is added there to reduce RPC calls
if !isUserLockoutDisabled && (auth.TokenType == logical.TokenTypeBatch) {
loginUserInfoKey := FailedLoginUser{
aliasName: auth.Alias.Name,
mountAccessor: auth.Alias.MountAccessor,
}
// We don't need to try to delete the lockedUsers storage entry, since we're
// processing a login request. If a login attempt is allowed, it means the user is
// unlocked and we only add storage entry when the user gets locked.
err = updateUserFailedLoginInfo(ctx, c, loginUserInfoKey, nil, true)
if err != nil {
return nil, nil, err
}
}
// if we were already going to return some error from this login, do that.
// if not, we will then check if the API is locked for the requesting
// namespace, to avoid leaking locked namespaces to unauthenticated clients.
if resp != nil && resp.Data != nil {
if _, ok := resp.Data["error"]; ok {
return resp, auth, routeErr
}
}
if routeErr != nil {
return resp, auth, routeErr
}
// this check handles the bad login credential case
if err := c.entBlockRequestIfError(ns.Path, req.Path); err != nil {
return nil, nil, multierror.Append(retErr, err)
}
return resp, auth, routeErr
}
// invalidCredentialHandler is the callback invoked when a delegated auth
// attempt fails with invalid credentials, giving the caller a chance to shape
// the final response/auth/error returned to the client.
type invalidCredentialHandler func(err error) (*logical.Response, *logical.Auth, error)
// handleDelegatedAuth when a backend request returns logical.RequestDelegatedAuthError, it is requesting that
// an authentication workflow of its choosing be implemented prior to it being able to accept it. Normally
// this is used for standard protocols that communicate the credential information in a non-standard Vault way.
// On success, the original request is cloned and re-issued with the client token obtained from the
// delegated login. Returns the response of that re-issued request (or of the failed login, when the
// backend's error handler chooses to surface it).
func (c *Core) handleDelegatedAuth(ctx context.Context, origReq *logical.Request, da *logical.RequestDelegatedAuthError, entry *MountEntry, invalidCredHandler invalidCredentialHandler) (*logical.Response, *logical.Auth, error) {
	// Make sure we didn't get into a routing loop: a request whose token was
	// itself minted through delegated auth must not trigger another delegation.
	if origReq.ClientTokenSource == logical.ClientTokenFromInternalAuth {
		return nil, nil, fmt.Errorf("%w: original request had delegated auth token, "+
			"forbidding another delegated request from path '%s'", ErrInternalError, origReq.Path)
	}

	// Backend has requested internally delegated authentication
	requestedAccessor := da.MountAccessor()
	if strings.TrimSpace(requestedAccessor) == "" {
		return nil, nil, fmt.Errorf("%w: backend returned an invalid mount accessor '%s'", ErrInternalError, requestedAccessor)
	}

	// First, is this allowed by the mount tunable?
	if !slices.Contains(entry.Config.DelegatedAuthAccessors, requestedAccessor) {
		return nil, nil, fmt.Errorf("delegated auth to accessor %s not permitted", requestedAccessor)
	}

	reqNamespace, err := namespace.FromContext(ctx)
	if err != nil {
		return nil, nil, fmt.Errorf("failed looking up namespace from context: %w", err)
	}

	// The delegated target must resolve to an auth mount in the same
	// namespace as the original request.
	mount := c.router.MatchingMountByAccessor(requestedAccessor)
	if mount == nil {
		return nil, nil, fmt.Errorf("%w: requested delegate authentication accessor '%s' was not found", logical.ErrPermissionDenied, requestedAccessor)
	}
	if mount.Table != credentialTableType {
		return nil, nil, fmt.Errorf("%w: requested delegate authentication mount '%s' was not an auth mount", logical.ErrPermissionDenied, requestedAccessor)
	}
	if mount.NamespaceID != reqNamespace.ID {
		return nil, nil, fmt.Errorf("%w: requested delegate authentication mount was in a different namespace than request", logical.ErrPermissionDenied)
	}

	// Found it, now form the login path and issue the request
	path := paths.Join("auth", mount.Path, da.Path())
	authReq, err := origReq.Clone()
	if err != nil {
		return nil, nil, err
	}
	authReq.MountAccessor = requestedAccessor
	authReq.Path = path
	authReq.Operation = logical.UpdateOperation

	// filter out any response wrapping headers, for our embedded login request
	delete(authReq.Headers, textproto.CanonicalMIMEHeaderKey(consts.WrapTTLHeaderName))
	authReq.WrapInfo = nil

	// Insert the data fields from the delegated auth error in our auth request
	authReq.Data = maps.Clone(da.Data())

	// Make sure we are going to perform a login request and not expose other backend types to this request
	if !c.isLoginRequest(ctx, authReq) {
		return nil, nil, fmt.Errorf("delegated path '%s' was not considered a login request", authReq.Path)
	}

	authResp, err := c.handleCancelableRequest(ctx, authReq)
	if err != nil || authResp.IsError() {
		// see if the backend wishes to handle the failed auth
		if da.AuthErrorHandler() != nil {
			if err != nil && errors.Is(err, logical.ErrInvalidCredentials) {
				// We purposefully ignore the error here as the handler will
				// always return the original error we passed in.
				_, _, _ = invalidCredHandler(err)
			}
			resp, err := da.AuthErrorHandler()(ctx, origReq, authReq, authResp, err)
			return resp, nil, err
		}
		// No backend error handler: nil err here means authResp carried an
		// error response body, which is returned as-is.
		switch err {
		case nil:
			return authResp, nil, nil
		case logical.ErrInvalidCredentials:
			return invalidCredHandler(err)
		default:
			return authResp, nil, err
		}
	}
	if authResp == nil {
		return nil, nil, fmt.Errorf("%w: delegated auth request returned empty response for request_path: %s", ErrInternalError, authReq.Path)
	}
	// A login request should never return a secret!
	if authResp.Secret != nil {
		return nil, nil, fmt.Errorf("%w: unexpected Secret response for login path for request_path: %s", ErrInternalError, authReq.Path)
	}
	if authResp.Auth == nil {
		return nil, nil, fmt.Errorf("%w: Auth response was nil for request_path: %s", ErrInternalError, authReq.Path)
	}
	if authResp.Auth.ClientToken == "" {
		if authResp.Auth.MFARequirement != nil {
			return nil, nil, fmt.Errorf("%w: delegated auth request requiring MFA is not supported: %s", logical.ErrPermissionDenied, authReq.Path)
		}
		return nil, nil, fmt.Errorf("%w: delegated auth request did not return a client token for login path: %s", ErrInternalError, authReq.Path)
	}

	// Delegated auth tokens should only be batch tokens, as we don't want to incur
	// the cost of storage/tidying for protocols that will be generating a token per
	// request.
	if !IsBatchToken(authResp.Auth.ClientToken) {
		return nil, nil, fmt.Errorf("%w: delegated auth requests must be configured to issue batch tokens", logical.ErrPermissionDenied)
	}

	// Authentication successful, use the resulting ClientToken to reissue the original request
	secondReq, err := origReq.Clone()
	if err != nil {
		return nil, nil, err
	}
	secondReq.ClientToken = authResp.Auth.ClientToken
	// Mark the token source so a nested delegation attempt is rejected by the
	// routing-loop guard at the top of this function.
	secondReq.ClientTokenSource = logical.ClientTokenFromInternalAuth
	resp, err := c.handleCancelableRequest(ctx, secondReq)
	return resp, nil, err
}
// LoginCreateToken creates a token as a result of a login request.
// If MFA is enforced, mfa/validate endpoint calls this functions
// after successful MFA validation to generate the token.
// It returns (leaseGenerated, response, error); leaseGenerated is true only
// when a non-batch token was successfully registered with a lease.
func (c *Core) LoginCreateToken(ctx context.Context, ns *namespace.Namespace, reqPath, mountPoint, role string, resp *logical.Response) (bool, *logical.Response, error) {
	auth := resp.Auth

	// Derive a display-name prefix from the mount point, e.g.
	// "auth/userpass/" -> "userpass-".
	source := strings.TrimPrefix(mountPoint, credentialRoutePrefix)
	source = strings.ReplaceAll(source, "/", "-")

	// Prepend the source to the display name
	auth.DisplayName = strings.TrimSuffix(source+auth.DisplayName, "-")

	// Determine mount type
	mountEntry := c.router.MatchingMountEntry(ctx, reqPath)
	if mountEntry == nil {
		return false, nil, fmt.Errorf("failed to find a matching mount")
	}

	sysView := c.router.MatchingSystemView(ctx, reqPath)
	if sysView == nil {
		c.logger.Error("unable to look up sys view for login path", "request_path", reqPath)
		return false, nil, ErrInternalError
	}

	// Resolve the effective token TTL from the auth's TTL/period/max values
	// against the mount's system view; warnings surface in the response.
	tokenTTL, warnings, err := framework.CalculateTTL(sysView, 0, auth.TTL, auth.Period, auth.MaxTTL, auth.ExplicitMaxTTL, time.Time{})
	if err != nil {
		return false, nil, err
	}
	for _, warning := range warnings {
		resp.AddWarning(warning)
	}

	// Fetch identity-derived policies for the entity (entity itself unused here).
	_, identityPolicies, err := c.fetchEntityAndDerivedPolicies(ctx, ns, auth.EntityID, false)
	if err != nil {
		return false, nil, ErrInternalError
	}

	auth.TokenPolicies = policyutil.SanitizePolicies(auth.Policies, !auth.NoDefaultPolicy)
	allPolicies := policyutil.SanitizePolicies(append(auth.TokenPolicies, identityPolicies[ns.ID]...), policyutil.DoNotAddDefaultPolicy)

	// Prevent internal policies from being assigned to tokens. We check
	// this on auth.Policies including derived ones from Identity before
	// actually making the token.
	for _, policy := range allPolicies {
		if policy == "root" {
			return false, logical.ErrorResponse("auth methods cannot create root tokens"), logical.ErrInvalidRequest
		}
		if strutil.StrListContains(nonAssignablePolicies, policy) {
			return false, logical.ErrorResponse(fmt.Sprintf("cannot assign policy %q", policy)), logical.ErrInvalidRequest
		}
	}

	var registerFunc RegisterAuthFunc
	var funcGetErr error
	// Batch tokens should not be forwarded to perf standby
	if auth.TokenType == logical.TokenTypeBatch {
		registerFunc = c.RegisterAuth
	} else {
		registerFunc, funcGetErr = getAuthRegisterFunc(c)
	}
	if funcGetErr != nil {
		return false, nil, funcGetErr
	}

	leaseGenerated := false
	err = registerFunc(ctx, tokenTTL, reqPath, auth, role)
	switch {
	case err == nil:
		// Only service tokens get a lease; batch tokens are leaseless.
		if auth.TokenType != logical.TokenTypeBatch {
			leaseGenerated = true
		}
	case errors.Is(err, ErrInternalError), isRetryableRPCError(ctx, err):
		// Internal/retryable failures are returned verbatim so the caller can
		// distinguish them from invalid-request errors.
		return false, nil, err
	default:
		return false, logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
	}

	auth.IdentityPolicies = policyutil.SanitizePolicies(identityPolicies[ns.ID], policyutil.DoNotAddDefaultPolicy)
	delete(identityPolicies, ns.ID)
	auth.ExternalNamespacePolicies = identityPolicies
	auth.Policies = allPolicies

	// Count the successful token creation
	ttl_label := metricsutil.TTLBucket(tokenTTL)
	// Do not include namespace path in mount point; already present as separate label.
	mountPointWithoutNs := ns.TrimmedPath(mountPoint)
	c.metricSink.IncrCounterWithLabels(
		[]string{"token", "creation"},
		1,
		[]metrics.Label{
			metricsutil.NamespaceLabel(ns),
			{"auth_method", mountEntry.Type},
			{"mount_point", mountPointWithoutNs},
			{"creation_ttl", ttl_label},
			{"token_type", auth.TokenType.String()},
		},
	)

	return leaseGenerated, resp, nil
}
// isRetryableRPCError reports whether err is a gRPC error worth retrying:
// the server was Unavailable, the call was Canceled for a reason other than
// our own request deadline expiring, or an Unknown error matching the known
// transient "missing HTTP content-type" failure occurred mid-response.
func isRetryableRPCError(ctx context.Context, err error) bool {
	grpcStatus, ok := status.FromError(err)
	if !ok {
		// Not a gRPC status error at all; nothing to retry.
		return false
	}

	code := grpcStatus.Code()
	if code == codes.Unavailable {
		return true
	}
	if code == codes.Canceled {
		// If the request context is canceled through a deadline exceeded, we
		// want to return false. But otherwise, there could have been an EOF
		// or the RPC client context has been canceled, which should be
		// retried.
		if reqCtxErr := ctx.Err(); reqCtxErr != nil {
			return !errors.Is(reqCtxErr, context.DeadlineExceeded)
		}
		return true
	}
	if code == codes.Unknown {
		// Sometimes a missing HTTP content-type error can happen when
		// multiple HTTP statuses have been written, e.g. when the error
		// occurs in the middle of a response. This should be retried.
		return strings.Contains(err.Error(), "malformed header: missing HTTP content-type")
	}
	return false
}
// failedUserLoginProcess records a failed login attempt for the requesting
// user: it stamps the current time, bumps the failure count (resetting it to
// one when the configured counter-reset window has elapsed since the previous
// failure), and persists the entry via updateUserFailedLoginInfo, which also
// writes the lockout storage entry if the user crosses the lockout threshold.
func (c *Core) failedUserLoginProcess(ctx context.Context, mountEntry *MountEntry, req *logical.Request) error {
	// Effective lockout settings for this mount (auth tune >> config file).
	lockoutConfig := c.getUserLockoutConfiguration(mountEntry)

	// Resolve the map key (alias name + mount accessor) for this user.
	userKey, err := c.getLoginUserInfoKey(ctx, mountEntry, req)
	if err != nil {
		return err
	}

	// Look up any existing failure record for that key.
	existingInfo, err := getUserFailedLoginInfo(ctx, c, userKey)
	if err != nil {
		return err
	}

	// Build the updated record, stamped with the current failure time.
	updatedInfo := FailedLoginInfo{
		lastFailedLoginTime: int(time.Now().Unix()),
	}
	if existingInfo == nil {
		// First observed failure for this user.
		updatedInfo.count = 1
	} else {
		updatedInfo.count = existingInfo.count + 1
		// If the counter-reset window has passed since the previous failure,
		// this attempt counts as the start of a new streak.
		previousFailure := time.Unix(int64(existingInfo.lastFailedLoginTime), 0)
		if time.Now().After(previousFailure.Add(lockoutConfig.LockoutCounterReset)) {
			updatedInfo.count = 1
		}
	}

	// Persist the new/updated entry in the map (and storage when locked).
	return updateUserFailedLoginInfo(ctx, c, userKey, &updatedInfo, false)
}
// getLoginUserInfoKey derives the failedUserLoginInfo map key for the user
// attempting to log in: the alias name extracted from the request paired with
// the accessor of the auth mount handling it. A zero-value key and an error
// are returned when the alias name cannot be determined.
func (c *Core) getLoginUserInfoKey(ctx context.Context, mountEntry *MountEntry, req *logical.Request) (FailedLoginUser, error) {
	aliasName, err := c.aliasNameFromLoginRequest(ctx, req)
	if err != nil {
		return FailedLoginUser{}, err
	}
	if aliasName == "" {
		return FailedLoginUser{}, errors.New("failed to determine alias name from login request")
	}
	return FailedLoginUser{
		aliasName:     aliasName,
		mountAccessor: mountEntry.Accessor,
	}, nil
}
// isUserLockoutDisabled checks if user lockout feature to prevent brute forcing is disabled
// Auth types userpass, ldap and approle support this feature
// precedence: environment var setting >> auth tune setting >> config file setting >> default (enabled)
func (c *Core) isUserLockoutDisabled(mountEntry *MountEntry) (bool, error) {
	// Lockout only applies to the supported auth methods; for everything else
	// it is effectively disabled.
	if !strutil.StrListContains(configutil.GetSupportedUserLockoutsAuthMethods(), mountEntry.Type) {
		return true, nil
	}

	// 1. Environment variable (highest precedence). When set, its parsed
	// value fully determines the answer; a parse failure is surfaced.
	if disableUserLockoutEnv := os.Getenv(consts.VaultDisableUserLockout); disableUserLockoutEnv != "" {
		disableUserLockout, err := strconv.ParseBool(disableUserLockoutEnv)
		if err != nil {
			return false, errors.New("Error parsing the environment variable VAULT_DISABLE_USER_LOCKOUT")
		}
		return disableUserLockout, nil
	}

	// 2. Auth tune setting on the mount entry.
	userLockoutConfigFromMount := mountEntry.Config.UserLockoutConfig
	if userLockoutConfigFromMount != nil && userLockoutConfigFromMount.DisableLockout {
		return true, nil
	}

	// 3. Server config file setting for this auth type.
	if c.getUserLockoutFromConfig(mountEntry.Type).DisableLockout {
		return true, nil
	}

	// 4. Default: lockout enabled.
	return false, nil
}
// isUserLocked determines if the login user is locked.
// It first consults the in-memory userFailedLoginInfo map; when no entry is
// present there it falls back to the lockout entry persisted in barrier
// storage (which may exist from before a restart or from another node).
func (c *Core) isUserLocked(ctx context.Context, mountEntry *MountEntry, req *logical.Request) (locked bool, err error) {
	// get userFailedLoginInfo map key for login user
	loginUserInfoKey, err := c.getLoginUserInfoKey(ctx, mountEntry, req)
	if err != nil {
		return false, err
	}

	// get entry from userFailedLoginInfo map for the key
	userFailedLoginInfo, err := getUserFailedLoginInfo(ctx, c, loginUserInfoKey)
	if err != nil {
		return false, err
	}

	// Effective lockout settings for this mount (auth tune >> config file).
	userLockoutConfiguration := c.getUserLockoutConfiguration(mountEntry)

	switch userFailedLoginInfo {
	case nil:
		// entry not found in userFailedLoginInfo map, check storage to re-verify
		ns, err := namespace.FromContext(ctx)
		if err != nil {
			return false, fmt.Errorf("could not parse namespace from http context: %w", err)
		}
		// Storage layout: <coreLockedUsersPath><nsID>/<mountAccessor>/<aliasName>
		storageUserLockoutPath := fmt.Sprintf(coreLockedUsersPath+"%s/%s/%s", ns.ID, loginUserInfoKey.mountAccessor, loginUserInfoKey.aliasName)
		existingEntry, err := c.barrier.Get(ctx, storageUserLockoutPath)
		if err != nil {
			return false, err
		}
		var lastLoginTime int
		if existingEntry == nil {
			// no storage entry found, user is not locked
			return false, nil
		}
		err = jsonutil.DecodeJSON(existingEntry.Value, &lastLoginTime)
		if err != nil {
			return false, err
		}

		// if time passed from last login time is within lockout duration, the user is locked
		if time.Now().Unix()-int64(lastLoginTime) < int64(userLockoutConfiguration.LockoutDuration.Seconds()) {
			// user locked
			return true, nil
		}

		// else user is not locked. Entry is stale, this will be removed from storage during cleanup
		// by the background thread

	default:
		// entry found in userFailedLoginInfo map, check if the user is locked:
		// the failure count must have reached the threshold AND the last
		// failure must still be within the lockout duration.
		isCountOverLockoutThreshold := userFailedLoginInfo.count >= uint(userLockoutConfiguration.LockoutThreshold)
		isWithinLockoutDuration := time.Now().Unix()-int64(userFailedLoginInfo.lastFailedLoginTime) < int64(userLockoutConfiguration.LockoutDuration.Seconds())
		if isCountOverLockoutThreshold && isWithinLockoutDuration {
			// user locked
			return true, nil
		}
	}
	return false, nil
}
// getUserLockoutConfiguration resolves the effective user lockout settings
// for a mount entry. Config-file values (resolved by getUserLockoutFromConfig,
// which already applies its own config-file precedence and defaults) form the
// base; any field explicitly set via auth tune on the mount then overrides
// the corresponding base value.
// precedence: auth tune >> config file values for auth type >> config file
// values for all type >> default user lockout values
func (c *Core) getUserLockoutConfiguration(mountEntry *MountEntry) UserLockoutConfig {
	// Base values from the config file (or defaults).
	resolved := c.getUserLockoutFromConfig(mountEntry.Type)

	tuned := mountEntry.Config.UserLockoutConfig
	if tuned == nil {
		// Nothing tuned on this mount; config-file values stand as-is.
		return resolved
	}

	// Zero means "not set via auth tune" for the numeric fields, and false
	// means "not set" for DisableLockout, so only non-zero/true tuned fields
	// replace the base values.
	if tuned.LockoutThreshold != 0 {
		resolved.LockoutThreshold = tuned.LockoutThreshold
	}
	if tuned.LockoutDuration != 0 {
		resolved.LockoutDuration = tuned.LockoutDuration
	}
	if tuned.LockoutCounterReset != 0 {
		resolved.LockoutCounterReset = tuned.LockoutCounterReset
	}
	if tuned.DisableLockout {
		resolved.DisableLockout = true
	}
	return resolved
}
// getUserLockoutFromConfig resolves the user lockout configuration for the
// given mount type from the server config file.
// Resolution order while scanning the configured stanzas:
//   - a stanza whose type matches mountType exactly wins immediately;
//   - an "all" stanza replaces the built-in defaults but scanning continues,
//     since a later mount-type-specific stanza still takes precedence.
//
// If no stanza matches, the "all" values (or, absent those, the built-in
// defaults) are returned.
func (c *Core) getUserLockoutFromConfig(mountType string) UserLockoutConfig {
	// Built-in defaults, used when nothing relevant is configured.
	result := UserLockoutConfig{
		LockoutThreshold:    configutil.UserLockoutThresholdDefault,
		LockoutDuration:     configutil.UserLockoutDurationDefault,
		LockoutCounterReset: configutil.UserLockoutCounterResetDefault,
		DisableLockout:      configutil.DisableUserLockoutDefault,
	}

	rawConf := c.rawConfig.Load()
	if rawConf == nil {
		return result
	}
	lockoutStanzas := rawConf.(*server.Config).UserLockouts
	if lockoutStanzas == nil {
		return result
	}

	for _, stanza := range lockoutStanzas {
		// NOTE: "all" is checked first so that when mountType itself is
		// "all", the stanza updates the fallback rather than returning early
		// (matching the original switch-case evaluation order).
		if stanza.Type == "all" {
			result = UserLockoutConfig{
				LockoutThreshold:    stanza.LockoutThreshold,
				LockoutDuration:     stanza.LockoutDuration,
				LockoutCounterReset: stanza.LockoutCounterReset,
				DisableLockout:      stanza.DisableLockout,
			}
		} else if stanza.Type == mountType {
			return UserLockoutConfig{
				LockoutThreshold:    stanza.LockoutThreshold,
				LockoutDuration:     stanza.LockoutDuration,
				LockoutCounterReset: stanza.LockoutCounterReset,
				DisableLockout:      stanza.DisableLockout,
			}
		}
	}
	return result
}
// buildMfaEnforcementResponse converts one MFA enforcement config into the
// logical.MFAConstraintAny structure returned to clients during two-phase
// login MFA: one MFAMethodID per method referenced by the enforcement.
// onlyMFAEnforcement must be true when this enforcement is the only one that
// applies to the login request; TOTP self-enrollment is offered only then.
func (c *Core) buildMfaEnforcementResponse(eConfig *mfa.MFAEnforcementConfig, entity *identity.Entity, onlyMFAEnforcement bool) (*logical.MFAConstraintAny, error) {
	if eConfig == nil {
		return nil, fmt.Errorf("MFA enforcement config is nil")
	}
	if entity == nil {
		return nil, fmt.Errorf("entity is nil")
	}
	mfaAny := &logical.MFAConstraintAny{
		Any: []*logical.MFAMethodID{},
	}
	for _, methodID := range eConfig.MFAMethodIDs {
		mConfig, err := c.loginMFABackend.MemDBMFAConfigByID(methodID)
		if err != nil {
			return nil, fmt.Errorf("failed to get methodID %s from MFA config table, error: %v", methodID, err)
		}
		// Duo methods may be configured to require a passcode in addition to
		// the push; surface that so the client can prompt for one.
		var duoUsePasscode bool
		if mConfig.Type == mfaMethodTypeDuo {
			duoConf, ok := mConfig.Config.(*mfa.Config_DuoConfig)
			if !ok {
				return nil, fmt.Errorf("invalid MFA configuration type, expected DuoConfig")
			}
			duoUsePasscode = duoConf.DuoConfig.UsePasscode
		}
		// TOTP self-enrollment (enterprise only): offered when enabled on the
		// method config, the entity has no secret for it yet, and this is the
		// sole enforcement applying to the login.
		allowSelfEnrollment := false
		if mConfig.Type == mfaMethodTypeTOTP && constants.IsEnterprise {
			totpConf, ok := mConfig.Config.(*mfa.Config_TOTPConfig)
			if !ok {
				return nil, fmt.Errorf("invalid MFA configuration type, expected TOTPConfig")
			}
			enrollmentEnabled := totpConf.TOTPConfig.GetEnableSelfEnrollment()
			_, entityHasMFASecretForMethodID := entity.MFASecrets[methodID]
			if enrollmentEnabled && !entityHasMFASecretForMethodID && onlyMFAEnforcement {
				// If enable_self_enrollment setting on the TOTP MFA method config is set to
				// true and the entity does not have an MFA secret yet, we will allow
				// self-service enrollment as long as only a single MFA enforcement applies to
				// this login request.
				allowSelfEnrollment = true
			}
		}
		mfaMethod := &logical.MFAMethodID{
			Type: mConfig.Type,
			ID:   methodID,
			// TOTP always takes a passcode; Duo only when configured to.
			UsesPasscode: mConfig.Type == mfaMethodTypeTOTP || duoUsePasscode,
			Name:         mConfig.Name,
			// This will be used by the client to determine whether it should offer the user
			// a way to generate an MFA secret for this method.
			SelfEnrollmentEnabled: allowSelfEnrollment,
		}
		mfaAny.Any = append(mfaAny.Any, mfaMethod)
	}
	return mfaAny, nil
}
// RegisterAuth uses a logical.Auth object to create a token entry in the token
// store, and registers a corresponding token lease to the expiration manager.
// role is the login role used as part of the creation of the token entry. If not
// relevant, can be omitted (by being provided as "").
// On success auth is populated with the resulting client token, accessor, TTL
// and orphan status.
func (c *Core) RegisterAuth(ctx context.Context, tokenTTL time.Duration, path string, auth *logical.Auth, role string) error {
	// We first assign token policies to what was returned from the backend
	// via auth.Policies. Then, we get the full set of policies into
	// auth.Policies from the backend + entity information -- this is not
	// stored in the token, but we perform sanity checks on it and return
	// that information to the user.

	// Generate a token
	ns, err := namespace.FromContext(ctx)
	if err != nil {
		return err
	}
	te := logical.TokenEntry{
		Path:           path,
		Meta:           auth.Metadata,
		DisplayName:    auth.DisplayName,
		CreationTime:   time.Now().Unix(),
		TTL:            tokenTTL,
		NumUses:        auth.NumUses,
		EntityID:       auth.EntityID,
		BoundCIDRs:     auth.BoundCIDRs,
		Policies:       auth.TokenPolicies,
		NamespaceID:    ns.ID,
		ExplicitMaxTTL: auth.ExplicitMaxTTL,
		Period:         auth.Period,
		Type:           auth.TokenType,
	}

	// A zero TTL means a never-expiring token, which only root tokens may be.
	if te.TTL == 0 && (len(te.Policies) != 1 || te.Policies[0] != "root") {
		c.logger.Error("refusing to create a non-root zero TTL token")
		return ErrInternalError
	}

	if err := c.tokenStore.create(ctx, &te); err != nil {
		c.logger.Error("failed to create token", "error", err)
		return possiblyWrapOverloadedError("failed to create token", err)
	}

	// Populate the client token, accessor, and TTL
	auth.ClientToken = te.ID
	auth.Accessor = te.Accessor
	auth.TTL = te.TTL
	auth.Orphan = te.Parent == ""

	switch auth.TokenType {
	case logical.TokenTypeBatch:
		// Ensure it's not marked renewable since it isn't
		auth.Renewable = false
	case logical.TokenTypeService:
		// Register with the expiration manager
		if err := c.expiration.RegisterAuth(ctx, &te, auth, role); err != nil {
			// Lease registration failed: best-effort revoke of the token we
			// just created so it does not linger without a lease.
			if err := c.tokenStore.revokeOrphan(ctx, te.ID); err != nil {
				c.logger.Warn("failed to clean up token lease during login request", "request_path", path, "error", err)
			}
			c.logger.Error("failed to register token lease during login request", "request_path", path, "error", err)
			return possiblyWrapOverloadedError("failed to register token lease during login request", err)
		}
		// Clients get the external ID when one exists (SSC token form).
		if te.ExternalID != "" {
			auth.ClientToken = te.ExternalID
		}

		// Successful login, remove any entry from userFailedLoginInfo map
		// if it exists. This is done for service tokens (for oss) here.
		// For ent it is taken care by registerAuth RPC calls.
		if auth.Alias != nil {
			loginUserInfoKey := FailedLoginUser{
				aliasName:     auth.Alias.Name,
				mountAccessor: auth.Alias.MountAccessor,
			}

			// We don't need to try to delete the lockedUsers storage entry, since we're
			// processing a login request. If a login attempt is allowed, it means the user is
			// unlocked and we only add storage entry when the user gets locked.
			err = updateUserFailedLoginInfo(ctx, c, loginUserInfoKey, nil, true)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// LocalGetUserFailedLoginInfo gets the failed login information for a user based on alias name and mountAccessor.
// It returns nil when no entry exists for the key.
func (c *Core) LocalGetUserFailedLoginInfo(ctx context.Context, userKey FailedLoginUser) *FailedLoginInfo {
	c.userFailedLoginInfoLock.Lock()
	defer c.userFailedLoginInfoLock.Unlock()
	// A missing key yields the zero value of the map's pointer element type,
	// i.e. nil, which matches the "no failure info recorded" contract.
	return c.userFailedLoginInfo[userKey]
}
// LocalUpdateUserFailedLoginInfo updates the failed login information for a user based on alias name and mountAccessor.
// When deleteEntry is false the entry is written to the in-memory map and, if
// the failure count has reached the lockout threshold, a lockout entry is
// persisted to barrier storage. When deleteEntry is true the map entry is
// removed (storage is intentionally left alone; see callers).
func (c *Core) LocalUpdateUserFailedLoginInfo(ctx context.Context, userKey FailedLoginUser, failedLoginInfo *FailedLoginInfo, deleteEntry bool) error {
	// The lock covers both the map update and the threshold check/storage
	// write below.
	c.userFailedLoginInfoLock.Lock()
	defer c.userFailedLoginInfoLock.Unlock()
	switch deleteEntry {
	case false:
		// update entry in the map
		c.userFailedLoginInfo[userKey] = failedLoginInfo

		// get the user lockout configuration for the user
		mountEntry := c.router.MatchingMountByAccessor(userKey.mountAccessor)
		if mountEntry == nil {
			// Mount no longer resolvable: fall back to a bare entry in the
			// root namespace so default lockout config still applies.
			mountEntry = &MountEntry{}
			mountEntry.NamespaceID = namespace.RootNamespaceID
		}
		userLockoutConfiguration := c.getUserLockoutConfiguration(mountEntry)

		// if failed login count has reached threshold, create a storage entry as the user got locked
		if failedLoginInfo.count >= uint(userLockoutConfiguration.LockoutThreshold) {
			// user locked
			// Storage layout: <coreLockedUsersPath><nsID>/<mountAccessor>/<aliasName>
			storageUserLockoutPath := fmt.Sprintf(coreLockedUsersPath+"%s/%s/%s", mountEntry.NamespaceID, userKey.mountAccessor, userKey.aliasName)

			compressedBytes, err := jsonutil.EncodeJSONAndCompress(failedLoginInfo.lastFailedLoginTime, nil)
			if err != nil {
				c.logger.Error("failed to encode or compress failed login user entry", "error", err)
				return err
			}

			// Create an entry
			entry := &logical.StorageEntry{
				Key:   storageUserLockoutPath,
				Value: compressedBytes,
			}

			// Write to the physical backend
			if err := c.barrier.Put(ctx, entry); err != nil {
				c.logger.Error("failed to persist failed login user entry", "error", err)
				return err
			}
		}
	default:
		// delete the entry from the map, if no key exists it is no-op
		delete(c.userFailedLoginInfo, userKey)
	}
	return nil
}
// PopulateTokenEntry looks up req.ClientToken in the token store and uses
// it to set other fields in req. Does nothing if ClientToken is empty
// or a JWT token, or for service tokens that don't exist in the token store.
// Should be called with read stateLock held.
func (c *Core) PopulateTokenEntry(ctx context.Context, req *logical.Request) error {
	if req.ClientToken == "" {
		return nil
	}

	// Also attach the accessor if we have it. This doesn't fail if it
	// doesn't exist because the request may be to an unauthenticated
	// endpoint/login endpoint where a bad current token doesn't matter, or
	// a token from a Vault version pre-accessors. We ignore errors for
	// JWTs.
	token := req.ClientToken
	var err error
	// Preserve the token exactly as it arrived; ClientToken below may be
	// rewritten to the decoded (inner) form for SSC tokens.
	req.InboundSSCToken = token
	decodedToken := token
	if IsSSCToken(token) {
		// If ForwardToActive is set to ForwardSSCTokenToActive, we ignore
		// whether the endpoint is a login request, as since we have the token
		// forwarded to us, we should treat it as an unauthenticated endpoint
		// and ensure the token is populated too regardless.
		// Notably, this is important for some endpoints, such as endpoints
		// such as sys/ui/mounts/internal, which is unauthenticated but a token
		// may be provided to be used.
		// Without the check to see if
		// c.ForwardToActive() == ForwardSSCTokenToActive unauthenticated
		// requests that do not use a token but were provided one anyway
		// could fail with a 412.
		// We only follow this behaviour if we're a perf standby, as
		// this behaviour only makes sense in that case as only they
		// could be missing the token population.
		// Without ForwardToActive being set to ForwardSSCTokenToActive,
		// behaviours that rely on this functionality also wouldn't make
		// much sense, as they would fail with 412 required index not present
		// as perf standbys aren't guaranteed to have the WAL state
		// for new tokens.
		unauth := c.isLoginRequest(ctx, req)
		if c.ForwardToActive() == ForwardSSCTokenToActive && c.perfStandby {
			unauth = false
		}
		decodedToken, err = c.CheckSSCToken(ctx, token, unauth, c.perfStandby)
		// If we receive an error from CheckSSCToken, we can assume the token is bad somehow, and the client
		// should receive a 403 bad token error like they do for all other invalid tokens, unless the error
		// specifies that we should forward the request or retry the request.
		if err != nil {
			if errors.Is(err, logical.ErrPerfStandbyPleaseForward) || errors.Is(err, logical.ErrMissingRequiredState) {
				return err
			}
			return logical.ErrPermissionDenied
		}
	}
	req.ClientToken = decodedToken
	// We ignore the token returned from CheckSSCToken here as Lookup also
	// decodes the SSCT, and it may need the original SSCT to check state.
	te, err := c.LookupToken(ctx, token)
	if err != nil {
		// If we're missing required state, return that error
		// as-is to the client
		if errors.Is(err, logical.ErrPerfStandbyPleaseForward) || errors.Is(err, logical.ErrMissingRequiredState) {
			return err
		}
		// If we have two dots but the second char is a dot it's a vault
		// token of the form s.SOMETHING.nsid, not a JWT
		if !IsJWT(token) {
			return fmt.Errorf("error performing token check: %w", err)
		}
	}
	if err == nil && te != nil {
		// Token resolved: propagate accessor/use-count and cache the
		// entry on the request for downstream handlers.
		req.ClientTokenAccessor = te.Accessor
		req.ClientTokenRemainingUses = te.NumUses
		req.SetTokenEntry(te)
	}
	return nil
}
// CheckSSCToken resolves a (possibly server-side-consistent) token to its
// inner form. On authenticated paths the full SSC validation is applied; on
// unauthenticated paths a supplied token is decoded on a best-effort basis
// only, since the token is incidental rather than a means of auth but may
// still be consulted by endpoints such as sys/ui/mounts/internal.
func (c *Core) CheckSSCToken(ctx context.Context, token string, unauth bool, isPerfStandby bool) (string, error) {
	if !unauth || token == "" {
		// Authenticated request (or no token supplied): run the full
		// signature/WAL checks.
		return c.checkSSCTokenInternal(ctx, token, isPerfStandby)
	}
	// This token shouldn't really be here, but it arrived with the request,
	// so extract the inner token without any SSC checks. We must return
	// something the token store can look up (so CheckToken finds the entry
	// and lease); if decoding fails, hand back the token unchanged.
	inner, err := c.DecodeSSCToken(token)
	if err != nil || inner == "" {
		return token, nil
	}
	return inner, nil
}
// DecodeSSCToken returns the random part of an SSCToken without
// performing any signature or WAL checks. Batch and old style service
// tokens (prefixes "b.", "s.", "hvb.") pass through unchanged.
func (c *Core) DecodeSSCToken(token string) (string, error) {
	if !IsSSCToken(token) {
		// Not a server-side-consistent token: nothing to decode.
		return token, nil
	}
	decoded, err := DecodeSSCTokenInternal(token)
	if err != nil {
		return "", err
	}
	return decoded.Random, nil
}
// DecodeSSCTokenInternal is a helper used to get the inner part of a SSC token
// without checking the token signature or the WAL index. It returns an error
// for tokens that lack the service-token prefix, cannot be base64-decoded, or
// fail protobuf unmarshalling.
func DecodeSSCTokenInternal(token string) (*tokens.Token, error) {
	// Skip batch and old style service tokens. These can have the prefix "b.",
	// "s." (for old tokens) or "hvb."
	if !strings.HasPrefix(token, consts.ServiceTokenPrefix) {
		return nil, fmt.Errorf("not service token")
	}

	// Only the part after the service-token prefix is base64 payload.
	// Derive the offset from the prefix itself instead of hard-coding its
	// length, so a prefix change cannot silently corrupt decoding.
	suffixToken := token[len(consts.ServiceTokenPrefix):]
	tokenBytes, err := base64.RawURLEncoding.DecodeString(suffixToken)
	if err != nil {
		return nil, fmt.Errorf("can't decode token")
	}

	signedToken := &tokens.SignedToken{}
	if err := proto.Unmarshal(tokenBytes, signedToken); err != nil {
		return nil, err
	}

	// The signed envelope wraps a second protobuf carrying the plain token.
	plainToken := &tokens.Token{}
	if err := proto.Unmarshal([]byte(signedToken.Token), plainToken); err != nil {
		return nil, err
	}
	return plainToken, nil
}
func (c *Core) checkSSCTokenInternal(ctx context.Context, token string, isPerfStandby bool) (string, error) {
signedToken := &tokens.SignedToken{}
// Skip batch and old style service tokens. These can have the prefix "b.",
// "s." (for old tokens) or "hvb."
if !strings.HasPrefix(token, consts.ServiceTokenPrefix) {
return token, nil
}
// Check token length to guess if this is an server side consistent token or not.
// Note that even when the DisableSSCTokens flag is set, index
// bearing tokens that have already been given out may still be used.
if !IsSSCToken(token) {
return token, nil
}
// Consider the suffix of the token only when unmarshalling
suffixToken := token[4:]
tokenBytes, err := base64.RawURLEncoding.DecodeString(suffixToken)
if err != nil {
c.logger.Warn("cannot decode token", "error", err)
return token, nil
}
err = proto.Unmarshal(tokenBytes, signedToken)
if err != nil {
// Log a warning here, but don't return an error. This is because we want don't
// want to forward the request to the active node if the token is invalid.
c.logger.Debug("error occurred when unmarshalling ssc token: %w", err)
return token, nil
}
hm, err := c.tokenStore.CalculateSignedTokenHMAC(signedToken.Token)
if !hmac.Equal(hm, signedToken.Hmac) {
// As above, don't return an error so that the request is handled like normal,
// and handled by the node that received it.
c.logger.Debug("token mac is incorrect", "token", signedToken.Token)
return token, nil
}
plainToken := &tokens.Token{}
err = proto.Unmarshal([]byte(signedToken.Token), plainToken)
if err != nil {
return "", err
}
// Disregard SSCT on perf-standbys for non-raft storage
if c.perfStandby && c.getRaftBackend() == nil {
return plainToken.Random, nil
}
ep := int(plainToken.IndexEpoch)
if ep < c.tokenStore.GetSSCTokensGenerationCounter() {
return plainToken.Random, nil
}
requiredWalState := &logical.WALState{ClusterID: c.ClusterID(), LocalIndex: plainToken.LocalIndex, ReplicatedIndex: 0}
if c.HasWALState(requiredWalState, isPerfStandby) {
return plainToken.Random, nil
}
// Make sure to forward the request instead of checking the token if the flag
// is set and we're on a perf standby
if c.ForwardToActive() == ForwardSSCTokenToActive && isPerfStandby {
return "", logical.ErrPerfStandbyPleaseForward
}
// In this case, the server side consistent token cannot be used on this node. We return the appropriate
// status code.
return "", logical.ErrMissingRequiredState
} | go | github | https://github.com/hashicorp/vault | vault/request_handling.go |
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bundle\FrameworkBundle\Tests\Functional\Bundle\TestBundle\Controller;
use Symfony\Contracts\Translation\TranslatorInterface;
/**
 * Fixture controller for the FrameworkBundle functional tests.
 *
 * Calls the translator with a fixed message id and deliberately discards
 * the result; presumably the tests only care that the id is requested /
 * extractable, not what it renders to — confirm against the test suite.
 */
class TransController
{
    public function index(TranslatorInterface $translator)
    {
        // Return value intentionally ignored; only the trans() call matters.
        $translator->trans('hello_from_controller');
    }
}
# -*- test-case-name: twisted.test.test_formmethod -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Form-based method objects.
This module contains support for descriptive method signatures that can be used
to format methods.
"""
import calendar
class FormException(Exception):
    """Raised when calling a form method fails.

    Positional arguments are passed straight through to Exception;
    keyword arguments are stored in ``descriptions`` as per-field
    error descriptions.
    """
    def __init__(self, *args, **kwargs):
        self.descriptions = kwargs
        Exception.__init__(self, *args)
class InputError(FormException):
    """
    An error occurred with some input.

    Raised by Argument.coerce implementations when a submitted value
    cannot be converted to the argument's type.
    """
class Argument:
    """Base class for form arguments.

    Carries the metadata shared by every argument type: name, default
    value, short/long descriptions, presentation hints and whether an
    empty value is acceptable.
    """

    # default value for argument, if no other default is given
    defaultDefault = None

    def __init__(self, name, default=None, shortDesc=None,
                 longDesc=None, hints=None, allowNone=1):
        self.name = name
        self.allowNone = allowNone
        if default is None:
            default = self.defaultDefault
        self.default = default
        self.shortDesc = shortDesc
        self.longDesc = longDesc
        self.hints = hints or {}

    def addHints(self, **kwargs):
        """Merge the given keyword arguments into the hint dictionary."""
        self.hints.update(kwargs)

    def getHint(self, name, default=None):
        """Return the hint called `name`, or `default` when unset."""
        try:
            return self.hints[name]
        except KeyError:
            return default

    def getShortDescription(self):
        """Return the short description, falling back to the capitalized name."""
        if self.shortDesc:
            return self.shortDesc
        return self.name.capitalize()

    def getLongDescription(self):
        """Return the long description, or an empty string when unset."""
        return self.longDesc or ''

    def coerce(self, val):
        """Convert the value to the correct format."""
        raise NotImplementedError("implement in subclass")
class String(Argument):
    """A single string, optionally constrained to a length range.

    `min` is the smallest accepted length (inclusive); `max` is the
    largest, or None for no upper limit.
    """
    defaultDefault = ''
    min = 0
    max = None

    def __init__(self, name, default=None, shortDesc=None,
                 longDesc=None, hints=None, allowNone=1, min=0, max=None):
        Argument.__init__(self, name, default=default, shortDesc=shortDesc,
                          longDesc=longDesc, hints=hints, allowNone=allowNone)
        self.min = min
        self.max = max

    def coerce(self, val):
        """Coerce val to a string, raising InputError when the result
        violates the configured length bounds."""
        s = str(val)
        if len(s) < self.min:
            raise InputError("Value must be at least %s characters long"
                             % self.min)
        # `is not None` instead of `!= None`; a max of 0 must still apply.
        if self.max is not None and len(s) > self.max:
            raise InputError("Value must be at most %s characters long"
                             % self.max)
        # Return the already-computed string rather than converting twice.
        return s
class Text(String):
    """A long string.

    Identical to String; the distinct type presumably lets renderers
    choose a larger input widget — confirm against the form renderers.
    """
class Password(String):
    """A string which should be obscured when input.
    """
class VerifiedPassword(String):
    """A string that should be obscured when input and needs verification.

    coerce expects a two-item sequence containing the password typed
    twice and succeeds only when both entries match and the length
    bounds inherited from String hold.
    """

    def coerce(self, vals):
        """Return the verified password string or raise InputError."""
        if len(vals) != 2 or vals[0] != vals[1]:
            raise InputError("Please enter the same password twice.")
        s = str(vals[0])
        if len(s) < self.min:
            raise InputError("Value must be at least %s characters long"
                             % self.min)
        # `is not None` instead of `!= None`, matching String.coerce.
        if self.max is not None and len(s) > self.max:
            raise InputError("Value must be at most %s characters long"
                             % self.max)
        return s
class Hidden(String):
    """A string which is not displayed.

    The passed default is used as the value.
    """
class Integer(Argument):
    """A single integer.

    A blank (whitespace-only) value coerces to None when allowNone
    is set.
    """
    defaultDefault = None

    def __init__(self, name, allowNone=1, default=None, shortDesc=None,
                 longDesc=None, hints=None):
        # allowNone comes first here (unlike Argument) purely to preserve
        # the historical positional-argument order for existing callers.
        Argument.__init__(self, name, default, shortDesc, longDesc, hints,
                          allowNone)

    def coerce(self, val):
        """Convert val to an int; empty input becomes None when allowed."""
        if not val.strip() and self.allowNone:
            return None
        try:
            return int(val)
        except ValueError:
            raise InputError(
                "%s is not valid, please enter a whole number, e.g. 10" % val)
class IntegerRange(Integer):
    """An integer constrained to the inclusive range [min, max]."""

    def __init__(self, name, min, max, allowNone=1, default=None,
                 shortDesc=None, longDesc=None, hints=None):
        self.min = min
        self.max = max
        Integer.__init__(self, name, allowNone=allowNone, default=default,
                         shortDesc=shortDesc, longDesc=longDesc, hints=hints)

    def coerce(self, val):
        """Coerce to int and enforce the bounds; '' -> None when allowed."""
        result = Integer.coerce(self, val)
        if result is None and self.allowNone:
            return None
        if result < self.min:
            raise InputError("Value %s is too small, it should be at least %s"
                             % (result, self.min))
        if result > self.max:
            raise InputError("Value %s is too large, it should be at most %s"
                             % (result, self.max))
        return result
class Float(Argument):
    """A single floating point number.

    A blank (whitespace-only) value coerces to None when allowNone
    is set.
    """
    defaultDefault = None

    def __init__(self, name, allowNone=1, default=None, shortDesc=None,
                 longDesc=None, hints=None):
        # allowNone comes first here (unlike Argument) purely to preserve
        # the historical positional-argument order for existing callers.
        Argument.__init__(self, name, default, shortDesc, longDesc, hints,
                          allowNone)

    def coerce(self, val):
        """Convert val to a float; empty input becomes None when allowed."""
        if not val.strip() and self.allowNone:
            return None
        try:
            return float(val)
        except ValueError:
            raise InputError("Invalid float: %s" % val)
class Choice(Argument):
    """A choice between enumerated values.

    `choices` is a list of (tag, value, description) tuples: the tag is
    what the form submits, the value is what coerce returns, and the
    description is the label shown for the entry.  When no default is
    supplied, the first choice's value becomes the default.  Only one
    item can (should) be selected at once.
    """

    def __init__(self, name, choices=[], default=None, shortDesc=None,
                 longDesc=None, hints=None, allowNone=1):
        self.choices = choices
        # Bug fix: the original signature used a mutable default
        # (default=[]) and appended to it, silently sharing one list
        # across every Choice created without an explicit default.  Use
        # None as the "not given" marker and build a fresh list instead.
        if default is None:
            default = []
        if choices and not default:
            default = [choices[0][1]]
        Argument.__init__(self, name, default, shortDesc, longDesc, hints,
                          allowNone=allowNone)

    def coerce(self, inIdent):
        """Return the value whose tag matches inIdent, else InputError."""
        for ident, val, desc in self.choices:
            if ident == inIdent:
                return val
        raise InputError("Invalid Choice: %s" % inIdent)
class Flags(Argument):
    """The result of a checkbox group or multi-menu.

    `flags` is a list of (tag, value, description) tuples: the tag is
    what the form submits, the value is what coerce yields, and the
    description is the label for the entry.  `default` is a list of
    values (second elements in flags); when unspecified, nothing is
    initially selected.  Several items may be selected at once.
    """

    def __init__(self, name, flags=(), default=(), shortDesc=None,
                 longDesc=None, hints=None, allowNone=1):
        self.flags = flags
        Argument.__init__(self, name, default, shortDesc, longDesc, hints,
                          allowNone=allowNone)

    def coerce(self, inFlagKeys):
        """Translate each submitted tag into its flag value."""
        if not inFlagKeys:
            return []
        # First occurrence wins when a tag is duplicated, matching the
        # original first-match scan.
        lookup = {}
        for flagKey, flagVal, flagDesc in self.flags:
            lookup.setdefault(flagKey, flagVal)
        outFlags = []
        for inFlagKey in inFlagKeys:
            try:
                outFlags.append(lookup[inFlagKey])
            except KeyError:
                raise InputError("Invalid Flag: %s" % inFlagKey)
        return outFlags
class CheckGroup(Flags):
    """A group of checkboxes; behaves exactly like Flags."""
    pass
class RadioGroup(Choice):
    """A group of radio buttons; behaves exactly like Choice."""
    pass
class Boolean(Argument):
    """Coerce truthy/falsy form input to 1 or 0.

    Empty values and the strings no/n/f/false/0 (any case) map to 0;
    everything else maps to 1.
    """

    def coerce(self, inVal):
        if not inVal:
            return 0
        if str(inVal).lower() in ('no', 'n', 'f', 'false', '0'):
            return 0
        return 1
class File(Argument):
    """An uploaded file; coerce passes the file object through unchanged."""

    def __init__(self, name, allowNone=1, shortDesc=None, longDesc=None,
                 hints=None):
        self.allowNone = allowNone
        Argument.__init__(self, name, None, shortDesc, longDesc, hints)

    def coerce(self, file):
        """Return the file, None for a missing optional file, or raise."""
        if file:
            return file
        if self.allowNone:
            return None
        raise InputError("Invalid File")
def positiveInt(x):
    """Convert x to an int, raising ValueError unless it is > 0."""
    value = int(x)
    if value <= 0:
        raise ValueError
    return value
class Date(Argument):
    """A date -- (year, month, day) tuple of positive ints."""
    defaultDefault = None

    def __init__(self, name, allowNone=1, default=None, shortDesc=None,
                 longDesc=None, hints=None):
        # Bug fix: install the non-None fallback *before* Argument.__init__
        # runs, because that is where self.default is resolved from
        # defaultDefault.  Previously it was set afterwards, so a
        # Date(..., allowNone=0) still ended up with a None default.
        if not allowNone:
            self.defaultDefault = (1970, 1, 1)
        Argument.__init__(self, name, default, shortDesc, longDesc, hints)
        # Argument.__init__ was called without allowNone (historical
        # positional order), so restore the requested value afterwards.
        self.allowNone = allowNone

    def coerce(self, args):
        """Return tuple of ints (year, month, day).

        A ("", "", "") input coerces to None when allowNone is set;
        anything non-positive, non-numeric, or calendar-invalid raises
        InputError.
        """
        if tuple(args) == ("", "", "") and self.allowNone:
            return None
        try:
            year, month, day = map(positiveInt, args)
        except ValueError:
            raise InputError("Invalid date")
        if (month, day) == (2, 29):
            # February 29 exists only in leap years.
            if not calendar.isleap(year):
                raise InputError("%d was not a leap year" % year)
            else:
                return year, month, day
        try:
            mdays = calendar.mdays[month]
        except IndexError:
            raise InputError("Invalid date")
        if day > mdays:
            raise InputError("Invalid date")
        return year, month, day
class Submit(Choice):
    """Submit button or a reasonable facsimile thereof."""

    def __init__(self, name, choices=[("Submit", "submit", "Submit form")],
                 reset=0, shortDesc=None, longDesc=None, allowNone=0,
                 hints=None):
        Choice.__init__(self, name, choices=choices, shortDesc=shortDesc,
                        longDesc=longDesc, hints=hints)
        self.allowNone = allowNone
        self.reset = reset

    def coerce(self, value):
        """Return the chosen value, or None for an optional empty submit."""
        if not value and self.allowNone:
            return None
        return Choice.coerce(self, value)
class PresentationHint:
    """
    A hint to a particular system.

    Marker base class; carries no behaviour of its own.
    """
class MethodSignature:
    """An ordered collection of Argument objects describing a callable."""

    def __init__(self, *sigList):
        """Store the given arguments as the signature, in order."""
        self.methodSignature = sigList

    def getArgument(self, name):
        """Return the argument named `name`, or None when absent."""
        for argument in self.methodSignature:
            if argument.name == name:
                return argument
        return None

    def method(self, callable, takesRequest=False):
        """Wrap `callable` in a FormMethod carrying this signature."""
        return FormMethod(self, callable, takesRequest)
class FormMethod:
    """A callable object with a signature.

    Pairs a MethodSignature with the callable it describes;
    `takesRequest` records whether the callable expects the request
    object as an extra argument.
    """

    def __init__(self, signature, callable, takesRequest=False):
        self.signature = signature
        self.callable = callable
        self.takesRequest = takesRequest

    def getArgs(self):
        """Return the signature's arguments as a tuple."""
        return tuple(self.signature.methodSignature)

    def call(self, *args, **kw):
        """Invoke the wrapped callable with the given arguments."""
        return self.callable(*args, **kw)
#
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <niemeyer@conectiva.com>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart import Error, _
import pexpect
import time
import sys
class SSH:
    """Thin wrapper around the ssh/scp command-line tools driven via pexpect.

    Answers interactive password / key-passphrase / host-key prompts so
    the commands can run unattended.
    """

    def __init__(self, username, host, password=None, getpassword=None):
        # getpassword is an optional zero-argument callable used to obtain
        # the password lazily when none was supplied up front.
        self.username = username
        self.host = host
        self.password = password
        self.getpassword = getpassword

    def _exec(self, command, **kwargs):
        """Spawn `command` under pexpect, answer authentication prompts,
        and return (exitstatus, collected-output-string).

        NOTE(review): extra keyword arguments are accepted but never
        used (see the noerror=1 passed by exists()) -- confirm intended.
        """
        p = pexpect.spawn(command, timeout=1)
        p.setecho(False)
        outlist = []
        while True:
            # Wait for completion, a quiet second, or one of the prompts.
            i = p.expect([pexpect.EOF, pexpect.TIMEOUT,
                          r"assword:", r"passphrase for key '.*':",
                          r"\(yes/no\)?"])
            if i == 0:
                outlist.append(p.before)
                break
            elif i == 1:
                # Timeout: keep whatever output accumulated and loop again.
                outlist.append(p.before)
            elif i == 2 or i == 3:
                # Password or key-passphrase prompt.
                if self.password:
                    password = self.password
                elif self.getpassword:
                    password = self.getpassword()
                else:
                    raise Error, _("SSH asked for password, "
                                   "but no password is available")
                p.sendline(password)
                # Discard any output produced before authentication.
                outlist = []
            elif i == 4:
                # Host-key confirmation prompt.
                p.sendline("yes")
                outlist = []
        while p.isalive():
            try:
                time.sleep(1)
            except (pexpect.TIMEOUT, pexpect.EOF):
                # Continue until the child dies
                pass
        # Drop leading ssh warnings (e.g. "Warning: Permanently added ...").
        while outlist and outlist[0].startswith("Warning:"):
            outlist.pop(0)
        return p.exitstatus, "".join(outlist).strip()

    def ssh(self, command, **keywd):
        """Run `command` on the remote host; return (status, output)."""
        return self._exec("ssh %s@%s \"%s\"" %
                          (self.username, self.host, command), **keywd)

    def scp(self, src, dst, recursive=0, **kwargs):
        """Copy local `src` to remote `dst` with scp."""
        if recursive:
            r = "-r "
        else:
            r = ""
        return self._exec("scp %s-c blowfish %s %s@%s:%s" %
                          (r, src, self.username, self.host, dst), **kwargs)

    def rscp(self, src, dst, recursive=0, **kwargs):
        """Copy remote `src` to local `dst` with scp."""
        if recursive:
            r = "-r "
        else:
            r = ""
        return self._exec("scp %s-c blowfish %s@%s:%s %s" %
                          (r, self.username, self.host, src, dst), **kwargs)

    def exists(self, file):
        """Return true if `file` exists on the remote host."""
        status, output = self.ssh("/bin/ls -ld %s" % file, noerror=1)
        return (status == 0)
# vim:ts=4:sw=4:et | unknown | codeparrot/codeparrot-clean | ||
# (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: sequence
author: Jayson Vantuyl <jayson@aggressive.ly>
version_added: "1.0"
short_description: generate a list based on a number sequence
description:
- generates a sequence of items. You can specify a start value, an end value, an optional "stride" value that specifies the number of steps
to increment the sequence, and an optional printf-style format string.
- 'Arguments can be specified as key=value pair strings or as a shortcut form of the arguments string is also accepted: [start-]end[/stride][:format].'
- 'Numerical values can be specified in decimal, hexadecimal (0x3f8) or octal (0600).'
- Starting at version 1.9.2, negative strides are allowed.
- Generated items are strings. Use Jinja2 filters to convert items to preferred type, e.g. ``{{ 1 + item|int }}``.
- See also Jinja2 ``range`` filter as an alternative.
options:
start:
description: number at which to start the sequence
default: 0
type: number
end:
description: number at which to end the sequence, dont use this with count
type: number
default: 0
count:
description: number of elements in the sequence, this is not to be used with end
type: number
default: 0
stride:
description: increments between sequence numbers, the default is 1 unless the end is less than the start, then it is -1.
type: number
format:
description: return a string with the generated number formatted in
"""
EXAMPLES = """
- name: create some test users
user:
name: "{{ item }}"
state: present
groups: "evens"
with_sequence: start=0 end=32 format=testuser%02x
- name: create a series of directories with even numbers for some reason
file:
dest: "/var/stuff/{{ item }}"
state: directory
with_sequence: start=4 end=16 stride=2
- name: a simpler way to use the sequence plugin create 4 groups
group:
name: "group{{ item }}"
state: present
with_sequence: count=4
- name: the final countdown
debug: msg={{item}} seconds to detonation
with_sequence: end=0 start=10
"""
RETURN = """
_list:
description:
- A list containing generated sequence of items
type: list
"""
from re import compile as re_compile, IGNORECASE
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import xrange
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
# shortcut format: matches "[start-]end[/stride][:format]".
# Group numbering below refers to match.groups() indices used by
# parse_simple_args: 1=start, 2=end, 4=stride, 6=format string.
NUM = "(0?x?[0-9a-f]+)"
SHORTCUT = re_compile(
    "^(" +         # Group 0
    NUM +          # Group 1: Start
    "-)?" +
    NUM +          # Group 2: End
    "(/" +         # Group 3
    NUM +          # Group 4: Stride
    ")?" +
    "(:(.+))?$",   # Group 5, Group 6: Format String
    IGNORECASE
)
class LookupModule(LookupBase):
    """
    sequence lookup module

    Used to generate some sequence of items. Takes arguments in two forms.

    The simple / shortcut form is:

      [start-]end[/stride][:format]

    As indicated by the brackets: start, stride, and format string are all
    optional.  The format string is in the style of printf.  This can be used
    to pad with zeros, format in hexadecimal, etc.  All of the numerical values
    can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
    Negative numbers are not supported in the shortcut form; use the
    key=value form to count downwards with a negative stride.

    Some examples:

      5 -> ["1","2","3","4","5"]
      5-8 -> ["5", "6", "7", "8"]
      2-10/2 -> ["2", "4", "6", "8", "10"]
      4:host%02d -> ["host01","host02","host03","host04"]

    The standard Ansible key-value form is accepted as well.  For example:

      start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0b"]

    This format takes an alternate form of "end" called "count", which counts
    some number from the starting value.  For example:

      count=5 -> ["1", "2", "3", "4", "5"]
      start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
      start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
      start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]

    The count option is mostly useful for avoiding off-by-one errors and errors
    calculating the number of entries in a sequence when a stride is specified.
    """

    def reset(self):
        """set sensible defaults"""
        self.start = 1
        self.count = None
        self.end = None
        self.stride = 1
        self.format = "%d"

    def parse_kv_args(self, args):
        """parse key-value style arguments"""
        for arg in ["start", "end", "count", "stride"]:
            try:
                arg_raw = args.pop(arg, None)
                if arg_raw is None:
                    continue
                # Base 0 lets int() honour 0x.. hex and 0.. octal prefixes.
                arg_cooked = int(arg_raw, 0)
                setattr(self, arg, arg_cooked)
            except ValueError:
                raise AnsibleError(
                    "can't parse arg %s=%r as integer"
                    % (arg, arg_raw)
                )
        if 'format' in args:
            self.format = args.pop("format")
        if args:
            # Anything left over was not a recognised option.
            raise AnsibleError(
                "unrecognized arguments to with_sequence: %r"
                % args.keys()
            )

    def parse_simple_args(self, term):
        """parse the shortcut forms, return True/False"""
        match = SHORTCUT.match(term)
        if not match:
            return False

        # Groups: 1=start, 2=end, 4=stride, 6=format (others are wrappers).
        _, start, end, _, stride, _, format = match.groups()

        if start is not None:
            try:
                start = int(start, 0)
            except ValueError:
                raise AnsibleError("can't parse start=%s as integer" % start)
        if end is not None:
            try:
                end = int(end, 0)
            except ValueError:
                raise AnsibleError("can't parse end=%s as integer" % end)
        if stride is not None:
            try:
                stride = int(stride, 0)
            except ValueError:
                raise AnsibleError("can't parse stride=%s as integer" % stride)

        if start is not None:
            self.start = start
        if end is not None:
            self.end = end
        if stride is not None:
            self.stride = stride
        if format is not None:
            self.format = format

        return True

    def sanity_check(self):
        # Validate the option combination and normalise count into end.
        if self.count is None and self.end is None:
            raise AnsibleError("must specify count or end in with_sequence")
        elif self.count is not None and self.end is not None:
            raise AnsibleError("can't specify both count and end in with_sequence")
        elif self.count is not None:
            # convert count to end
            if self.count != 0:
                self.end = self.start + self.count * self.stride - 1
            else:
                # count=0 yields an empty sequence; stride 0 makes run()
                # skip generation entirely.
                self.start = 0
                self.end = 0
                self.stride = 0
            del self.count
        if self.stride > 0 and self.end < self.start:
            raise AnsibleError("to count backwards make stride negative")
        if self.stride < 0 and self.end > self.start:
            raise AnsibleError("to count forward don't make stride negative")
        if self.format.count('%') != 1:
            raise AnsibleError("bad formatting string: %s" % self.format)

    def generate_sequence(self):
        # xrange excludes its endpoint, so nudge it one step in the
        # direction of travel to make `end` inclusive.
        if self.stride >= 0:
            adjust = 1
        else:
            adjust = -1
        numbers = xrange(self.start, self.end + adjust, self.stride)

        for i in numbers:
            try:
                formatted = self.format % i
                yield formatted
            except (ValueError, TypeError):
                raise AnsibleError(
                    "problem formatting %r with %r" % (i, self.format)
                )

    def run(self, terms, variables, **kwargs):
        results = []

        for term in terms:
            try:
                self.reset()  # clear out things for this iteration
                try:
                    if not self.parse_simple_args(term):
                        self.parse_kv_args(parse_kv(term))
                except AnsibleError:
                    raise
                except Exception as e:
                    raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))

                self.sanity_check()
                if self.stride != 0:
                    results.extend(self.generate_sequence())
            except AnsibleError:
                raise
            except Exception as e:
                raise AnsibleError(
                    "unknown error generating sequence: %s" % e
                )

        return results
/* Copyright (C) 1995-2011, 2016 Mark Adler
* Copyright (C) 2017 ARM Holdings Inc.
* Authors:
* Adenilson Cavalcanti <adenilson.cavalcanti@arm.com>
* Adam Stylinski <kungfujesus06@gmail.com>
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#ifdef ARM_NEON
#include "neon_intrins.h"
#include "zbuild.h"
#include "adler32_p.h"
/* Accumulate `len` 16-byte blocks of `buf` into the running Adler-32
 * component sums s[0] (byte sum) and s[1] (sum of running sums).
 * `len` counts 16-byte units (the caller passes n >> 4); modular
 * reduction is the caller's responsibility. */
static void NEON_accum32(uint32_t *s, const uint8_t *buf, size_t len) {
    /* Per-byte weights for folding a 64-byte chunk's bytes into s2:
     * the first byte of the chunk contributes 64 times, the last once. */
    static const uint16_t ALIGNED_(16) taps[64] = {
        64, 63, 62, 61, 60, 59, 58, 57,
        56, 55, 54, 53, 52, 51, 50, 49,
        48, 47, 46, 45, 44, 43, 42, 41,
        40, 39, 38, 37, 36, 35, 34, 33,
        32, 31, 30, 29, 28, 27, 26, 25,
        24, 23, 22, 21, 20, 19, 18, 17,
        16, 15, 14, 13, 12, 11, 10,  9,
         8,  7,  6,  5,  4,  3,  2,  1 };

    /* Seed lane 0 of the vector accumulators with the incoming sums. */
    uint32x4_t adacc = vdupq_n_u32(0);
    uint32x4_t s2acc = vdupq_n_u32(0);
    uint32x4_t s2acc_0 = vdupq_n_u32(0);
    uint32x4_t s2acc_1 = vdupq_n_u32(0);
    uint32x4_t s2acc_2 = vdupq_n_u32(0);

    adacc = vsetq_lane_u32(s[0], adacc, 0);
    s2acc = vsetq_lane_u32(s[1], s2acc, 0);

    /* s3acc gathers the per-chunk "previous adler" terms that are later
     * scaled by the chunk size (<< 6 for 64-byte chunks). */
    uint32x4_t s3acc = vdupq_n_u32(0);
    uint32x4_t adacc_prev = adacc;

    /* 16-bit widened byte accumulators, one per 8-byte half of the four
     * 16-byte vectors in a 64-byte chunk. */
    uint16x8_t s2_0, s2_1, s2_2, s2_3;
    s2_0 = s2_1 = s2_2 = s2_3 = vdupq_n_u16(0);

    uint16x8_t s2_4, s2_5, s2_6, s2_7;
    s2_4 = s2_5 = s2_6 = s2_7 = vdupq_n_u16(0);

    /* Process 64 bytes (four 16-byte blocks) per iteration; `rem` is the
     * number of leftover 16-byte blocks. */
    size_t num_iter = len >> 2;
    int rem = len & 3;

    for (size_t i = 0; i < num_iter; ++i) {
        uint8x16x4_t d0_d3 = vld1q_u8_x4(buf);

        /* Unfortunately it doesn't look like there's a direct sum 8 bit to 32
         * bit instruction, we'll have to make due summing to 16 bits first */
        uint16x8x2_t hsum, hsum_fold;
        hsum.val[0] = vpaddlq_u8(d0_d3.val[0]);
        hsum.val[1] = vpaddlq_u8(d0_d3.val[1]);
        hsum_fold.val[0] = vpadalq_u8(hsum.val[0], d0_d3.val[2]);
        hsum_fold.val[1] = vpadalq_u8(hsum.val[1], d0_d3.val[3]);

        adacc = vpadalq_u16(adacc, hsum_fold.val[0]);
        s3acc = vaddq_u32(s3acc, adacc_prev);
        adacc = vpadalq_u16(adacc, hsum_fold.val[1]);

        /* If we do straight widening additions to the 16 bit values, we don't incur
         * the usual penalties of a pairwise add. We can defer the multiplications
         * until the very end. These will not overflow because we are incurring at
         * most 408 loop iterations (NMAX / 64), and a given lane is only going to be
         * summed into once. This means for the maximum input size, the largest value
         * we will see is 255 * 102 = 26010, safely under uint16 max */
        s2_0 = vaddw_u8(s2_0, vget_low_u8(d0_d3.val[0]));
        s2_1 = vaddw_high_u8(s2_1, d0_d3.val[0]);
        s2_2 = vaddw_u8(s2_2, vget_low_u8(d0_d3.val[1]));
        s2_3 = vaddw_high_u8(s2_3, d0_d3.val[1]);
        s2_4 = vaddw_u8(s2_4, vget_low_u8(d0_d3.val[2]));
        s2_5 = vaddw_high_u8(s2_5, d0_d3.val[2]);
        s2_6 = vaddw_u8(s2_6, vget_low_u8(d0_d3.val[3]));
        s2_7 = vaddw_high_u8(s2_7, d0_d3.val[3]);

        adacc_prev = adacc;
        buf += 64;
    }

    /* Each accumulated adacc_prev represents 64 bytes' worth of s1. */
    s3acc = vshlq_n_u32(s3acc, 6);

    if (rem) {
        /* Finish the remaining 16-byte blocks one at a time; their
         * adacc_prev terms are scaled by 16 (<< 4) instead of 64. */
        uint32x4_t s3acc_0 = vdupq_n_u32(0);
        while (rem--) {
            uint8x16_t d0 = vld1q_u8(buf);
            uint16x8_t adler;
            adler = vpaddlq_u8(d0);
            s2_6 = vaddw_u8(s2_6, vget_low_u8(d0));
            s2_7 = vaddw_high_u8(s2_7, d0);
            adacc = vpadalq_u16(adacc, adler);
            s3acc_0 = vaddq_u32(s3acc_0, adacc_prev);
            adacc_prev = adacc;
            buf += 16;
        }
        s3acc_0 = vshlq_n_u32(s3acc_0, 4);
        s3acc = vaddq_u32(s3acc_0, s3acc);
    }

    /* Apply the deferred positional weights to the widened byte sums,
     * spreading work across four independent accumulators. */
    uint16x8x4_t t0_t3 = vld1q_u16_x4(taps);
    uint16x8x4_t t4_t7 = vld1q_u16_x4(taps + 32);

    s2acc = vmlal_high_u16(s2acc, t0_t3.val[0], s2_0);
    s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t0_t3.val[0]), vget_low_u16(s2_0));
    s2acc_1 = vmlal_high_u16(s2acc_1, t0_t3.val[1], s2_1);
    s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t0_t3.val[1]), vget_low_u16(s2_1));

    s2acc = vmlal_high_u16(s2acc, t0_t3.val[2], s2_2);
    s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t0_t3.val[2]), vget_low_u16(s2_2));
    s2acc_1 = vmlal_high_u16(s2acc_1, t0_t3.val[3], s2_3);
    s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t0_t3.val[3]), vget_low_u16(s2_3));

    s2acc = vmlal_high_u16(s2acc, t4_t7.val[0], s2_4);
    s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t4_t7.val[0]), vget_low_u16(s2_4));
    s2acc_1 = vmlal_high_u16(s2acc_1, t4_t7.val[1], s2_5);
    s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t4_t7.val[1]), vget_low_u16(s2_5));

    s2acc = vmlal_high_u16(s2acc, t4_t7.val[2], s2_6);
    s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t4_t7.val[2]), vget_low_u16(s2_6));
    s2acc_1 = vmlal_high_u16(s2acc_1, t4_t7.val[3], s2_7);
    s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t4_t7.val[3]), vget_low_u16(s2_7));

    /* Fold the four partial s2 accumulators, add the scaled s3 term, and
     * reduce each vector horizontally back to scalars. */
    s2acc = vaddq_u32(s2acc_0, s2acc);
    s2acc_2 = vaddq_u32(s2acc_1, s2acc_2);
    s2acc = vaddq_u32(s2acc, s2acc_2);

    uint32x2_t adacc2, s2acc2, as;
    s2acc = vaddq_u32(s2acc, s3acc);
    adacc2 = vpadd_u32(vget_low_u32(adacc), vget_high_u32(adacc));
    s2acc2 = vpadd_u32(vget_low_u32(s2acc), vget_high_u32(s2acc));
    as = vpadd_u32(adacc2, s2acc2);
    s[0] = vget_lane_u32(as, 0);
    s[1] = vget_lane_u32(as, 1);
}
/* Scalar Adler-32 update over a short run of bytes.
 * pair[0] accumulates the byte sum (s1); pair[1] the running sum-of-sums (s2).
 * No modulo reduction happens here; callers reduce the pair mod BASE.
 * Fix: iterate with size_t instead of unsigned int so a len wider than
 * unsigned int cannot truncate the loop bound on LP64 targets. */
static void NEON_handle_tail(uint32_t *pair, const uint8_t *buf, size_t len) {
    size_t i;
    for (i = 0; i < len; ++i) {
        pair[0] += buf[i];
        pair[1] += pair[0];
    }
}
/* NEON-accelerated Adler-32. Splits the checksum into its two 16-bit
 * component sums, handles short/degenerate inputs with scalar fallbacks,
 * then processes the bulk 16 bytes at a time via NEON_accum32(). */
Z_INTERNAL uint32_t adler32_neon(uint32_t adler, const uint8_t *buf, size_t len) {
    /* split Adler-32 into component sums */
    uint32_t sum2 = (adler >> 16) & 0xffff;
    adler &= 0xffff;

    /* in case user likes doing a byte at a time, keep it fast */
    if (len == 1)
        return adler32_len_1(adler, buf, sum2);

    /* initial Adler-32 value (deferred check for len == 1 speed) */
    if (buf == NULL)
        return 1L;

    /* in case short lengths are provided, keep it somewhat fast */
    if (len < 16)
        return adler32_len_16(adler, buf, len, sum2);

    /* pair[0] = s1 (byte sum), pair[1] = s2 (sum of sums). */
    uint32_t pair[2];
    /* NMAX (defined elsewhere): per-round byte budget before the sums must
     * be reduced mod BASE to avoid 32-bit overflow — as in zlib's adler32. */
    int n = NMAX;
    unsigned int done = 0;

    /* Split Adler-32 into component sums, it can be supplied by
     * the caller sites (e.g. in a PNG file).
     */
    pair[0] = adler;
    pair[1] = sum2;

    /* If memory is not SIMD aligned, do scalar sums to an aligned
     * offset, provided that doing so doesn't completely eliminate
     * SIMD operation. Aligned loads are still faster on ARM, even
     * though there's no explicit aligned load instruction */
    unsigned int align_offset = ((uintptr_t)buf & 15);
    unsigned int align_adj = (align_offset) ? 16 - align_offset : 0;

    if (align_offset && len >= (16 + align_adj)) {
        NEON_handle_tail(pair, buf, align_adj);
        n -= align_adj;
        done += align_adj;
    } else {
        /* If here, we failed the len criteria test, it wouldn't be
         * worthwhile to do scalar aligning sums */
        align_adj = 0;
    }

    while (done < len) {
        int remaining = (int)(len - done);
        /* First round keeps the alignment-reduced budget; subsequent
         * rounds use the full NMAX budget. */
        n = MIN(remaining, (done == align_adj) ? n : NMAX);

        if (n < 16)
            break;

        /* Accumulate n bytes in 16-byte chunks, then reduce mod BASE. */
        NEON_accum32(pair, buf + done, n >> 4);
        pair[0] %= BASE;
        pair[1] %= BASE;

        /* Bytes actually consumed: n rounded down to a multiple of 16. */
        int actual_nsums = (n >> 4) << 4;
        done += actual_nsums;
    }

    /* Handle the tail elements. */
    if (done < len) {
        NEON_handle_tail(pair, (buf + done), len - done);
        pair[0] %= BASE;
        pair[1] %= BASE;
    }

    /* D = B * 65536 + A, see: https://en.wikipedia.org/wiki/Adler-32. */
    return (pair[1] << 16) | pair[0];
}
#endif | c | github | https://github.com/opencv/opencv | 3rdparty/zlib-ng/arch/arm/adler32_neon.c |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# crawl.py - Greenlets-based Bitcoin network crawler.
#
# Copyright (c) Addy Yeow Chin Heng <ayeowch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Greenlets-based Bitcoin network crawler.
"""
from gevent import monkey
monkey.patch_all()
import geoip2.database
import gevent
import json
import logging
import os
import random
import redis
import redis.connection
import requests
import socket
import sys
import time
from binascii import hexlify, unhexlify
from collections import Counter
from ConfigParser import ConfigParser
from geoip2.errors import AddressNotFoundError
from ipaddress import ip_address, ip_network
from protocol import (
ONION_V3_LEN,
TO_SERVICES,
Connection,
ConnectionError,
ProtocolError,
)
from utils import new_redis_conn, get_keys, ip_to_network
# Route redis-py's socket operations through gevent so blocking Redis I/O
# yields to other greenlets instead of blocking the whole process.
redis.connection.socket = gevent.socket

REDIS_CONN = None  # Shared Redis connection; initialized in main()
CONF = {}  # Global configuration dict; populated by init_conf()

# MaxMind databases
ASN = geoip2.database.Reader("geoip/GeoLite2-ASN.mmdb")
def enumerate_node(redis_pipe, addr_msgs, now):
    """
    Queue peering nodes advertised in the addr messages whose age is at most
    CONF['max_age'] into the crawl set. Returns (added, skipped) counts.
    """
    added = 0
    skipped = 0

    for msg in addr_msgs:
        for peer in msg.get('addr_list', []):
            if peer['onion'] and len(peer['onion']) == ONION_V3_LEN:
                logging.debug("onion v3 node: %s", peer)

            age = now - peer['timestamp']  # seconds
            if age < 0 or age > CONF['max_age']:
                continue

            address = peer['ipv4'] or peer['ipv6'] or peer['onion']
            if not address:
                continue
            port = peer['port'] if peer['port'] > 0 else CONF['port']
            services = peer['services']

            if is_excluded(address):
                logging.debug("Exclude: (%s, %d)", address, port)
                skipped += 1
                continue

            redis_pipe.sadd('pending', (address, port, services))
            added += 1
            # Stop early once we queued the per-node peer quota.
            if added >= CONF['peers_per_node']:
                return (added, skipped)

    return (added, skipped)
def connect(redis_conn, key):
    """
    Establishes connection with a node to:
    1) Send version message
    2) Receive version and verack message
    3) Send getaddr message
    4) Receive addr message containing list of peering nodes
    Stores state and height for node in Redis.
    """
    version_msg = {}
    addr_msgs = []

    redis_conn.set(key, "")  # Set Redis key for a new node

    # key has the form "node:ADDRESS-PORT-SERVICES".
    (address, port, services) = key[5:].split("-", 2)
    services = int(services)

    height = redis_conn.get('height')
    if height:
        height = int(height)

    proxy = None
    if address.endswith(".onion"):
        # .onion nodes are only reachable through a Tor SOCKS proxy.
        proxy = random.choice(CONF['tor_proxies'])

    conn = Connection((address, int(port)),
                      (CONF['source_address'], 0),
                      magic_number=CONF['magic_number'],
                      socket_timeout=CONF['socket_timeout'],
                      proxy=proxy,
                      protocol_version=CONF['protocol_version'],
                      to_services=services,
                      from_services=CONF['services'],
                      user_agent=CONF['user_agent'],
                      height=height,
                      relay=CONF['relay'])
    try:
        logging.debug("Connecting to %s", conn.to_addr)
        conn.open()
        version_msg = conn.handshake()
    except (ProtocolError, ConnectionError, socket.error) as err:
        logging.debug("%s: %s", conn.to_addr, err)

    redis_pipe = redis_conn.pipeline()
    if version_msg:
        # Handshake succeeded; ask the node for its known peers.
        try:
            conn.getaddr(block=False)
        except (ProtocolError, ConnectionError, socket.error) as err:
            logging.debug("%s: %s", conn.to_addr, err)
        else:
            # Poll for addr/addrv2 replies, sleeping 0.3s per iteration for
            # up to socket_timeout iterations.
            addr_wait = 0
            while addr_wait < CONF['socket_timeout']:
                addr_wait += 1
                gevent.sleep(0.3)
                try:
                    msgs = conn.get_messages(commands=['addr', 'addrv2'])
                except (ProtocolError, ConnectionError, socket.error) as err:
                    logging.debug("%s: %s", conn.to_addr, err)
                    break
                # Only accept messages listing more than one address;
                # presumably a single-entry addr is just the node echoing
                # our own address — confirm against the protocol module.
                if msgs and any([msg['count'] > 1 for msg in msgs]):
                    addr_msgs = msgs
                    break

        from_services = version_msg.get('services', 0)
        if from_services != services:
            # Re-key the node under the services it actually advertised.
            logging.debug("%s Expected %d, got %d for services", conn.to_addr,
                          services, from_services)
            key = "node:{}-{}-{}".format(address, port, from_services)

        height_key = "height:{}-{}-{}".format(address, port, from_services)
        redis_pipe.setex(height_key, CONF['max_age'],
                         version_msg.get('height', 0))

        now = int(time.time())
        (peers, excluded) = enumerate_node(redis_pipe, addr_msgs, now)
        logging.debug("%s Peers: %d (Excluded: %d)",
                      conn.to_addr, peers, excluded)

        # Mark this node reachable.
        redis_pipe.set(key, "")
        redis_pipe.sadd('up', key)

    conn.close()
    redis_pipe.execute()
def dump(timestamp, nodes):
    """
    Dumps data for reachable nodes into timestamp-prefixed JSON file and
    returns most common height from the nodes.

    Each JSON entry is [address, port, services, height]; height falls back
    to 0 when the node's height key has already expired in Redis.
    """
    json_data = []

    logging.info('Building JSON data')
    for node in nodes:
        (address, port, services) = node[5:].split("-", 2)
        height_key = "height:{}-{}-{}".format(address, port, services)
        try:
            height = int(REDIS_CONN.get(height_key))
        except TypeError:
            # get() returned None: the setex'd height key expired.
            logging.warning("%s missing", height_key)
            height = 0
        json_data.append([address, int(port), int(services), height])
    logging.info('Built JSON data: %d', len(json_data))

    if len(json_data) == 0:
        logging.warning("len(json_data): %d", len(json_data))
        return 0

    json_output = os.path.join(CONF['crawl_dir'], "{}.json".format(timestamp))
    # Fix: use a context manager so the file handle is closed (and flushed)
    # deterministically instead of relying on garbage collection.
    with open(json_output, 'w') as json_file:
        json_file.write(json.dumps(json_data))
    logging.info("Wrote %s", json_output)

    # Most common height across the dumped nodes.
    return Counter([node[-1] for node in json_data]).most_common(1)[0][0]
def restart(timestamp):
    """
    Dumps data for the reachable nodes into a JSON file.
    Loads all reachable nodes from Redis into the crawl set.
    Removes keys for all nodes from current crawl.
    Updates excluded networks with current list of bogons.
    Updates number of reachable nodes and most common height in Redis.
    """
    redis_pipe = REDIS_CONN.pipeline()

    nodes = REDIS_CONN.smembers('up')  # Reachable nodes
    redis_pipe.delete('up')
    # Re-seed the next crawl with every node that was reachable this round.
    for node in nodes:
        (address, port, services) = node[5:].split("-", 2)
        redis_pipe.sadd('pending', (address, int(port), int(services)))

    # Drop per-node and per-CIDR state from the finished crawl.
    for key in get_keys(REDIS_CONN, 'node:*'):
        redis_pipe.delete(key)
    for key in get_keys(REDIS_CONN, 'crawl:cidr:*'):
        redis_pipe.delete(key)

    if CONF['include_checked']:
        checked_nodes = REDIS_CONN.zrangebyscore(
            'check', timestamp - CONF['max_age'], timestamp)
        for node in checked_nodes:
            # NOTE(review): eval() deserializes a tuple stored by this app in
            # its own Redis; safe only as long as Redis is trusted/private.
            (address, port, services) = eval(node)
            if is_excluded(address):
                logging.debug("Exclude: %s", address)
                continue
            redis_pipe.sadd('pending', (address, port, services))

    redis_pipe.execute()
    update_excluded_networks()

    reachable_nodes = len(nodes)
    logging.info("Reachable nodes: %d", reachable_nodes)
    REDIS_CONN.lpush('nodes', (timestamp, reachable_nodes))

    # Persist the most common height so new connections advertise it.
    height = dump(timestamp, nodes)
    REDIS_CONN.set('height', height)
    logging.info("Height: %d", height)
def cron():
    """
    Master-side maintenance loop run in its own greenlet:
    1) Reports the current number of nodes in the crawl set.
    2) Initiates a new crawl snapshot once the crawl set is empty.
    """
    snapshot_start = int(time.time())
    while True:
        queued = REDIS_CONN.scard('pending')
        logging.info("Pending: %d", queued)

        if queued == 0:
            REDIS_CONN.set('crawl:master:state', "starting")

            now = int(time.time())
            elapsed = now - snapshot_start
            REDIS_CONN.set('elapsed', elapsed)
            logging.info("Elapsed: %d", elapsed)
            logging.info("Restarting")
            restart(now)

            # Space snapshots at least snapshot_delay seconds apart.
            while int(time.time()) - snapshot_start < CONF['snapshot_delay']:
                gevent.sleep(1)
            snapshot_start = int(time.time())

            REDIS_CONN.set('crawl:master:state', "running")

        gevent.sleep(CONF['cron_delay'])
def task():
    """
    Worker loop: pop a random node from the crawl set and attempt to
    establish a connection with it.
    """
    redis_conn = new_redis_conn(db=CONF['db'])
    while True:
        # Slave workers idle until the master signals a running crawl.
        if not CONF['master']:
            while REDIS_CONN.get('crawl:master:state') != "running":
                gevent.sleep(CONF['socket_timeout'])

        popped = redis_conn.spop('pending')  # Pop random node from set
        if popped is None:
            gevent.sleep(1)
            continue

        # Entries are stored as stringified (address, port, services) tuples.
        (address, port, services) = eval(popped)

        # Skip IPv6 node when IPv6 crawling is disabled.
        if ":" in address and not CONF['ipv6']:
            continue

        key = "node:{}-{}-{}".format(address, port, services)
        if redis_conn.exists(key):
            continue

        # Enforce the per-prefix cap for IPv6 nodes.
        if ":" in address and CONF['ipv6_prefix'] < 128:
            cidr = ip_to_network(address, CONF['ipv6_prefix'])
            seen = redis_conn.incr('crawl:cidr:{}'.format(cidr))
            if seen > CONF['nodes_per_ipv6_prefix']:
                logging.debug("CIDR %s: %d", cidr, seen)
                continue

        connect(redis_conn, key)
def set_pending():
    """
    Initializes the pending set in Redis from DNS seeders plus the
    hardcoded list of .onion nodes to bootstrap the crawler.
    """
    for seeder in CONF['seeders']:
        resolved = []

        # Resolve IPv4 first, then IPv6 when enabled.
        families = [socket.AF_INET]
        if CONF['ipv6']:
            families.append(socket.AF_INET6)
        for family in families:
            try:
                resolved.extend(socket.getaddrinfo(seeder, None, family))
            except socket.gaierror as err:
                logging.warning("%s", err)

        for entry in resolved:
            address = entry[-1][0]
            if is_excluded(address):
                logging.debug("Exclude: %s", address)
                continue
            logging.debug("%s: %s", seeder, address)
            REDIS_CONN.sadd('pending', (address, CONF['port'], TO_SERVICES))

    if CONF['onion']:
        for address in CONF['onion_nodes']:
            REDIS_CONN.sadd('pending', (address, CONF['port'], TO_SERVICES))
def is_excluded(address):
    """
    Return True if the address matches the exclusion rules (private range,
    excluded network, excluded ASN, or unparsable), False otherwise.
    """
    # .onion addresses are never excluded.
    if address.endswith(".onion"):
        return False
    if ip_address(unicode(address)).is_private:
        return True

    if ":" in address:
        address_family, conf_key = socket.AF_INET6, 'exclude_ipv6_networks'
    else:
        address_family, conf_key = socket.AF_INET, 'exclude_ipv4_networks'

    try:
        asn_record = ASN.asn(address)
    except AddressNotFoundError:
        asn = None
    else:
        asn = 'AS{}'.format(asn_record.autonomous_system_number)

    try:
        numeric = int(hexlify(socket.inet_pton(address_family, address)), 16)
    except socket.error:
        logging.warning("Bad address: %s", address)
        return True

    # Excluded networks are stored as (network_address, netmask) integers.
    if any((numeric & net[1]) == net[0] for net in CONF[conf_key]):
        return True
    if asn is not None and asn in CONF['exclude_asns']:
        return True
    return False
def list_excluded_networks(txt, networks=None):
    """
    Converts list of networks from configuration file into a set of tuples of
    (network address, netmask) integers to be excluded from the crawl.
    Unparsable lines and '#' comments are ignored.
    """
    if networks is None:
        networks = set()
    for raw_line in txt.strip().split("\n"):
        candidate = raw_line.split('#')[0].strip()
        try:
            parsed = ip_network(unicode(candidate))
        except ValueError:
            continue
        networks.add((int(parsed.network_address), int(parsed.netmask)))
    return networks
def _fetch_url_content(url):
    """Fetch a bogon list; return the response body or None on any failure."""
    try:
        response = requests.get(url, timeout=15)
    except requests.exceptions.RequestException as err:
        logging.warning(err)
        return None
    if response.status_code != 200:
        return None
    return response.content


def update_excluded_networks():
    """
    Adds bogons into the excluded IPv4 and IPv6 networks.

    Refactored: the IPv4 and IPv6 branches were near-duplicates; both now
    share one loop plus the _fetch_url_content() helper.
    """
    sources = [
        ('exclude_ipv4_bogons', 'exclude_ipv4_networks', "IPv4",
         ["http://www.team-cymru.org/Services/Bogons/fullbogons-ipv4.txt"]),
        ('exclude_ipv6_bogons', 'exclude_ipv6_networks', "IPv6",
         ["http://www.team-cymru.org/Services/Bogons/fullbogons-ipv6.txt"]),
    ]
    for enabled_key, networks_key, label, urls in sources:
        if not CONF[enabled_key]:
            continue
        for url in urls:
            content = _fetch_url_content(url)
            if content is None:
                continue
            # Merge the fetched bogons into the configured exclusion set.
            CONF[networks_key] = list_excluded_networks(
                content, networks=CONF[networks_key])
            logging.info("%s: %d", label, len(CONF[networks_key]))
def init_conf(argv):
    """
    Populates CONF with key-value pairs from configuration file.

    argv[1] is the path of the configuration file; argv[2] selects
    master/slave mode.
    """
    conf = ConfigParser()
    conf.read(argv[1])
    CONF['logfile'] = conf.get('crawl', 'logfile')
    CONF['magic_number'] = unhexlify(conf.get('crawl', 'magic_number'))
    CONF['port'] = conf.getint('crawl', 'port')
    CONF['db'] = conf.getint('crawl', 'db')
    CONF['seeders'] = conf.get('crawl', 'seeders').strip().split("\n")
    CONF['workers'] = conf.getint('crawl', 'workers')
    CONF['debug'] = conf.getboolean('crawl', 'debug')
    CONF['source_address'] = conf.get('crawl', 'source_address')
    CONF['protocol_version'] = conf.getint('crawl', 'protocol_version')
    CONF['user_agent'] = conf.get('crawl', 'user_agent')
    CONF['services'] = conf.getint('crawl', 'services')
    CONF['relay'] = conf.getint('crawl', 'relay')
    CONF['socket_timeout'] = conf.getint('crawl', 'socket_timeout')
    CONF['cron_delay'] = conf.getint('crawl', 'cron_delay')
    CONF['snapshot_delay'] = conf.getint('crawl', 'snapshot_delay')
    CONF['max_age'] = conf.getint('crawl', 'max_age')
    CONF['peers_per_node'] = conf.getint('crawl', 'peers_per_node')

    # IPv6 crawl limits.
    CONF['ipv6'] = conf.getboolean('crawl', 'ipv6')
    CONF['ipv6_prefix'] = conf.getint('crawl', 'ipv6_prefix')
    CONF['nodes_per_ipv6_prefix'] = conf.getint('crawl',
                                                'nodes_per_ipv6_prefix')

    # Exclusion rules: ASNs, static networks, optional bogon feeds.
    CONF['exclude_asns'] = conf.get('crawl',
                                    'exclude_asns').strip().split("\n")
    CONF['exclude_ipv4_networks'] = list_excluded_networks(
        conf.get('crawl', 'exclude_ipv4_networks'))
    CONF['exclude_ipv6_networks'] = list_excluded_networks(
        conf.get('crawl', 'exclude_ipv6_networks'))
    CONF['exclude_ipv4_bogons'] = conf.getboolean('crawl',
                                                  'exclude_ipv4_bogons')
    CONF['exclude_ipv6_bogons'] = conf.getboolean('crawl',
                                                  'exclude_ipv6_bogons')

    # Tor / onion support: proxies are "HOST:PORT" lines.
    CONF['onion'] = conf.getboolean('crawl', 'onion')
    CONF['tor_proxies'] = []
    if CONF['onion']:
        tor_proxies = conf.get('crawl', 'tor_proxies').strip().split("\n")
        CONF['tor_proxies'] = [
            (p.split(":")[0], int(p.split(":")[1])) for p in tor_proxies]
    CONF['onion_nodes'] = conf.get('crawl', 'onion_nodes').strip().split("\n")

    CONF['include_checked'] = conf.getboolean('crawl', 'include_checked')

    CONF['crawl_dir'] = conf.get('crawl', 'crawl_dir')
    if not os.path.exists(CONF['crawl_dir']):
        os.makedirs(CONF['crawl_dir'])

    # Set to True for master process
    CONF['master'] = argv[2] == "master"
def main(argv):
    """
    Entry point: load configuration, set up logging and Redis, then spawn
    the worker greenlets. Returns a process exit code (1 on usage error).
    """
    if len(argv) < 3 or not os.path.exists(argv[1]):
        print("Usage: crawl.py [config] [master|slave]")
        return 1

    # Initialize global conf
    init_conf(argv)

    # Initialize logger
    loglevel = logging.INFO
    if CONF['debug']:
        loglevel = logging.DEBUG

    logformat = ("[%(process)d] %(asctime)s,%(msecs)05.1f %(levelname)s "
                 "(%(funcName)s) %(message)s")
    logging.basicConfig(level=loglevel,
                        format=logformat,
                        filename=CONF['logfile'],
                        filemode='a')
    print("Log: {}, press CTRL+C to terminate..".format(CONF['logfile']))

    global REDIS_CONN
    REDIS_CONN = new_redis_conn(db=CONF['db'])

    if CONF['master']:
        # Master wipes state left over from any previous crawl, then seeds
        # the pending set before flipping the state to "running".
        REDIS_CONN.set('crawl:master:state', "starting")
        logging.info("Removing all keys")
        redis_pipe = REDIS_CONN.pipeline()
        redis_pipe.delete('up')
        for key in get_keys(REDIS_CONN, 'node:*'):
            redis_pipe.delete(key)
        for key in get_keys(REDIS_CONN, 'crawl:cidr:*'):
            redis_pipe.delete(key)
        redis_pipe.delete('pending')
        redis_pipe.execute()
        set_pending()
        update_excluded_networks()
        REDIS_CONN.set('crawl:master:state', "running")

    # Spawn workers (greenlets) including one worker reserved for cron tasks
    workers = []
    if CONF['master']:
        workers.append(gevent.spawn(cron))
    for _ in xrange(CONF['workers'] - len(workers)):
        workers.append(gevent.spawn(task))
    logging.info("Workers: %d", len(workers))
    gevent.joinall(workers)

    return 0
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit code.
    sys.exit(main(sys.argv))
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pytest
from twitter.common.contextutil import temporary_dir
from twitter.common.dirutil import touch
from apache.thermos.common.constants import DEFAULT_CHECKPOINT_ROOT
from apache.thermos.common.path import TaskPath
from apache.thermos.monitoring.detector import ChainedPathDetector, FixedPathDetector, TaskDetector
def test_task_detector():
    """End-to-end check of TaskDetector against an on-disk checkpoint layout."""
    with temporary_dir() as root:
        active_log_dir = os.path.join(root, 'active_log')
        finished_log_dir = os.path.join(root, 'finished_log')

        path = TaskPath(root=root)
        detector = TaskDetector(root)

        # test empty paths: with nothing on disk every discovery query is
        # empty, while checkpoint paths are computed (not probed) and match.
        assert list(detector.get_task_ids(state='active')) == []
        assert list(detector.get_task_ids(state='finished')) == []
        assert set(detector.get_task_ids()) == set()

        assert detector.get_checkpoint(task_id='active_task') == path.given(
            task_id='active_task').getpath('runner_checkpoint')
        assert detector.get_checkpoint(task_id='finished_task') == path.given(
            task_id='finished_task').getpath('runner_checkpoint')

        assert set(detector.get_process_checkpoints('active_task')) == set()
        assert set(detector.get_process_checkpoints('finished_task')) == set()
        assert set(detector.get_process_runs('active_task', active_log_dir)) == set()
        assert set(detector.get_process_runs('finished_task', finished_log_dir)) == set()
        assert set(detector.get_process_logs('active_task', active_log_dir)) == set()
        assert set(detector.get_process_logs('finished_task', finished_log_dir)) == set()

        # create paths: one active and one finished task, each with a single
        # process run, one log file, and one process checkpoint.
        paths = [
            path.given(state='active', task_id='active_task').getpath('task_path'),
            path.given(state='finished', task_id='finished_task').getpath('task_path'),
            path.given(task_id='active_task').getpath('runner_checkpoint'),
            path.given(task_id='finished_task').getpath('runner_checkpoint'),
            path.given(
                task_id='active_task',
                process='hello_world',
                run='0',
                log_dir=active_log_dir
            ).with_filename('stdout').getpath('process_logdir'),
            path.given(
                task_id='finished_task',
                process='goodbye_world',
                run='1',
                log_dir=finished_log_dir
            ).with_filename('stderr').getpath('process_logdir'),
            path.given(task_id='active_task', process='hello_world').getpath('process_checkpoint'),
            path.given(task_id='finished_task', process='goodbye_world').getpath('process_checkpoint'),
        ]
        for p in paths:
            touch(p)

        # A fresh detector over the same root must now discover everything.
        detector = TaskDetector(root)
        assert list(detector.get_task_ids(state='active')) == list([('active', 'active_task')])
        assert list(detector.get_task_ids(state='finished')) == list([('finished', 'finished_task')])
        assert set(detector.get_task_ids()) == set(
            [('active', 'active_task'), ('finished', 'finished_task')])

        assert list(detector.get_process_checkpoints('active_task')) == [
            path.given(task_id='active_task', process='hello_world').getpath('process_checkpoint')]
        assert list(detector.get_process_checkpoints('finished_task')) == [
            path.given(task_id='finished_task', process='goodbye_world').getpath('process_checkpoint')]
        # Runs are reported as (process_name, run_number) with int run numbers.
        assert list(detector.get_process_runs('active_task', active_log_dir)) == [
            ('hello_world', 0)]
        assert list(detector.get_process_runs('finished_task', finished_log_dir)) == [
            ('goodbye_world', 1)]
        assert list(detector.get_process_logs('active_task', active_log_dir)) == [
            path.given(
                task_id='active_task',
                process='hello_world',
                run='0',
                log_dir=active_log_dir
            ).with_filename('stdout').getpath('process_logdir')]
        assert list(detector.get_process_logs('finished_task', finished_log_dir)) == [
            path.given(
                task_id='finished_task',
                process='goodbye_world',
                run='1',
                log_dir=finished_log_dir
            ).with_filename('stderr').getpath('process_logdir')]
def test_fixed_path_detector():
    """FixedPathDetector reports exactly the single path it was built with."""
    # Default is TaskPath default
    detector = FixedPathDetector()
    assert detector.get_paths() == [DEFAULT_CHECKPOINT_ROOT]

    # Non-default
    custom_root = '/var/lib/derp'
    detector = FixedPathDetector(path=custom_root)
    assert detector.get_paths() == [custom_root]
def test_fixed_path_detector_constructor():
    # A non-string path is rejected at construction time.
    with pytest.raises(TypeError):
        FixedPathDetector(path=234)
def test_chained_path_detector():
    """ChainedPathDetector reports the union of its children's paths."""
    path_a = '/var/lib/derp1'
    path_b = '/var/lib/derp2'
    chained = ChainedPathDetector(
        FixedPathDetector(path=path_a),
        FixedPathDetector(path=path_b),
    )
    assert set(chained.get_paths()) == {path_a, path_b}
def test_chained_path_detector_constructor():
    # Arguments that are not detectors are rejected at construction time.
    with pytest.raises(TypeError):
        ChainedPathDetector(1, 2, 3)
    with pytest.raises(TypeError):
        ChainedPathDetector(FixedPathDetector(), 'hello')
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from savu.plugins.plugin import Plugin
"""
.. module:: plugins_test
:platform: Unix
:synopsis: unittest test classes for plugins
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
import unittest
from savu.plugins import utils as pu
from savu.test import test_utils as tu
from savu.plugins.cpu_plugin import CpuPlugin
base_class_name = "savu.plugins.plugin"
class PluginTest(unittest.TestCase):
    """Generic load/process checks for the plugin named by self.plugin_name."""

    def setUp(self):
        self.plugin_name = base_class_name

    def test_get_plugin(self):
        try:
            loaded = pu.load_plugin(self.plugin_name)
        except ImportError as exc:
            # Environment lacks the plugin's libraries; treat as a pass.
            print("Failed to run plugin test as libraries not available (%s), passing test" % (exc))
        else:
            self.assertIsNotNone(loaded)

    def test_process(self):
        try:
            plugin = pu.load_plugin(self.plugin_name)
            if self.plugin_name == base_class_name:
                # The abstract base must refuse to process anything.
                self.assertRaises(NotImplementedError, plugin.process,
                                  "test", "test", 1, 1)
                return
            # load appropriate data
            data = tu.get_appropriate_input_data(plugin)
            self.assertGreater(len(data), 0, "Cannot find appropriate test data")
            # generate somewhere for the data to go
            output = tu.get_appropriate_output_data(plugin, data)
            self.assertGreater(len(output), 0,
                               "Cannot create appropriate output data")
            plugin.set_parameters(None)
            for idx in range(len(data)):
                plugin.run_process(data[idx], output[idx], ["CPU0"], 0)
                print("Output from plugin under test ( %s ) is in %s" %
                      (plugin.name, output[idx].backing_file.filename))
                data[idx].complete()
                output[idx].complete()
        except ImportError as exc:
            # Environment lacks the plugin's libraries; treat as a pass.
            print("Failed to run plugin test as libraries not available (%s), passing test" % (exc))
class CpuPluginWrapper(Plugin, CpuPlugin):
    """Test double that records the arguments process() was invoked with."""

    def __init__(self):
        super(CpuPluginWrapper, self).__init__()
        self.data = None
        self.output = None
        self.processes = None
        self.process_number = None

    def process(self, data, output, processes, process):
        # Capture everything so tests can inspect how dispatch filtered us.
        self.data, self.output = data, output
        self.processes, self.process_number = processes, process
class CpuPluginTest(unittest.TestCase):
    """
    Verifies CpuPlugin dispatch: only CPU slots take part, GPU slots get
    (None, None).

    Refactored: the repeated create/run/assert sequences collapse into the
    _run_and_check() helper.
    """

    def setUp(self):
        self.plugin = None

    def _run_and_check(self, all_procs, process_number,
                       expected_procs, expected_number):
        # Run a fresh wrapper and verify the recorded dispatch outcome.
        self.plugin = CpuPluginWrapper()
        self.plugin.run_process("data", "out", all_procs, process_number)
        self.assertEqual(self.plugin.processes, expected_procs)
        self.assertEqual(self.plugin.process_number, expected_number)

    def test_run_process(self):
        # Single CPU slot.
        self._run_and_check(["CPU0"], 0, ["CPU0"], 0)

        # All-CPU list: every slot participates and keeps its own number.
        quad = ["CPU0", "CPU1", "CPU2", "CPU3"]
        for number in (0, 1, 3):
            self._run_and_check(quad, number, quad, number)

        # Mixed CPU/GPU list: GPU slots (odd indices here) are skipped and
        # CPU slots are renumbered within the CPU-only list.
        mixed = ["CPU0", "GPU0", "CPU1", "GPU1"]
        self._run_and_check(mixed, 0, ["CPU0", "CPU1"], 0)
        self._run_and_check(mixed, 1, None, None)
        self._run_and_check(mixed, 2, ["CPU0", "CPU1"], 1)
        self._run_and_check(mixed, 3, None, None)

    def test_run_cpu6_gpu2(self):
        all_procs = ["CPU0", "CPU1", "CPU2", "CPU3",
                     "CPU4", "CPU5", "GPU0", "GPU1"]
        cpu_procs = all_procs[:6]
        for i in range(8):
            if i < 6:
                self._run_and_check(all_procs, i, cpu_procs, i)
            else:
                self._run_and_check(all_procs, i, None, None)
class TimeseriesFieldCorrectionsTest(PluginTest):
    # Runs the generic PluginTest checks against the timeseries field
    # corrections plugin.

    def setUp(self):
        self.plugin_name = "savu.plugins.timeseries_field_corrections"
class MedianFilterTest(PluginTest):
    # Runs the generic PluginTest checks against the median filter plugin.

    def setUp(self):
        self.plugin_name = "savu.plugins.median_filter"
class SimpleReconTest(PluginTest):
    # Runs the generic PluginTest checks against the simple recon plugin.

    def setUp(self):
        self.plugin_name = "savu.plugins.simple_recon"
if __name__ == "__main__":
    # Run every TestCase defined in this module.
    unittest.main()
# -*- coding: utf-8 -*-
"""Dalite XBlock utils."""
from lazy.lazy import lazy
def _(text): # pylint: disable=invalid-name
"""
Make '_' a no-op so we can scrape strings.
:return text
"""
return text
# pylint: disable=protected-access
class FieldValuesContextManager(object):
    """
    Allow using bound methods as an XBlock field values provider.

    Black wizardry to work around the fact that field values can be callable,
    but that callable must be parameterless, while we need the current XBlock
    instance to produce the list of values.
    """

    def __init__(self, block, field_name, field_values_callback):
        """
        Initialize FieldValuesContextManager.

        :param XBlock block: XBlock containing field to wrap
        :param string field_name: Target field name
        :param () -> list[Any] field_values_callback: Values provider callback (can be bound or unbound method)
        """
        self._block = block
        self._field_name = field_name
        self._callback = field_values_callback
        # Saved copy of the field's original values provider, restored on exit.
        self._old_values_value = None

    @lazy
    def field(self):
        """
        Return field descriptor to wrap.

        :rtype: xblock.fields.Field
        """
        return self._block.fields[self._field_name]

    def __enter__(self):
        """Enter context-managed section: install the callback as provider."""
        # NOTE(review): reads the public `values` accessor but swaps/restores
        # the private `_values` attribute — presumably equivalent in xblock's
        # Field implementation; confirm against xblock.fields.
        self._old_values_value = self.field.values
        self.field._values = self._callback

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Exit from context-managed section, restoring the original provider.

        :param type|None exc_type: Type of exception thrown or None
        :param Exception|None exc_val: Exception thrown or None
        :param exc_tb: Exception traceback or None
        :rtype: bool
        :returns: True if exception should be suppressed, False otherwise
        """
        self.field._values = self._old_values_value
        return False
//Copyright (c) 2006-2008 Emil Dotchevski and Reverge Studios, Inc.
//Distributed under the Boost Software License, Version 1.0. (See accompanying
//file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_EXCEPTION_316FDA946C0D11DEA9CBAE5255D89593
#define BOOST_EXCEPTION_316FDA946C0D11DEA9CBAE5255D89593
#include <boost/config.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/exception/error_info.hpp>
#include <boost/exception/exception.hpp>
#include <boost/exception/get_error_info.hpp>
#include <boost/exception/info.hpp>
#include <boost/exception/info_tuple.hpp>
#include <boost/exception/errinfo_api_function.hpp>
#include <boost/exception/errinfo_at_line.hpp>
#include <boost/exception/errinfo_errno.hpp>
#include <boost/exception/errinfo_file_handle.hpp>
#include <boost/exception/errinfo_file_name.hpp>
#include <boost/exception/errinfo_file_open_mode.hpp>
#include <boost/exception/errinfo_type_info_name.hpp>
#ifndef BOOST_NO_EXCEPTIONS
#include <boost/exception/errinfo_nested_exception.hpp>
#include <boost/exception_ptr.hpp>
#endif
#endif | unknown | github | https://github.com/mysql/mysql-server | extra/boost/boost_1_87_0/boost/exception/all.hpp |
import re
import json
import marshal
import pickle
import tempfile
import unittest
from io import BytesIO
from datetime import datetime
from warnings import catch_warnings, filterwarnings
import lxml.etree
from itemadapter import ItemAdapter
from scrapy.item import Item, Field
from scrapy.utils.python import to_unicode
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.exporters import (
BaseItemExporter, PprintItemExporter, PickleItemExporter, CsvItemExporter,
XmlItemExporter, JsonLinesItemExporter, JsonItemExporter,
PythonItemExporter, MarshalItemExporter
)
class TestItem(Item):
    # Minimal two-field scrapy Item shared by the exporter tests below.
    name = Field()
    age = Field()
def custom_serializer(value):
    """Field serializer used in tests: numeric value plus two, as a string."""
    shifted = int(value) + 2
    return str(shifted)
class CustomFieldItem(Item):
    # Item whose 'age' field declares a custom serializer in its metadata.
    name = Field()
    age = Field(serializer=custom_serializer)
# dataclasses is unavailable on older interpreters; when the import fails,
# the dataclass-based fixtures are set to None so the corresponding test
# variants can skip themselves.
try:
    from dataclasses import make_dataclass, field
except ImportError:
    TestDataClass = None
    CustomFieldDataclass = None
else:
    TestDataClass = make_dataclass("TestDataClass", [("name", str), ("age", int)])
    # Mirrors CustomFieldItem: 'age' carries a serializer in field metadata.
    CustomFieldDataclass = make_dataclass(
        "CustomFieldDataclass",
        [("name", str), ("age", int, field(metadata={"serializer": custom_serializer}))]
    )
class BaseItemExporterTest(unittest.TestCase):
    """
    Shared test fixture for item exporters.

    Subclasses override item_class / custom_field_item_class and the
    _get_exporter() / _check_output() hooks to exercise concrete exporters.
    """

    item_class = TestItem
    custom_field_item_class = CustomFieldItem

    def setUp(self):
        # item_class is None for variants whose optional dependency
        # (e.g. dataclasses) is unavailable; skip those outright.
        if self.item_class is None:
            raise unittest.SkipTest("item class is None")
        self.i = self.item_class(name='John\xa3', age='22')
        self.output = BytesIO()
        self.ie = self._get_exporter()

    def _get_exporter(self, **kwargs):
        # Hook: subclasses return an instance of the exporter under test.
        return BaseItemExporter(**kwargs)

    def _check_output(self):
        # Hook: subclasses validate the serialized bytes in self.output.
        pass

    def _assert_expected_item(self, exported_dict):
        # Decode exported values, rebuild the item, compare with the original.
        for k, v in exported_dict.items():
            exported_dict[k] = to_unicode(v)
        self.assertEqual(self.i, self.item_class(**exported_dict))

    def _get_nonstring_types_item(self):
        # Sample payload exercising non-string field types.
        return {
            'boolean': False,
            'number': 22,
            'time': datetime(2015, 1, 1, 1, 1, 1),
            'float': 3.14,
        }

    def assertItemExportWorks(self, item):
        self.ie.start_exporting()
        try:
            self.ie.export_item(item)
        except NotImplementedError:
            # Only the abstract base exporter may be unimplemented;
            # a concrete subclass raising here is a real failure.
            if self.ie.__class__ is not BaseItemExporter:
                raise
        self.ie.finish_exporting()
        self._check_output()

    def test_export_item(self):
        self.assertItemExportWorks(self.i)

    def test_export_dict_item(self):
        self.assertItemExportWorks(ItemAdapter(self.i).asdict())

    def test_serialize_field(self):
        a = ItemAdapter(self.i)
        res = self.ie.serialize_field(a.get_field_meta('name'), 'name', a['name'])
        self.assertEqual(res, 'John\xa3')

        res = self.ie.serialize_field(a.get_field_meta('age'), 'age', a['age'])
        self.assertEqual(res, '22')

    def test_fields_to_export(self):
        ie = self._get_exporter(fields_to_export=['name'])
        self.assertEqual(list(ie._get_serialized_fields(self.i)), [('name', 'John\xa3')])

        # The declared output encoding must not affect the (str) field values.
        ie = self._get_exporter(fields_to_export=['name'], encoding='latin-1')
        _, name = list(ie._get_serialized_fields(self.i))[0]
        assert isinstance(name, str)
        self.assertEqual(name, 'John\xa3')

    def test_field_custom_serializer(self):
        # The serializer declared in the field metadata ('age' -> +2) wins.
        i = self.custom_field_item_class(name='John\xa3', age='22')
        a = ItemAdapter(i)
        ie = self._get_exporter()
        self.assertEqual(ie.serialize_field(a.get_field_meta('name'), 'name', a['name']), 'John\xa3')
        self.assertEqual(ie.serialize_field(a.get_field_meta('age'), 'age', a['age']), '24')
class BaseItemExporterDataclassTest(BaseItemExporterTest):
    """Re-run the base exporter tests with dataclass items (skipped if None)."""
    item_class = TestDataClass
    custom_field_item_class = CustomFieldDataclass
class PythonItemExporterTest(BaseItemExporterTest):
    """Tests for PythonItemExporter, which exports items as plain Python dicts."""
    def _get_exporter(self, **kwargs):
        return PythonItemExporter(binary=False, **kwargs)
    def test_invalid_option(self):
        with self.assertRaisesRegex(TypeError, "Unexpected options: invalid_option"):
            PythonItemExporter(invalid_option='something')
    def test_nested_item(self):
        # Items nested inside dicts (and vice versa) are exported recursively.
        i1 = self.item_class(name='Joseph', age='22')
        i2 = dict(name='Maria', age=i1)
        i3 = self.item_class(name='Jesus', age=i2)
        ie = self._get_exporter()
        exported = ie.export_item(i3)
        self.assertEqual(type(exported), dict)
        self.assertEqual(
            exported,
            {'age': {'age': {'age': '22', 'name': 'Joseph'}, 'name': 'Maria'}, 'name': 'Jesus'}
        )
        self.assertEqual(type(exported['age']), dict)
        self.assertEqual(type(exported['age']['age']), dict)
    def test_export_list(self):
        # Lists of nested items are converted element-wise.
        i1 = self.item_class(name='Joseph', age='22')
        i2 = self.item_class(name='Maria', age=[i1])
        i3 = self.item_class(name='Jesus', age=[i2])
        ie = self._get_exporter()
        exported = ie.export_item(i3)
        self.assertEqual(
            exported,
            {'age': [{'age': [{'age': '22', 'name': 'Joseph'}], 'name': 'Maria'}], 'name': 'Jesus'}
        )
        self.assertEqual(type(exported['age'][0]), dict)
        self.assertEqual(type(exported['age'][0]['age'][0]), dict)
    def test_export_item_dict_list(self):
        i1 = self.item_class(name='Joseph', age='22')
        i2 = dict(name='Maria', age=[i1])
        i3 = self.item_class(name='Jesus', age=[i2])
        ie = self._get_exporter()
        exported = ie.export_item(i3)
        self.assertEqual(
            exported,
            {'age': [{'age': [{'age': '22', 'name': 'Joseph'}], 'name': 'Maria'}], 'name': 'Jesus'}
        )
        self.assertEqual(type(exported['age'][0]), dict)
        self.assertEqual(type(exported['age'][0]['age'][0]), dict)
    def test_export_binary(self):
        # binary=True is deprecated; silence the deprecation warning for the check.
        with catch_warnings():
            filterwarnings('ignore', category=ScrapyDeprecationWarning)
            exporter = PythonItemExporter(binary=True)
            value = self.item_class(name='John\xa3', age='22')
            expected = {b'name': b'John\xc2\xa3', b'age': b'22'}
            self.assertEqual(expected, exporter.export_item(value))
    def test_nonstring_types_item(self):
        item = self._get_nonstring_types_item()
        ie = self._get_exporter()
        exported = ie.export_item(item)
        self.assertEqual(exported, item)
class PythonItemExporterDataclassTest(PythonItemExporterTest):
    """Re-run the Python exporter tests with dataclass items."""
    item_class = TestDataClass
    custom_field_item_class = CustomFieldDataclass
class PprintItemExporterTest(BaseItemExporterTest):
    """Tests for PprintItemExporter (pretty-printed repr output)."""
    def _get_exporter(self, **kwargs):
        return PprintItemExporter(self.output, **kwargs)
    def _check_output(self):
        # NOTE(review): eval() of the pprint repr is tolerable only because the
        # input is test-generated, never untrusted data.
        self._assert_expected_item(eval(self.output.getvalue()))
class PprintItemExporterDataclassTest(PprintItemExporterTest):
    """Re-run the pprint exporter tests with dataclass items."""
    item_class = TestDataClass
    custom_field_item_class = CustomFieldDataclass
class PickleItemExporterTest(BaseItemExporterTest):
    """Tests for PickleItemExporter (one pickle frame per exported item)."""
    def _get_exporter(self, **kwargs):
        return PickleItemExporter(self.output, **kwargs)
    def _check_output(self):
        self._assert_expected_item(pickle.loads(self.output.getvalue()))
    def test_export_multiple_items(self):
        # Each item is a separate pickle frame; pickle.load reads them in order.
        i1 = self.item_class(name='hello', age='world')
        i2 = self.item_class(name='bye', age='world')
        f = BytesIO()
        ie = PickleItemExporter(f)
        ie.start_exporting()
        ie.export_item(i1)
        ie.export_item(i2)
        ie.finish_exporting()
        f.seek(0)
        self.assertEqual(self.item_class(**pickle.load(f)), i1)
        self.assertEqual(self.item_class(**pickle.load(f)), i2)
    def test_nonstring_types_item(self):
        # Pickle round-trips non-string types without conversion.
        item = self._get_nonstring_types_item()
        fp = BytesIO()
        ie = PickleItemExporter(fp)
        ie.start_exporting()
        ie.export_item(item)
        ie.finish_exporting()
        self.assertEqual(pickle.loads(fp.getvalue()), item)
class PickleItemExporterDataclassTest(PickleItemExporterTest):
    """Re-run the pickle exporter tests with dataclass items."""
    item_class = TestDataClass
    custom_field_item_class = CustomFieldDataclass
class MarshalItemExporterTest(BaseItemExporterTest):
    """Tests for MarshalItemExporter; marshal needs a real, seekable file."""
    def _get_exporter(self, **kwargs):
        # Replace the BytesIO created in setUp: marshal wants an actual file.
        self.output = tempfile.TemporaryFile()
        return MarshalItemExporter(self.output, **kwargs)
    def _check_output(self):
        self.output.seek(0)
        self._assert_expected_item(marshal.load(self.output))
    def test_nonstring_types_item(self):
        item = self._get_nonstring_types_item()
        item.pop('time') # datetime is not marshallable
        fp = tempfile.TemporaryFile()
        ie = MarshalItemExporter(fp)
        ie.start_exporting()
        ie.export_item(item)
        ie.finish_exporting()
        fp.seek(0)
        self.assertEqual(marshal.load(fp), item)
class MarshalItemExporterDataclassTest(MarshalItemExporterTest):
    """Re-run the marshal exporter tests with dataclass items."""
    item_class = TestDataClass
    custom_field_item_class = CustomFieldDataclass
class CsvItemExporterTest(BaseItemExporterTest):
    """Tests for CsvItemExporter: headers, multivalue joining, encodings."""
    def _get_exporter(self, **kwargs):
        return CsvItemExporter(self.output, **kwargs)
    def assertCsvEqual(self, first, second, msg=None):
        """Compare CSV content ignoring column order within each line."""
        def split_csv(csv):
            return [
                sorted(re.split(r"(,|\s+)", line))
                for line in to_unicode(csv).splitlines(True)
            ]
        return self.assertEqual(split_csv(first), split_csv(second), msg=msg)
    def _check_output(self):
        self.assertCsvEqual(to_unicode(self.output.getvalue()), 'age,name\r\n22,John\xa3\r\n')
    def assertExportResult(self, item, expected, **kwargs):
        """Export *item* through a fresh CsvItemExporter and compare output."""
        fp = BytesIO()
        ie = CsvItemExporter(fp, **kwargs)
        ie.start_exporting()
        ie.export_item(item)
        ie.finish_exporting()
        self.assertCsvEqual(fp.getvalue(), expected)
    def test_header_export_all(self):
        self.assertExportResult(
            item=self.i,
            fields_to_export=ItemAdapter(self.i).field_names(),
            expected=b'age,name\r\n22,John\xc2\xa3\r\n',
        )
    def test_header_export_all_dict(self):
        self.assertExportResult(
            item=ItemAdapter(self.i).asdict(),
            expected=b'age,name\r\n22,John\xc2\xa3\r\n',
        )
    def test_header_export_single_field(self):
        for item in [self.i, ItemAdapter(self.i).asdict()]:
            self.assertExportResult(
                item=item,
                fields_to_export=['age'],
                expected=b'age\r\n22\r\n',
            )
    def test_header_export_two_items(self):
        # The header line must be emitted only once.
        for item in [self.i, ItemAdapter(self.i).asdict()]:
            output = BytesIO()
            ie = CsvItemExporter(output)
            ie.start_exporting()
            ie.export_item(item)
            ie.export_item(item)
            ie.finish_exporting()
            self.assertCsvEqual(output.getvalue(),
                                b'age,name\r\n22,John\xc2\xa3\r\n22,John\xc2\xa3\r\n')
    def test_header_no_header_line(self):
        for item in [self.i, ItemAdapter(self.i).asdict()]:
            self.assertExportResult(
                item=item,
                include_headers_line=False,
                expected=b'22,John\xc2\xa3\r\n',
            )
    def test_join_multivalue(self):
        # List values are joined with "," and therefore quoted by the csv writer.
        class TestItem2(Item):
            name = Field()
            friends = Field()
        for cls in TestItem2, dict:
            self.assertExportResult(
                item=cls(name='John', friends=['Mary', 'Paul']),
                include_headers_line=False,
                expected='"Mary,Paul",John\r\n',
            )
    def test_join_multivalue_not_strings(self):
        self.assertExportResult(
            item=dict(name='John', friends=[4, 8]),
            include_headers_line=False,
            expected='"[4, 8]",John\r\n',
        )
    def test_nonstring_types_item(self):
        self.assertExportResult(
            item=self._get_nonstring_types_item(),
            include_headers_line=False,
            expected='22,False,3.14,2015-01-01 01:01:01\r\n'
        )
    def test_errors_default(self):
        # Characters unencodable in the target codec raise by default.
        with self.assertRaises(UnicodeEncodeError):
            self.assertExportResult(
                item=dict(text=u'W\u0275\u200Brd'),
                expected=None,
                encoding='windows-1251',
            )
    def test_errors_xmlcharrefreplace(self):
        self.assertExportResult(
            item=dict(text=u'W\u0275\u200Brd'),
            include_headers_line=False,
            expected='Wɵ​rd\r\n',
            encoding='windows-1251',
            errors='xmlcharrefreplace',
        )
class CsvItemExporterDataclassTest(CsvItemExporterTest):
    """Re-run the CSV exporter tests with dataclass items."""
    item_class = TestDataClass
    custom_field_item_class = CustomFieldDataclass
class XmlItemExporterTest(BaseItemExporterTest):
    """Tests for XmlItemExporter; comparisons ignore sibling element order."""
    def _get_exporter(self, **kwargs):
        return XmlItemExporter(self.output, **kwargs)
    def assertXmlEquivalent(self, first, second, msg=None):
        """Compare two XML documents structurally rather than textually."""
        def xmltuple(elem):
            children = list(elem.iterchildren())
            if children:
                return [(child.tag, sorted(xmltuple(child))) for child in children]
            else:
                return [(elem.tag, [(elem.text, ())])]
        def xmlsplit(xmlcontent):
            doc = lxml.etree.fromstring(xmlcontent)
            return xmltuple(doc)
        return self.assertEqual(xmlsplit(first), xmlsplit(second), msg)
    def assertExportResult(self, item, expected_value):
        fp = BytesIO()
        ie = XmlItemExporter(fp)
        ie.start_exporting()
        ie.export_item(item)
        ie.finish_exporting()
        self.assertXmlEquivalent(fp.getvalue(), expected_value)
    def _check_output(self):
        expected_value = (
            b'<?xml version="1.0" encoding="utf-8"?>\n'
            b'<items><item><age>22</age><name>John\xc2\xa3</name></item></items>'
        )
        self.assertXmlEquivalent(self.output.getvalue(), expected_value)
    def test_multivalued_fields(self):
        # Each value of a multi-valued field becomes a <value> child element.
        self.assertExportResult(
            self.item_class(name=['John\xa3', 'Doe'], age=[1, 2, 3]),
            b"""<?xml version="1.0" encoding="utf-8"?>\n
            <items>
            <item>
            <name><value>John\xc2\xa3</value><value>Doe</value></name>
            <age><value>1</value><value>2</value><value>3</value></age>
            </item>
            </items>
            """
        )
    def test_nested_item(self):
        i1 = dict(name='foo\xa3hoo', age='22')
        i2 = dict(name='bar', age=i1)
        i3 = self.item_class(name='buz', age=i2)
        self.assertExportResult(
            i3,
            b"""<?xml version="1.0" encoding="utf-8"?>\n
            <items>
            <item>
            <age>
            <age>
            <age>22</age>
            <name>foo\xc2\xa3hoo</name>
            </age>
            <name>bar</name>
            </age>
            <name>buz</name>
            </item>
            </items>
            """
        )
    def test_nested_list_item(self):
        i1 = dict(name='foo')
        i2 = dict(name='bar', v2={"egg": ["spam"]})
        i3 = self.item_class(name='buz', age=[i1, i2])
        self.assertExportResult(
            i3,
            b"""<?xml version="1.0" encoding="utf-8"?>\n
            <items>
            <item>
            <age>
            <value><name>foo</name></value>
            <value><name>bar</name><v2><egg><value>spam</value></egg></v2></value>
            </age>
            <name>buz</name>
            </item>
            </items>
            """
        )
    def test_nonstring_types_item(self):
        item = self._get_nonstring_types_item()
        self.assertExportResult(
            item,
            b"""<?xml version="1.0" encoding="utf-8"?>\n
            <items>
            <item>
            <float>3.14</float>
            <boolean>False</boolean>
            <number>22</number>
            <time>2015-01-01 01:01:01</time>
            </item>
            </items>
            """
        )
class XmlItemExporterDataclassTest(XmlItemExporterTest):
    """Re-run the XML exporter tests with dataclass items."""
    item_class = TestDataClass
    custom_field_item_class = CustomFieldDataclass
class JsonLinesItemExporterTest(BaseItemExporterTest):
    """Tests for JsonLinesItemExporter (one JSON object per output line)."""
    _expected_nested = {'name': 'Jesus', 'age': {'name': 'Maria', 'age': {'name': 'Joseph', 'age': '22'}}}
    def _get_exporter(self, **kwargs):
        return JsonLinesItemExporter(self.output, **kwargs)
    def _check_output(self):
        exported = json.loads(to_unicode(self.output.getvalue().strip()))
        self.assertEqual(exported, ItemAdapter(self.i).asdict())
    def test_nested_item(self):
        i1 = self.item_class(name='Joseph', age='22')
        i2 = dict(name='Maria', age=i1)
        i3 = self.item_class(name='Jesus', age=i2)
        self.ie.start_exporting()
        self.ie.export_item(i3)
        self.ie.finish_exporting()
        exported = json.loads(to_unicode(self.output.getvalue()))
        self.assertEqual(exported, self._expected_nested)
    def test_extra_keywords(self):
        # Extra kwargs are forwarded to the JSON encoder; unknown ones raise.
        self.ie = self._get_exporter(sort_keys=True)
        self.test_export_item()
        self._check_output()
        self.assertRaises(TypeError, self._get_exporter, foo_unknown_keyword_bar=True)
    def test_nonstring_types_item(self):
        item = self._get_nonstring_types_item()
        self.ie.start_exporting()
        self.ie.export_item(item)
        self.ie.finish_exporting()
        exported = json.loads(to_unicode(self.output.getvalue()))
        # datetime values come back as their string representation
        item['time'] = str(item['time'])
        self.assertEqual(exported, item)
class JsonLinesItemExporterDataclassTest(JsonLinesItemExporterTest):
    """Re-run the JSON-lines exporter tests with dataclass items."""
    item_class = TestDataClass
    custom_field_item_class = CustomFieldDataclass
class JsonItemExporterTest(JsonLinesItemExporterTest):
    """Tests for JsonItemExporter (a single JSON array wrapping all items)."""
    _expected_nested = [JsonLinesItemExporterTest._expected_nested]
    def _get_exporter(self, **kwargs):
        return JsonItemExporter(self.output, **kwargs)
    def _check_output(self):
        exported = json.loads(to_unicode(self.output.getvalue().strip()))
        self.assertEqual(exported, [ItemAdapter(self.i).asdict()])
    def assertTwoItemsExported(self, item):
        """Export *item* twice and expect a two-element JSON array."""
        self.ie.start_exporting()
        self.ie.export_item(item)
        self.ie.export_item(item)
        self.ie.finish_exporting()
        exported = json.loads(to_unicode(self.output.getvalue()))
        self.assertEqual(exported, [ItemAdapter(item).asdict(), ItemAdapter(item).asdict()])
    def test_two_items(self):
        self.assertTwoItemsExported(self.i)
    def test_two_dict_items(self):
        self.assertTwoItemsExported(ItemAdapter(self.i).asdict())
    def test_nested_item(self):
        i1 = self.item_class(name='Joseph\xa3', age='22')
        i2 = self.item_class(name='Maria', age=i1)
        i3 = self.item_class(name='Jesus', age=i2)
        self.ie.start_exporting()
        self.ie.export_item(i3)
        self.ie.finish_exporting()
        exported = json.loads(to_unicode(self.output.getvalue()))
        expected = {'name': 'Jesus', 'age': {'name': 'Maria', 'age': ItemAdapter(i1).asdict()}}
        self.assertEqual(exported, [expected])
    def test_nested_dict_item(self):
        i1 = dict(name='Joseph\xa3', age='22')
        i2 = self.item_class(name='Maria', age=i1)
        i3 = dict(name='Jesus', age=i2)
        self.ie.start_exporting()
        self.ie.export_item(i3)
        self.ie.finish_exporting()
        exported = json.loads(to_unicode(self.output.getvalue()))
        expected = {'name': 'Jesus', 'age': {'name': 'Maria', 'age': i1}}
        self.assertEqual(exported, [expected])
    def test_nonstring_types_item(self):
        item = self._get_nonstring_types_item()
        self.ie.start_exporting()
        self.ie.export_item(item)
        self.ie.finish_exporting()
        exported = json.loads(to_unicode(self.output.getvalue()))
        item['time'] = str(item['time'])
        self.assertEqual(exported, [item])
class JsonItemExporterDataclassTest(JsonItemExporterTest):
    """Re-run the JSON exporter tests with dataclass items."""
    item_class = TestDataClass
    custom_field_item_class = CustomFieldDataclass
class CustomExporterItemTest(unittest.TestCase):
    """serialize_field can be overridden per-exporter for custom behaviour."""
    item_class = TestItem
    def setUp(self):
        if self.item_class is None:
            raise unittest.SkipTest("item class is None")
    def test_exporter_custom_serializer(self):
        class CustomItemExporter(BaseItemExporter):
            def serialize_field(self, field, name, value):
                if name == 'age':
                    return str(int(value) + 1)
                else:
                    return super().serialize_field(field, name, value)
        i = self.item_class(name='John', age='22')
        a = ItemAdapter(i)
        ie = CustomItemExporter()
        self.assertEqual(ie.serialize_field(a.get_field_meta('name'), 'name', a['name']), 'John')
        self.assertEqual(ie.serialize_field(a.get_field_meta('age'), 'age', a['age']), '23')
        # Plain dicts carry no field metadata; an empty dict is passed instead.
        i2 = {'name': 'John', 'age': '22'}
        self.assertEqual(ie.serialize_field({}, 'name', i2['name']), 'John')
        self.assertEqual(ie.serialize_field({}, 'age', i2['age']), '23')
class CustomExporterDataclassTest(CustomExporterItemTest):
    """Re-run the custom-serializer test with dataclass items."""
    item_class = TestDataClass
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    # (Removed dataset-export residue that was fused onto this line and made
    # it a syntax error.)
    unittest.main()
##
## P(icture)i(n)g(raphics) renderer
##
from Renderer import Renderer
from enigma import eVideoWidget, getDesktop
from Screens.PictureInPicture import PipPigMode
class Pig(Renderer):
    """Picture-in-graphics renderer: shows the live video inside the skin.

    The optional skin attribute ``hidePip`` (value "1" enables it) controls
    whether the regular picture-in-picture window is suppressed while this
    renderer is shown.
    """
    # Widget class instantiated for this renderer by the Renderer machinery.
    GUI_WIDGET = eVideoWidget
    def __init__(self):
        Renderer.__init__(self)
        self.Position = self.Size = None
        self.hidePip = True
    def postWidgetCreate(self, instance):
        """Bind the widget to decoder 0 and size its framebuffer to the desktop."""
        desk = getDesktop(0)
        instance.setDecoder(0)
        instance.setFBSize(desk.size())
    def applySkin(self, desktop, parent):
        """Consume the optional ``hidePip`` skin attribute, then apply the rest."""
        attribs = self.skinAttributes[:]
        for (attrib, value) in self.skinAttributes:
            if attrib == "hidePip":
                self.hidePip = value == "1"
                attribs.remove((attrib, value))
                break
        self.skinAttributes = attribs
        ret = Renderer.applySkin(self, desktop, parent)
        if ret:
            # Remember the skinned geometry so onShow() can restore it.
            self.Position = self.instance.position()
            self.Size = self.instance.size()
        return ret
    def onShow(self):
        """Restore geometry and, if configured, hide the regular PiP window."""
        if self.instance:
            if self.Size:
                self.instance.resize(self.Size)
            if self.Position:
                self.instance.move(self.Position)
            # Explicit `if` instead of the `x and f()` side-effect idiom.
            if self.hidePip:
                PipPigMode(True)
    def onHide(self):
        """Release the widget and re-enable the regular PiP window if hidden."""
        if self.instance:
            self.preWidgetRemove(self.instance)
            if self.hidePip:
                PipPigMode(False)
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run the Chrome WebUI presubmit scripts on our test javascript.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
def GetPathsToPrepend(input_api):
  """Return the sys.path entries the web-dev-style checkers need.

  The presubmit directory itself comes first, followed by the shared
  chrome/browser/resources style-checker directory.
  """
  web_dev_style_path = input_api.os_path.join(
      input_api.change.RepositoryRoot(), 'chrome', 'browser', 'resources')
  return [input_api.PresubmitLocalPath(), web_dev_style_path]
def RunWithPrependedPath(prepended_path, fn, *args):
  """Call fn(*args) with *prepended_path* entries placed before sys.path.

  The original sys.path object is restored afterwards, even if fn raises.
  """
  import sys
  original = sys.path
  sys.path = prepended_path + original
  try:
    return fn(*args)
  finally:
    sys.path = original
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point invoked on `git cl upload`."""
  def go():
    return list(_CommonChecks(input_api, output_api))
  return RunWithPrependedPath(GetPathsToPrepend(input_api), go)
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point invoked on commit (`git cl land`)."""
  def go():
    return list(_CommonChecks(input_api, output_api))
  return RunWithPrependedPath(GetPathsToPrepend(input_api), go)
def _CommonChecks(input_api, output_api):
  """Run the Chrome web-dev-style JS checks over files in this directory."""
  resources = input_api.PresubmitLocalPath()
  def is_resource(maybe_resource):
    # Only JavaScript files under this presubmit's directory are checked.
    # (The previous helper was named _html_css_js_resource but only ever
    # matched .js, via a parenthesized string that looked like a tuple.)
    path = maybe_resource.AbsoluteLocalPath()
    return path.startswith(resources) and path.endswith('.js')
  from web_dev_style import js_checker
  results = []
  results.extend(js_checker.JSChecker(
      input_api, output_api, file_filter=is_resource).RunChecks())
  return results
#!/usr/bin/env python
from __future__ import print_function
import re
import ast
import subprocess
import sys
DEBUG = False
def version(version_file):
    """Return the ``__version__`` string declared in *version_file*."""
    pattern = re.compile(r'__version__\s+=\s+(.*)')
    with open(version_file, 'rb') as fobj:
        match = pattern.search(fobj.read().decode('utf-8'))
    # literal_eval unquotes the matched right-hand side safely.
    return str(ast.literal_eval(match.group(1)))
def commit_for_release(version_file, ver):
    """Stage *version_file* and commit it with a release message.

    Each git command is echoed to stdout before it runs.
    """
    commands = (
        ['git', 'reset'],
        ['git', 'add', version_file],
        ['git', 'commit', '--message', 'Releasing version %s' % ver],
    )
    for cmd in commands:
        print(' '.join(cmd))
        subprocess.check_output(cmd)
def create_git_tag(tag_name):
    """Tag the current HEAD as *tag_name*, echoing the command first."""
    tag_cmd = ['git', 'tag', tag_name]
    print(' '.join(tag_cmd))
    subprocess.check_output(tag_cmd)
def register_with_pypi():
    """Register the package metadata with PyPI, echoing the command first."""
    register_cmd = ['python', 'setup.py', 'register']
    print(' '.join(register_cmd))
    subprocess.check_output(register_cmd)
def create_source_tarball():
    """Build the sdist tarball, echoing the command first."""
    sdist_cmd = ['python', 'setup.py', 'sdist']
    print(' '.join(sdist_cmd))
    subprocess.check_output(sdist_cmd)
def push_to_github():
    """Push the master branch to origin, echoing the command first."""
    push_cmd = ['git', 'push', 'origin', 'master']
    print(' '.join(push_cmd))
    subprocess.check_output(push_cmd)
def push_tags_to_github():
    """Push all local tags to origin, echoing the command first."""
    push_tags_cmd = ['git', 'push', '--tags', 'origin']
    print(' '.join(push_tags_cmd))
    subprocess.check_output(push_tags_cmd)
def checklist(questions):
    """Ask each yes/no *question*; exit with status 1 on the first non-'y'.

    Works on both Python 2 (raw_input) and Python 3 (input); the original
    called raw_input unconditionally, which raises NameError on Python 3.
    """
    try:
        ask = raw_input  # Python 2
    except NameError:
        ask = input  # Python 3: raw_input was renamed to input
    for question in questions:
        choice = ask(question + ' (y/N)')
        if choice.lower() != 'y':
            sys.exit(1)
if __name__ == '__main__':
    if DEBUG:
        # Dry-run mode: echo commands back instead of executing them.
        subprocess.check_output = lambda x: x
    checks = ['Have you created the debian package?',
              'Have you updated the AUTHORS file?',
              ]
    checklist(checks)
    ver = version('mycli/__init__.py')
    print('Releasing Version:', ver)
    # Portable prompt: raw_input exists only on Python 2 (the original call
    # raised NameError on Python 3).
    try:
        ask = raw_input
    except NameError:
        ask = input
    choice = ask('Are you sure? (y/N)')
    if choice.lower() != 'y':
        sys.exit(1)
    commit_for_release('mycli/__init__.py', ver)
    create_git_tag('v%s' % ver)
    register_with_pypi()
    create_source_tarball()
    push_to_github()
    push_tags_to_github()
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Michiel D. Nauta
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Unittest for export to VCard
"""
import unittest
import time
import subprocess
import sys
import os
import xml.etree.ElementTree as ET
from ...lib.libgrampsxml import GRAMPS_XML_VERSION
from gramps.version import VERSION
from ..exportvcard import VCardWriter
class VCardCheck(unittest.TestCase):
def setUp(self):
self.expect = ["BEGIN:VCARD", "VERSION:3.0",
"PRODID:-//Gramps//NONSGML Gramps %s//EN" % VERSION,
"FN:Lastname", "N:Lastname;;;;",
"SORT-STRING:" + "Lastname".ljust(55), "END:VCARD"]
date = time.localtime(time.time())
self.input_list = ["BEGIN:VCARD", "VERSION:3.0", "FN:Lastname",
"N:Lastname;;;;", "END:VCARD"]
self.header = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE database PUBLIC "-//GRAMPS//DTD GRAMPS XML %s//EN"
"http://gramps-project.org/xml/%s/grampsxml.dtd">""" % \
(GRAMPS_XML_VERSION, GRAMPS_XML_VERSION)
strng = """<database xmlns="http://gramps-project.org/xml/%s/">
<header>
<created date="%04d-%02d-%02d" version="%s"/>
<researcher/>
</header>
<people>
<person id="I0000" handle="_0000">
<name type="Birth Name">
<surname>Lastname</surname>
</name>
</person>
</people>
</database>""" % \
(GRAMPS_XML_VERSION, date[0], date[1], date[2], VERSION)
namespace = "http://gramps-project.org/xml/%s/" % GRAMPS_XML_VERSION
ET.register_namespace("", namespace)
self.database = ET.XML(strng)
self.people = self.database[1]
self.person = self.people[0]
self.name = self.person[0]
self.lastname = self.name[0]
def do_case(self, input_doc, expect_str, debug=False):
if debug:
print(ET.tostring(input_doc))
gcmd = [sys.executable, 'Gramps.py',
'-i', '-', '-f', 'gramps',
'-e', '-', '-f', 'vcf']
process = subprocess.Popen(gcmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=os.environ)
input_str = (self.header.encode('utf-8') +
ET.tostring(input_doc, encoding='utf-8'))
result_str, err_str = process.communicate(input_str)
separator = '\r' + os.linesep
expect_str = separator.join(expect_str) + (separator * 2)
if debug:
print(err_str)
print(result_str)
print(expect_str)
self.assertEqual(result_str, expect_str.encode('utf-8'))
def test_base(self):
self.do_case(self.database,
self.expect)
def test_esc_string_none(self):
self.assertEqual(VCardWriter.esc("nothing"), "nothing")
def test_esc_string_all(self):
self.assertEqual(VCardWriter.esc("backslash\\_comma,_semicolon;"),
"backslash\\\\_comma\\,_semicolon\\;")
def test_esc_string_list(self):
self.assertEqual(VCardWriter.esc(["comma,", "semicolon;"]),
["comma\\,", "semicolon\\;"])
def test_esc_string_tuple(self):
self.assertEqual(VCardWriter.esc(("comma,", "semicolon;")),
("comma\\,", "semicolon\\;"))
def test_esc_string_wrongtype(self):
self.assertRaises(TypeError, VCardWriter.esc,
{"comma,":"semicolon;"})
def test_write_formatted_name_title(self):
ET.SubElement(self.name, "title").text = 'Sir.'
self.expect[3] = "FN:Sir. Lastname"
self.expect[4] = "N:Lastname;;;Sir.;"
self.do_case(self.database, self.expect)
def test_write_name_multiple_surname(self):
self.lastname.text = "Oranje"
self.lastname.set("prefix", "van")
ET.SubElement(self.name, "surname").text = "Nassau"
self.expect[3] = "FN:van Oranje Nassau"
self.expect[4] = "N:van Oranje,Nassau;;;;"
self.expect[5] = "SORT-STRING:" + "Oranje".ljust(55)
self.do_case(self.database, self.expect)
def test_write_name_callname(self):
# callname not in first names!
ET.SubElement(self.name, "first").text = "B C"
ET.SubElement(self.name, "call").text = "A"
self.expect[3] = "FN:B C Lastname"
self.expect[4] = "N:Lastname;A;B,C;;"
self.expect[5] = "SORT-STRING:" + "Lastname".ljust(25) + "B C".ljust(30)
self.do_case(self.database, self.expect)
def test_write_name_callname_in_addnames(self):
ET.SubElement(self.name, "first").text = "A B C"
ET.SubElement(self.name, "call").text = "B"
self.expect[3] = "FN:A B C Lastname"
self.expect[4] = "N:Lastname;B;A,C;;"
self.expect[5] = "SORT-STRING:" + "Lastname".ljust(25) + "A B C".ljust(30)
self.do_case(self.database, self.expect)
def test_write_name_no_callname(self):
ET.SubElement(self.name, "first").text = "A B C"
self.expect[3] = "FN:A B C Lastname"
self.expect[4] = "N:Lastname;A;B,C;;"
self.expect[5] = "SORT-STRING:" + "Lastname".ljust(25) + "A B C".ljust(30)
self.do_case(self.database, self.expect)
def test_write_name_no_additional_names(self):
ET.SubElement(self.name, "first").text = "A"
self.expect[3] = "FN:A Lastname"
self.expect[4] = "N:Lastname;A;;;"
self.expect[5] = "SORT-STRING:" + "Lastname".ljust(25) + "A".ljust(30)
self.do_case(self.database, self.expect)
def test_write_name_honprefix(self):
ET.SubElement(self.name, "title").text = "Sir"
self.expect[3] = "FN:Sir Lastname"
self.expect[4] = "N:Lastname;;;Sir;"
self.expect[5] = "SORT-STRING:" + "Lastname".ljust(55)
self.do_case(self.database, self.expect)
def test_write_name_honsuffix(self):
ET.SubElement(self.name, "suffix").text = "Jr."
self.expect[3] = "FN:Lastname\\, Jr."
self.expect[4] = "N:Lastname;;;;Jr."
self.expect[5] = "SORT-STRING:" + "Lastname".ljust(55)+ "Jr."
self.do_case(self.database, self.expect)
def test_nicknames_regular(self):
attribs = {'type': 'Birth Name', 'alt': '1'}
name = ET.SubElement(self.person, "name", attrib=attribs)
ET.SubElement(name, 'nick').text = 'Nick'
name = ET.SubElement(self.person, "name", attrib=attribs)
ET.SubElement(name, 'nick').text = 'N.'
self.expect.insert(6, "NICKNAME:Nick,N.")
self.do_case(self.database, self.expect)
def test_nicknames_primary_nick(self):
ET.SubElement(self.name, 'nick').text = 'Nick'
attribs = {'type': 'Birth Name', 'alt': '1'}
name = ET.SubElement(self.person, "name", attrib=attribs)
ET.SubElement(name, 'nick').text = 'N.'
self.expect.insert(6, "NICKNAME:Nick,N.")
self.do_case(self.database, self.expect)
def test_write_birthdate_regular(self):
events = ET.Element('events')
self.database.insert(1, events)
attribs = {'handle': '_e0000', 'id': 'E0000'}
event = ET.SubElement(events, 'event', attrib=attribs)
ET.SubElement(event, 'type').text = 'Birth'
ET.SubElement(event, 'dateval', val='2001-02-28')
attribs = {'hlink': '_e0000', 'role': 'Primary'}
ET.SubElement(self.person, 'eventref', attrib=attribs)
self.expect.insert(6, "BDAY:2001-02-28")
self.do_case(self.database, self.expect)
def test_write_birthdate_empty(self):
events = ET.Element('events')
self.database.insert(1, events)
attribs = {'handle': '_e0000', 'id': 'E0000'}
event = ET.SubElement(events, 'event', attrib=attribs)
ET.SubElement(event, 'type').text = 'Birth'
attribs = {'hlink': '_e0000', 'role': 'Primary'}
ET.SubElement(self.person, 'eventref', attrib=attribs)
self.do_case(self.database, self.expect)
def test_write_birhtdate_textonly(self):
events = ET.Element('events')
self.database.insert(1, events)
attribs = {'handle': '_e0000', 'id': 'E0000'}
event = ET.SubElement(events, 'event', attrib=attribs)
ET.SubElement(event, 'type').text = 'Birth'
ET.SubElement(event, 'dateval', val='Christmas 2001')
attribs = {'hlink': '_e0000', 'role': 'Primary'}
ET.SubElement(self.person, 'eventref', attrib=attribs)
self.do_case(self.database, self.expect)
def test_write_birthdate_span(self):
events = ET.Element('events')
self.database.insert(1, events)
attribs = {'handle': '_e0000', 'id': 'E0000'}
event = ET.SubElement(events, 'event', attrib=attribs)
ET.SubElement(event, 'type').text = 'Birth'
attribs = {'start': '2001-02-28', 'stop': '2002-02-28'}
ET.SubElement(event, 'datespan', attrib=attribs)
attribs = {'hlink': '_e0000', 'role': 'Primary'}
ET.SubElement(self.person, 'eventref', attrib=attribs)
self.do_case(self.database, self.expect)
def test_write_birthdate_range(self):
events = ET.Element('events')
self.database.insert(1, events)
attribs = {'handle': '_e0000', 'id': 'E0000'}
event = ET.SubElement(events, 'event', attrib=attribs)
ET.SubElement(event, 'type').text = 'Birth'
attribs = {'start': '2001-02-28', 'stop': '2002-02-28'}
ET.SubElement(event, 'daterange', attrib=attribs)
attribs = {'hlink': '_e0000', 'role': 'Primary'}
ET.SubElement(self.person, 'eventref', attrib=attribs)
self.do_case(self.database, self.expect)
def test_write_addresses_regular(self):
address = ET.SubElement(self.person, 'address')
ET.SubElement(address, 'street').text = 'pobox bis street'
ET.SubElement(address, 'city').text = 'place'
ET.SubElement(address, 'country').text = 'country'
ET.SubElement(address, 'state').text = 'province'
ET.SubElement(address, 'postal').text = 'zip'
self.expect.insert(6, "ADR:;;pobox bis street;place;province;zip;country")
self.do_case(self.database, self.expect)
def test_write_addresses_phone(self):
address = ET.SubElement(self.person, 'address')
ET.SubElement(address, 'phone').text = '01234-56789'
self.expect.insert(6, "TEL:01234-56789")
self.do_case(self.database, self.expect)
def test_write_urls_email(self):
attribs = {'type': 'E-mail', 'href': 'me@example.com'}
ET.SubElement(self.person, 'url', attrib=attribs)
self.expect.insert(6, "EMAIL:me@example.com")
self.do_case(self.database, self.expect)
def test_write_urls_emial_mailto(self):
attribs = {'type': 'E-mail', 'href': 'mailto:me@example.com'}
ET.SubElement(self.person, 'url', attrib=attribs)
self.expect.insert(6, "EMAIL:me@example.com")
self.do_case(self.database, self.expect)
def test_write_urls_url(self):
attribs = {'type': 'Web Home', 'href': 'http://www.example.org'}
ET.SubElement(self.person, 'url', attrib=attribs)
self.expect.insert(6, "URL:http://www.example.org")
self.do_case(self.database, self.expect)
def test_write_occupation_regular(self):
events = ET.Element('events')
self.database.insert(1, events)
attribs = {'handle': '_e0000', 'id': 'E0000'}
event = ET.SubElement(events, 'event', attrib=attribs)
ET.SubElement(event, 'type').text = 'Occupation'
ET.SubElement(event, 'description').text = 'carpenter'
attribs = {'hlink': '_e0000', 'role': 'Primary'}
ET.SubElement(self.person, 'eventref', attrib=attribs)
self.expect.insert(6, "ROLE:carpenter")
self.do_case(self.database, self.expect)
def test_write_occupation_lastdate(self):
    """With several dated Occupation events only the latest one is exported."""
    events = ET.Element('events')
    self.database.insert(1, events)
    # Two Occupation events: the 2011 "foreman" one is more recent and must win.
    for handle, event_id, date, description in (
            ('_e0000', 'E0000', '2011-02-28', 'foreman'),
            ('_e0001', 'E0001', '2000-09-21', 'carpenter')):
        event = ET.SubElement(events, 'event',
                              attrib={'handle': handle, 'id': event_id})
        ET.SubElement(event, 'type').text = 'Occupation'
        ET.SubElement(event, 'dateval', val=date)
        ET.SubElement(event, 'description').text = description
    for hlink in ('_e0000', '_e0001'):
        ET.SubElement(self.person, 'eventref',
                      attrib={'hlink': hlink, 'role': 'Primary'})
    self.expect.insert(6, "ROLE:foreman")
    self.do_case(self.database, self.expect)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from nose.tools import assert_raises
import gevent
import sys
from zerorpc import zmq
import zerorpc
from testutils import teardown, random_ipc_endpoint
def test_server_manual():
    """Drive a zerorpc.Server through the raw channel API, without a Client."""
    endpoint = random_ipc_endpoint()

    class MySrv(zerorpc.Server):
        def lolita(self):
            return 42

        def add(self, a, b):
            return a + b

    server = MySrv()
    server.bind(endpoint)
    gevent.spawn(server.run)

    events = zerorpc.Events(zmq.DEALER)
    events.connect(endpoint)
    multiplexer = zerorpc.ChannelMultiplexer(events, ignore_broadcast=True)

    # One channel per call: emit the request, then read back the reply event.
    for name, args, expected in (('lolita', tuple(), [42]),
                                 ('add', (1, 2), [3])):
        channel = multiplexer.channel()
        channel.emit(name, args)
        reply = channel.recv()
        assert list(reply.args) == expected
        channel.close()

    server.stop()
def test_client_server():
    """End-to-end check of zerorpc.Client calling methods on a Server over IPC."""
    endpoint = random_ipc_endpoint()

    class MySrv(zerorpc.Server):
        def lolita(self):
            return 42

        def add(self, a, b):
            return a + b

    srv = MySrv()
    srv.bind(endpoint)
    gevent.spawn(srv.run)
    client = zerorpc.Client()
    client.connect(endpoint)
    # Remote calls look like plain method calls on the client proxy.
    print client.lolita()
    assert client.lolita() == 42
    print client.add(1, 4)
    assert client.add(1, 4) == 5
def test_client_server_client_timeout():
    """A client with timeout=2 must raise TimeoutExpired when the server stalls."""
    endpoint = random_ipc_endpoint()

    class MySrv(zerorpc.Server):
        def lolita(self):
            return 42

        def add(self, a, b):
            # Sleep longer than the client timeout to force TimeoutExpired.
            gevent.sleep(10)
            return a + b

    srv = MySrv()
    srv.bind(endpoint)
    gevent.spawn(srv.run)
    client = zerorpc.Client(timeout=2)
    client.connect(endpoint)
    # assert_raises is only usable as a context manager on Python >= 2.7.
    if sys.version_info < (2, 7):
        assert_raises(zerorpc.TimeoutExpired, client.add, 1, 4)
    else:
        with assert_raises(zerorpc.TimeoutExpired):
            print client.add(1, 4)
    client.close()
    srv.close()
def test_client_server_exception():
    """A server-side exception surfaces on the client as zerorpc.RemoteError."""
    endpoint = random_ipc_endpoint()

    class MySrv(zerorpc.Server):
        def raise_something(self, a):
            # Indexing the argument raises remotely when it is not subscriptable
            # (e.g. the int 42 below).
            return a[4]

    srv = MySrv()
    srv.bind(endpoint)
    gevent.spawn(srv.run)
    client = zerorpc.Client(timeout=2)
    client.connect(endpoint)
    # assert_raises is only usable as a context manager on Python >= 2.7.
    if sys.version_info < (2, 7):
        def _do_with_assert_raises():
            print client.raise_something(42)
        assert_raises(zerorpc.RemoteError, _do_with_assert_raises)
    else:
        with assert_raises(zerorpc.RemoteError):
            print client.raise_something(42)
    # A valid argument still works on the same connection after the error.
    assert client.raise_something(range(5)) == 4
    client.close()
    srv.close()
def test_client_server_detailed_exception():
    """RemoteError carries the remote exception's class name and message."""
    endpoint = random_ipc_endpoint()

    class MySrv(zerorpc.Server):
        def raise_error(self):
            raise RuntimeError('oops!')

    srv = MySrv()
    srv.bind(endpoint)
    gevent.spawn(srv.run)
    client = zerorpc.Client(timeout=2)
    client.connect(endpoint)
    # assert_raises is only usable as a context manager on Python >= 2.7.
    if sys.version_info < (2, 7):
        def _do_with_assert_raises():
            print client.raise_error()
        assert_raises(zerorpc.RemoteError, _do_with_assert_raises)
    else:
        with assert_raises(zerorpc.RemoteError):
            print client.raise_error()
    # Inspect the structured error details exposed on the exception object.
    try:
        client.raise_error()
    except zerorpc.RemoteError as e:
        print 'got that:', e
        print 'name', e.name
        print 'msg', e.msg
        assert e.name == 'RuntimeError'
        assert e.msg == 'oops!'
    client.close()
    srv.close()
def test_exception_compat_v1():
    """Errors are serialized differently for protocol v1 peers.

    Default (v2+) peers get an ERR event with (name, message, traceback);
    peers announcing v=1 in the header get a single formatted message string.
    """
    endpoint = random_ipc_endpoint()

    class MySrv(zerorpc.Server):
        pass

    srv = MySrv()
    srv.bind(endpoint)
    gevent.spawn(srv.run)
    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
    # Default header: expect the detailed (name, msg, traceback) triple.
    rpccall = client.channel()
    rpccall.emit('donotexist', tuple())
    event = rpccall.recv()
    print event
    assert event.name == 'ERR'
    (name, msg, tb) = event.args
    print 'detailed error', name, msg, tb
    assert name == 'NameError'
    assert msg == 'donotexist'
    # v=1 header: expect a single repr-style message argument.
    rpccall = client.channel()
    rpccall.emit('donotexist', tuple(), xheader=dict(v=1))
    event = rpccall.recv()
    print event
    assert event.name == 'ERR'
    (msg,) = event.args
    print 'msg only', msg
    assert msg == "NameError('donotexist',)"
    client_events.close()
    srv.close()
def test_removed_unscriptable_error_format_args_spec():
    """Regression test: _format_args_spec(None) must return None, not raise."""
    class MySrv(zerorpc.Server):
        pass

    srv = MySrv()
    return_value = srv._format_args_spec(None)
    assert return_value is None
<docs-decorative-header title="Accordion">
</docs-decorative-header>
<docs-pill-row>
<docs-pill href="https://www.w3.org/WAI/ARIA/apg/patterns/accordion/" title="Accordion ARIA pattern"/>
<docs-pill href="/api?query=accordion#angular_aria_accordion" title="Accordion API Reference"/>
</docs-pill-row>
## Overview
An accordion organizes related content into expandable and collapsible sections, reducing page scrolling and helping users focus on relevant information. Each section has a trigger button and a content panel. Clicking a trigger toggles the visibility of its associated panel.
<docs-code-multifile preview hideCode path="adev/src/content/examples/aria/accordion/src/single-expansion/basic/app/app.ts">
<docs-code header="TS" path="adev/src/content/examples/aria/accordion/src/single-expansion/basic/app/app.ts"/>
<docs-code header="HTML" path="adev/src/content/examples/aria/accordion/src/single-expansion/basic/app/app.html"/>
<docs-code header="CSS" path="adev/src/content/examples/aria/accordion/src/single-expansion/basic/app/app.css"/>
</docs-code-multifile>
## Usage
Accordions work well for organizing content into logical groups where users typically need to view one section at a time.
**Use accordions when:**
- Displaying FAQs with multiple questions and answers
- Organizing long forms into manageable sections
- Reducing scrolling on content-heavy pages
- Progressively disclosing related information
**Avoid accordions when:**
- Building navigation menus (use the [Menu](guide/aria/menu) component instead)
- Creating tabbed interfaces (use the [Tabs](guide/aria/tabs) component instead)
- Showing a single collapsible section (use a disclosure pattern instead)
- Users need to see multiple sections simultaneously (consider a different layout)
## Features
- **Expansion modes** - Control whether one or multiple panels can be open at the same time
- **Keyboard navigation** - Navigate between triggers using arrow keys, Home, and End
- **Lazy rendering** - Content is only created when a panel first expands, improving initial load performance
- **Disabled states** - Disable the entire group or individual triggers
- **Focus management** - Control whether disabled items can receive keyboard focus
- **Programmatic control** - Expand, collapse, or toggle panels from your component code
- **RTL support** - Automatic support for right-to-left languages
## Examples
### Single expansion mode
Set `[multiExpandable]="false"` to allow only one panel to be open at a time. Opening a new panel automatically closes any previously open panel.
<docs-tab-group>
<docs-tab label="Basic">
<docs-code-multifile preview hideCode path="adev/src/content/examples/aria/accordion/src/single-expansion/basic/app/app.ts">
<docs-code header="TS" path="adev/src/content/examples/aria/accordion/src/single-expansion/basic/app/app.ts"/>
<docs-code header="HTML" path="adev/src/content/examples/aria/accordion/src/single-expansion/basic/app/app.html"/>
<docs-code header="CSS" path="adev/src/content/examples/aria/accordion/src/single-expansion/basic/app/app.css"/>
</docs-code-multifile>
</docs-tab>
<docs-tab label="Material">
<docs-code-multifile preview hideCode path="adev/src/content/examples/aria/accordion/src/single-expansion/material/app/app.ts">
<docs-code header="TS" path="adev/src/content/examples/aria/accordion/src/single-expansion/material/app/app.ts"/>
<docs-code header="HTML" path="adev/src/content/examples/aria/accordion/src/single-expansion/material/app/app.html"/>
<docs-code header="CSS" path="adev/src/content/examples/aria/accordion/src/single-expansion/material/app/app.css"/>
</docs-code-multifile>
</docs-tab>
<docs-tab label="Retro">
<docs-code-multifile preview hideCode path="adev/src/content/examples/aria/accordion/src/single-expansion/retro/app/app.ts">
<docs-code header="TS" path="adev/src/content/examples/aria/accordion/src/single-expansion/retro/app/app.ts"/>
<docs-code header="HTML" path="adev/src/content/examples/aria/accordion/src/single-expansion/retro/app/app.html"/>
<docs-code header="CSS" path="adev/src/content/examples/aria/accordion/src/single-expansion/retro/app/app.css"/>
</docs-code-multifile>
</docs-tab>
</docs-tab-group>
This mode works well for FAQs or situations where you want users to focus on one answer at a time.
### Multiple expansion mode
Set `[multiExpandable]="true"` to allow multiple panels to be open simultaneously. Users can expand as many panels as needed without closing others.
<docs-tab-group>
<docs-tab label="Basic">
<docs-code-multifile preview hideCode path="adev/src/content/examples/aria/accordion/src/multi-expansion/basic/app/app.ts">
<docs-code header="TS" path="adev/src/content/examples/aria/accordion/src/multi-expansion/basic/app/app.ts"/>
<docs-code header="HTML" path="adev/src/content/examples/aria/accordion/src/multi-expansion/basic/app/app.html"/>
<docs-code header="CSS" path="adev/src/content/examples/aria/accordion/src/multi-expansion/basic/app/app.css"/>
</docs-code-multifile>
</docs-tab>
<docs-tab label="Material">
<docs-code-multifile preview hideCode path="adev/src/content/examples/aria/accordion/src/multi-expansion/material/app/app.ts">
<docs-code header="TS" path="adev/src/content/examples/aria/accordion/src/multi-expansion/material/app/app.ts"/>
<docs-code header="HTML" path="adev/src/content/examples/aria/accordion/src/multi-expansion/material/app/app.html"/>
<docs-code header="CSS" path="adev/src/content/examples/aria/accordion/src/multi-expansion/material/app/app.css"/>
</docs-code-multifile>
</docs-tab>
<docs-tab label="Retro">
<docs-code-multifile preview hideCode path="adev/src/content/examples/aria/accordion/src/multi-expansion/retro/app/app.ts">
<docs-code header="TS" path="adev/src/content/examples/aria/accordion/src/multi-expansion/retro/app/app.ts"/>
<docs-code header="HTML" path="adev/src/content/examples/aria/accordion/src/multi-expansion/retro/app/app.html"/>
<docs-code header="CSS" path="adev/src/content/examples/aria/accordion/src/multi-expansion/retro/app/app.css"/>
</docs-code-multifile>
</docs-tab>
</docs-tab-group>
This mode is useful for form sections or when users need to compare content across multiple panels.
NOTE: The `multiExpandable` input defaults to `true`. Set it to `false` explicitly if you want single expansion behavior.
### Disabled accordion items
Disable specific triggers using the `disabled` input. Control how disabled items behave during keyboard navigation using the `softDisabled` input on the accordion group.
<docs-tab-group>
<docs-tab label="Basic">
<docs-code-multifile preview hideCode path="adev/src/content/examples/aria/accordion/src/disabled-focusable/basic/app/app.ts">
<docs-code header="TS" path="adev/src/content/examples/aria/accordion/src/disabled-focusable/basic/app/app.ts"/>
<docs-code header="HTML" path="adev/src/content/examples/aria/accordion/src/disabled-focusable/basic/app/app.html"/>
<docs-code header="CSS" path="adev/src/content/examples/aria/accordion/src/disabled-focusable/basic/app/app.css"/>
</docs-code-multifile>
</docs-tab>
<docs-tab label="Material">
<docs-code-multifile preview hideCode path="adev/src/content/examples/aria/accordion/src/disabled-focusable/material/app/app.ts">
<docs-code header="TS" path="adev/src/content/examples/aria/accordion/src/disabled-focusable/material/app/app.ts"/>
<docs-code header="HTML" path="adev/src/content/examples/aria/accordion/src/disabled-focusable/material/app/app.html"/>
<docs-code header="CSS" path="adev/src/content/examples/aria/accordion/src/disabled-focusable/material/app/app.css"/>
</docs-code-multifile>
</docs-tab>
<docs-tab label="Retro">
<docs-code-multifile preview hideCode path="adev/src/content/examples/aria/accordion/src/disabled-focusable/retro/app/app.ts">
<docs-code header="TS" path="adev/src/content/examples/aria/accordion/src/disabled-focusable/retro/app/app.ts"/>
<docs-code header="HTML" path="adev/src/content/examples/aria/accordion/src/disabled-focusable/retro/app/app.html"/>
<docs-code header="CSS" path="adev/src/content/examples/aria/accordion/src/disabled-focusable/retro/app/app.css"/>
</docs-code-multifile>
</docs-tab>
</docs-tab-group>
When `[softDisabled]="true"` (the default), disabled items can receive focus but cannot be activated. When `[softDisabled]="false"`, disabled items are skipped entirely during keyboard navigation.
### Lazy content rendering
Use the `ngAccordionContent` directive on an `ng-template` to defer rendering content until the panel first expands. This improves performance for accordions with heavy content like images, charts, or complex components.
```angular-html
<div ngAccordionGroup>
<div>
<button ngAccordionTrigger panelId="item-1">Trigger Text</button>
<div ngAccordionPanel panelId="item-1">
<ng-template ngAccordionContent>
<!-- This content only renders when the panel first opens -->
<img src="large-image.jpg" alt="Description" />
<app-expensive-component />
</ng-template>
</div>
</div>
</div>
```
By default, content remains in the DOM after the panel collapses. Set `[preserveContent]="false"` to remove the content from the DOM when the panel closes.
## APIs
### AccordionGroup
The container directive that manages keyboard navigation and expansion behavior for a group of accordion items.
#### Inputs
| Property | Type | Default | Description |
| ----------------- | --------- | ------- | ------------------------------------------------------------------------- |
| `disabled` | `boolean` | `false` | Disables all triggers in the group |
| `multiExpandable` | `boolean` | `true` | Whether multiple panels can be expanded simultaneously |
| `softDisabled` | `boolean` | `true` | When `true`, disabled items are focusable. When `false`, they are skipped |
| `wrap` | `boolean` | `false` | Whether keyboard navigation wraps from last to first item and vice versa |
#### Methods
| Method | Parameters | Description |
| ------------- | ---------- | ---------------------------------------------------------------- |
| `expandAll` | none | Expands all panels (only works when `multiExpandable` is `true`) |
| `collapseAll` | none | Collapses all panels |
### AccordionTrigger
The directive applied to the button element that toggles panel visibility.
#### Inputs
| Property | Type | Default | Description |
| ---------- | --------- | ------- | -------------------------------------------------------------- |
| `id` | `string` | auto | Unique identifier for the trigger |
| `panelId` | `string` | — | **Required.** Must match the `panelId` of the associated panel |
| `disabled` | `boolean` | `false` | Disables this trigger |
| `expanded` | `boolean` | `false` | Whether the panel is expanded (supports two-way binding) |
#### Signals
| Property | Type | Description |
| -------- | ----------------- | --------------------------------------- |
| `active` | `Signal<boolean>` | Whether the trigger currently has focus |
#### Methods
| Method | Parameters | Description |
| ---------- | ---------- | --------------------------------- |
| `expand` | none | Expands the associated panel |
| `collapse` | none | Collapses the associated panel |
| `toggle` | none | Toggles the panel expansion state |
### AccordionPanel
The directive applied to the element containing the collapsible content.
#### Inputs
| Property | Type | Default | Description |
| ----------------- | --------- | ------- | ---------------------------------------------------------------- |
| `id` | `string` | auto | Unique identifier for the panel |
| `panelId` | `string` | — | **Required.** Must match the `panelId` of the associated trigger |
| `preserveContent` | `boolean` | `true` | Whether to keep content in DOM after panel collapses |
#### Signals
| Property | Type | Description |
| --------- | ----------------- | --------------------------------------- |
| `visible` | `Signal<boolean>` | Whether the panel is currently expanded |
#### Methods
| Method | Parameters | Description |
| ---------- | ---------- | --------------------------- |
| `expand` | none | Expands this panel |
| `collapse` | none | Collapses this panel |
| `toggle` | none | Toggles the expansion state |
### AccordionContent
The structural directive applied to an `ng-template` inside an accordion panel to enable lazy rendering.
This directive has no inputs, outputs, or methods. Apply it to an `ng-template` element:
```angular-html
<div ngAccordionPanel panelId="item-1">
<ng-template ngAccordionContent>
<!-- Content here is lazily rendered -->
</ng-template>
</div>
```
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from collections.abc import Callable, Coroutine
from json import JSONDecodeError
from pathlib import Path
from typing import TYPE_CHECKING, Annotated, Any, cast
from urllib.parse import ParseResult, unquote, urljoin, urlparse
from fastapi import Depends, HTTPException, Request, status
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer, OAuth2PasswordBearer
from jwt import ExpiredSignatureError, InvalidTokenError
from sqlalchemy import or_, select
from sqlalchemy.orm import Session
from airflow.api_fastapi.app import get_auth_manager
from airflow.api_fastapi.auth.managers.base_auth_manager import (
COOKIE_NAME_JWT_TOKEN,
BaseAuthManager,
)
from airflow.api_fastapi.auth.managers.models.base_user import BaseUser
from airflow.api_fastapi.auth.managers.models.batch_apis import (
IsAuthorizedConnectionRequest,
IsAuthorizedPoolRequest,
IsAuthorizedVariableRequest,
)
from airflow.api_fastapi.auth.managers.models.resource_details import (
AccessView,
AssetAliasDetails,
AssetDetails,
ConfigurationDetails,
ConnectionDetails,
DagAccessEntity,
DagDetails,
PoolDetails,
VariableDetails,
)
from airflow.api_fastapi.common.db.common import SessionDep
from airflow.api_fastapi.core_api.base import OrmClause
from airflow.api_fastapi.core_api.datamodels.common import (
BulkAction,
BulkActionOnExistence,
BulkBody,
BulkCreateAction,
BulkDeleteAction,
BulkUpdateAction,
)
from airflow.api_fastapi.core_api.datamodels.connections import ConnectionBody
from airflow.api_fastapi.core_api.datamodels.pools import PoolBody
from airflow.api_fastapi.core_api.datamodels.variables import VariableBody
from airflow.configuration import conf
from airflow.models import Connection, Pool, Variable
from airflow.models.backfill import Backfill
from airflow.models.dag import DagModel, DagRun, DagTag
from airflow.models.dag_version import DagVersion
from airflow.models.dagwarning import DagWarning
from airflow.models.log import Log
from airflow.models.taskinstance import TaskInstance as TI
from airflow.models.team import Team
from airflow.models.xcom import XComModel
if TYPE_CHECKING:
from sqlalchemy.sql import Select
from airflow.api_fastapi.auth.managers.base_auth_manager import ResourceMethod
def auth_manager_from_app(request: Request) -> BaseAuthManager:
    """
    Resolve the application-wide AuthManager singleton for FastAPI dependency injection.

    The instance is created once at app startup and stored on ``app.state``; returning
    it here guarantees every route resolving this dependency sees the same object.
    """
    app = request.app
    return app.state.auth_manager
# Dependency alias: inject the shared auth manager stored on app.state.
AuthManagerDep = Annotated[BaseAuthManager, Depends(auth_manager_from_app)]

# Human-readable description surfaced in the OpenAPI docs for the token scheme.
auth_description = (
    "To authenticate Airflow API requests, clients must include a JWT (JSON Web Token) in "
    "the Authorization header of each request. This token is used to verify the identity of "
    "the client and ensure that they have the appropriate permissions to access the "
    "requested resources. "
    "You can use the endpoint ``POST /auth/token`` in order to generate a JWT token. "
    "Upon successful authentication, the server will issue a JWT token that contains the necessary "
    "information (such as user identity and scope) to authenticate subsequent requests. "
    "To learn more about Airflow public API authentication, please read https://airflow.apache.org/docs/apache-airflow/stable/security/api.html."
)
# auto_error=False on both schemes: a missing credential falls through to the
# next token source in get_user() instead of failing immediately.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/auth/token", description=auth_description, auto_error=False)
bearer_scheme = HTTPBearer(auto_error=False)

# Maps a bulk action to the resource method used for the authorization check.
MAP_BULK_ACTION_TO_AUTH_METHOD: dict[BulkAction, ResourceMethod] = {
    BulkAction.CREATE: "POST",
    BulkAction.DELETE: "DELETE",
    BulkAction.UPDATE: "PUT",
}
async def resolve_user_from_token(token_str: str | None) -> BaseUser:
    """Validate a JWT string and return the corresponding user.

    :param token_str: raw JWT, or ``None``/empty when no token was supplied.
    :raises HTTPException: 401 when no token is given or it has expired,
        403 when the token fails validation.
    """
    if not token_str:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Not authenticated")
    try:
        return await get_auth_manager().get_user_from_token(token_str)
    except ExpiredSignatureError:
        # Expired is 401 (re-authenticate); a malformed/invalid token is 403.
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Token Expired")
    except InvalidTokenError:
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Invalid JWT token")
async def get_user(
    request: Request,
    oauth_token: str | None = Depends(oauth2_scheme),
    bearer_credentials: HTTPAuthorizationCredentials | None = Depends(bearer_scheme),
) -> BaseUser:
    """Resolve the authenticated user for the current request.

    Lookup order: a user already attached to ``request.state`` (e.g. by a
    middleware), then an explicit ``Authorization: Bearer`` header, then the
    OAuth2 token, and finally the JWT cookie.
    """
    # A user might have been already built by a middleware, if so, it is stored in `request.state.user`
    user: BaseUser | None = getattr(request.state, "user", None)
    if user:
        return user
    token_str: str | None
    if bearer_credentials and bearer_credentials.scheme.lower() == "bearer":
        token_str = bearer_credentials.credentials
    elif oauth_token:
        token_str = oauth_token
    else:
        # Fall back to the JWT stored in the cookie.
        token_str = request.cookies.get(COOKIE_NAME_JWT_TOKEN)
    return await resolve_user_from_token(token_str)


# Dependency alias: inject the authenticated user into route handlers.
GetUserDep = Annotated[BaseUser, Depends(get_user)]
def requires_access_dag(
    method: ResourceMethod,
    access_entity: DagAccessEntity | None = None,
    param_dag_id: str | None = None,
) -> Callable[[Request, BaseUser], None]:
    """Build a FastAPI dependency enforcing dag-level authorization.

    :param method: resource method being performed ("GET", "PUT", ...).
    :param access_entity: optional dag sub-entity being accessed (runs, tasks, ...).
    :param param_dag_id: explicit dag id; when ``None`` it is read from the
        request's path or query parameters ("~" is treated as "all dags").
    """
    def inner(
        request: Request,
        user: GetUserDep,
    ) -> None:
        # Required for the closure to capture the dag_id but still be able to mutate it.
        # Prevent from using a nonlocal statement causing test failures.
        dag_id = param_dag_id
        if dag_id is None:
            dag_id = request.path_params.get("dag_id") or request.query_params.get("dag_id")
        # "~" is the wildcard for "all dags"; normalize it to None.
        dag_id = dag_id if dag_id != "~" else None
        team_name = DagModel.get_team_name(dag_id) if dag_id else None
        _requires_access(
            is_authorized_callback=lambda: get_auth_manager().is_authorized_dag(
                method=method,
                access_entity=access_entity,
                details=DagDetails(id=dag_id, team_name=team_name),
                user=user,
            )
        )
    return inner
class PermittedDagFilter(OrmClause[set[str]]):
    """Restricts a query to the dag ids the current user is permitted to see."""

    def to_orm(self, select: Select) -> Select:
        # OrmClause holds an Optional value; normalize None to an empty set
        # before handing it to in_().
        allowed = self.value or set()
        return select.where(DagModel.dag_id.in_(allowed))
class PermittedDagRunFilter(PermittedDagFilter):
    """Restricts a query to dag runs belonging to permitted dags."""

    def to_orm(self, select: Select) -> Select:
        allowed = self.value or set()
        return select.where(DagRun.dag_id.in_(allowed))
class PermittedDagWarningFilter(PermittedDagFilter):
    """Restricts a query to dag warnings belonging to permitted dags."""

    def to_orm(self, select: Select) -> Select:
        allowed = self.value or set()
        return select.where(DagWarning.dag_id.in_(allowed))
class PermittedEventLogFilter(PermittedDagFilter):
    """Restricts a query to event logs belonging to permitted dags."""

    def to_orm(self, select: Select) -> Select:
        # Event logs not related to any dag have dag_id None and are always
        # visible, hence the OR with the NULL check.
        allowed = self.value or set()
        return select.where(or_(Log.dag_id.in_(allowed), Log.dag_id.is_(None)))
class PermittedTIFilter(PermittedDagFilter):
    """Restricts a query to task instances belonging to permitted dags."""

    def to_orm(self, select: Select) -> Select:
        allowed = self.value or set()
        return select.where(TI.dag_id.in_(allowed))
class PermittedXComFilter(PermittedDagFilter):
    """Restricts a query to XComs belonging to permitted dags."""

    def to_orm(self, select: Select) -> Select:
        allowed = self.value or set()
        return select.where(XComModel.dag_id.in_(allowed))
class PermittedTagFilter(PermittedDagFilter):
    """Restricts a query to dag tags belonging to permitted dags."""

    def to_orm(self, select: Select) -> Select:
        allowed = self.value or set()
        return select.where(DagTag.dag_id.in_(allowed))
class PermittedDagVersionFilter(PermittedDagFilter):
    """Restricts a query to dag versions belonging to permitted dags."""

    def to_orm(self, select: Select) -> Select:
        allowed = self.value or set()
        return select.where(DagVersion.dag_id.in_(allowed))
def permitted_dag_filter_factory(
    method: ResourceMethod, filter_class=PermittedDagFilter
) -> Callable[[BaseUser, BaseAuthManager], PermittedDagFilter]:
    """
    Build a FastAPI dependency that yields the set of dag ids the user may
    access, wrapped in *filter_class*.

    :param method: whether filter readable or writable.
    :return: The callable that can be used as Depends in FastAPI.
    """

    def depends_permitted_dags_filter(
        user: GetUserDep,
        auth_manager: AuthManagerDep,
    ) -> PermittedDagFilter:
        dag_ids: set[str] = auth_manager.get_authorized_dag_ids(user=user, method=method)
        return filter_class(dag_ids)

    return depends_permitted_dags_filter
# Dependency aliases: each injects the permitted-dag filter for the matching
# model, built from the user's authorized dag ids.
EditableDagsFilterDep = Annotated[PermittedDagFilter, Depends(permitted_dag_filter_factory("PUT"))]
ReadableDagsFilterDep = Annotated[PermittedDagFilter, Depends(permitted_dag_filter_factory("GET"))]
ReadableDagRunsFilterDep = Annotated[
    PermittedDagRunFilter, Depends(permitted_dag_filter_factory("GET", PermittedDagRunFilter))
]
ReadableDagWarningsFilterDep = Annotated[
    PermittedDagWarningFilter, Depends(permitted_dag_filter_factory("GET", PermittedDagWarningFilter))
]
ReadableTIFilterDep = Annotated[
    PermittedTIFilter, Depends(permitted_dag_filter_factory("GET", PermittedTIFilter))
]
# Fixed: the annotation previously said PermittedTIFilter although the factory
# builds a PermittedEventLogFilter.
ReadableEventLogsFilterDep = Annotated[
    PermittedEventLogFilter, Depends(permitted_dag_filter_factory("GET", PermittedEventLogFilter))
]
ReadableXComFilterDep = Annotated[
    PermittedXComFilter, Depends(permitted_dag_filter_factory("GET", PermittedXComFilter))
]
ReadableTagsFilterDep = Annotated[
    PermittedTagFilter, Depends(permitted_dag_filter_factory("GET", PermittedTagFilter))
]
ReadableDagVersionsFilterDep = Annotated[
    PermittedDagVersionFilter, Depends(permitted_dag_filter_factory("GET", PermittedDagVersionFilter))
]
def requires_access_backfill(
    method: ResourceMethod,
) -> Callable[[Request, BaseUser, Session], Coroutine[Any, Any, None]]:
    """Wrap ``requires_access_dag`` and extract the dag_id from the backfill_id."""
    async def inner(
        request: Request,
        user: GetUserDep,
        session: SessionDep,
    ) -> None:
        dag_id = None
        # Try to retrieve the dag_id from the backfill_id path param
        backfill_id = request.path_params.get("backfill_id")
        # NOTE(review): path params are typically str; this isinstance(int)
        # guard only passes if a route converter coerced it — confirm.
        if backfill_id is not None and isinstance(backfill_id, int):
            backfill = session.scalars(select(Backfill).where(Backfill.id == backfill_id)).one_or_none()
            dag_id = backfill.dag_id if backfill else None
        # Try to retrieve the dag_id from the request body (POST backfill)
        if dag_id is None:
            try:
                dag_id = (await request.json()).get("dag_id")
            except JSONDecodeError:
                # Not a json body, ignore
                pass
        # Delegate the actual authorization check to the dag-level dependency.
        requires_access_dag(method, DagAccessEntity.RUN, dag_id)(
            request,
            user,
        )
    return inner
class PermittedPoolFilter(OrmClause[set[str]]):
    """Restricts a query to the pools the current user is permitted to see."""

    def to_orm(self, select: Select) -> Select:
        allowed = self.value or set()
        return select.where(Pool.pool.in_(allowed))
def permitted_pool_filter_factory(
    method: ResourceMethod,
) -> Callable[[BaseUser, BaseAuthManager], PermittedPoolFilter]:
    """
    Build a FastAPI dependency returning the permitted-pool filter for the user.

    :param method: whether filter readable or writable.
    """

    def depends_permitted_pools_filter(
        user: GetUserDep,
        auth_manager: AuthManagerDep,
    ) -> PermittedPoolFilter:
        pool_names: set[str] = auth_manager.get_authorized_pools(user=user, method=method)
        return PermittedPoolFilter(pool_names)

    return depends_permitted_pools_filter


ReadablePoolsFilterDep = Annotated[PermittedPoolFilter, Depends(permitted_pool_filter_factory("GET"))]
def requires_access_pool(method: ResourceMethod) -> Callable[[Request, BaseUser], None]:
    """Build a FastAPI dependency enforcing pool-level authorization for *method*."""
    def inner(
        request: Request,
        user: GetUserDep,
    ) -> None:
        # Pool name comes from the path; resolve its owning team (if any) so the
        # auth manager can apply team-scoped rules.
        pool_name = request.path_params.get("pool_name")
        team_name = Pool.get_team_name(pool_name) if pool_name else None
        _requires_access(
            is_authorized_callback=lambda: get_auth_manager().is_authorized_pool(
                method=method, details=PoolDetails(name=pool_name, team_name=team_name), user=user
            )
        )
    return inner
def requires_access_pool_bulk() -> Callable[[BulkBody[PoolBody], BaseUser], None]:
    """Build a FastAPI dependency authorizing every pool touched by a bulk request.

    DELETE actions carry bare pool names; CREATE/UPDATE carry PoolBody objects,
    hence the casts below.
    """
    def inner(
        request: BulkBody[PoolBody],
        user: GetUserDep,
    ) -> None:
        # Build the list of pool names provided as part of the request
        existing_pool_names = [
            cast("str", entity) if action.action == BulkAction.DELETE else cast("PoolBody", entity).pool
            for action in request.actions
            for entity in action.entities
            if action.action != BulkAction.CREATE
        ]
        # For each pool, find its associated team (if it exists)
        pool_name_to_team = Pool.get_name_to_team_name_mapping(existing_pool_names)
        requests: list[IsAuthorizedPoolRequest] = []
        for action in request.actions:
            methods = _get_resource_methods_from_bulk_request(action)
            for pool in action.entities:
                pool_name = (
                    cast("str", pool) if action.action == BulkAction.DELETE else cast("PoolBody", pool).pool
                )
                # For each pool, build a `IsAuthorizedPoolRequest`
                # The list of `IsAuthorizedPoolRequest` will then be sent using `batch_is_authorized_pool`
                # Each `IsAuthorizedPoolRequest` is similar to calling `is_authorized_pool`
                for method in methods:
                    req: IsAuthorizedPoolRequest = {
                        "method": method,
                        "details": PoolDetails(
                            name=pool_name,
                            team_name=pool_name_to_team.get(pool_name),
                        ),
                    }
                    requests.append(req)
        _requires_access(
            # By calling `batch_is_authorized_pool`, we check the user has access to all pools provided in the request
            is_authorized_callback=lambda: get_auth_manager().batch_is_authorized_pool(
                requests=requests,
                user=user,
            )
        )
    return inner
class PermittedConnectionFilter(OrmClause[set[str]]):
    """A parameter that filters the permitted connections for the user."""

    def to_orm(self, select: Select) -> Select:
        # A falsy value must become an empty set so the IN clause matches nothing.
        permitted = self.value or set()
        return select.where(Connection.conn_id.in_(permitted))
def permitted_connection_filter_factory(
    method: ResourceMethod,
) -> Callable[[BaseUser, BaseAuthManager], PermittedConnectionFilter]:
    """
    Create a callable for Depends in FastAPI that returns a filter of the permitted connections for the user.

    :param method: whether filter readable or writable.
    """

    def depends_permitted_connections_filter(
        user: GetUserDep,
        auth_manager: AuthManagerDep,
    ) -> PermittedConnectionFilter:
        # Ask the auth manager which conn_ids this user may access with ``method``.
        return PermittedConnectionFilter(auth_manager.get_authorized_connections(user=user, method=method))

    return depends_permitted_connections_filter
# FastAPI dependency yielding a filter restricted to the connections the user may read ("GET").
ReadableConnectionsFilterDep = Annotated[
    PermittedConnectionFilter, Depends(permitted_connection_filter_factory("GET"))
]
def requires_access_connection(method: ResourceMethod) -> Callable[[Request, BaseUser], None]:
    """Return a FastAPI dependency that enforces connection access for ``method`` (403 on refusal)."""

    def inner(
        request: Request,
        user: GetUserDep,
    ) -> None:
        connection_id = request.path_params.get("connection_id")
        # Resolve the owning team only when a connection id was supplied in the path.
        if connection_id:
            team_name = Connection.get_team_name(connection_id)
        else:
            team_name = None
        details = ConnectionDetails(conn_id=connection_id, team_name=team_name)

        def _check() -> bool:
            return get_auth_manager().is_authorized_connection(method=method, details=details, user=user)

        _requires_access(is_authorized_callback=_check)

    return inner
def requires_access_connection_bulk() -> Callable[[BulkBody[ConnectionBody], BaseUser], None]:
    """Return a FastAPI dependency that checks the user may perform every action of a bulk connection request."""

    def inner(
        request: BulkBody[ConnectionBody],
        user: GetUserDep,
    ) -> None:
        # Build the list of ``conn_id`` provided as part of the request.
        # CREATE entities are skipped: they do not exist yet, so no team lookup is possible.
        # DELETE entities are bare conn_id strings; other actions carry full ConnectionBody payloads.
        existing_connection_ids = [
            cast("str", entity)
            if action.action == BulkAction.DELETE
            else cast("ConnectionBody", entity).connection_id
            for action in request.actions
            for entity in action.entities
            if action.action != BulkAction.CREATE
        ]
        # For each connection, find its associated team (if it exists)
        conn_id_to_team = Connection.get_conn_id_to_team_name_mapping(existing_connection_ids)

        requests: list[IsAuthorizedConnectionRequest] = []
        for action in request.actions:
            # An action may imply several auth methods (e.g. CREATE + overwrite implies PUT too).
            methods = _get_resource_methods_from_bulk_request(action)
            for connection in action.entities:
                connection_id = (
                    cast("str", connection)
                    if action.action == BulkAction.DELETE
                    else cast("ConnectionBody", connection).connection_id
                )
                # For each connection, build a `IsAuthorizedConnectionRequest`
                # The list of `IsAuthorizedConnectionRequest` will then be sent using `batch_is_authorized_connection`
                # Each `IsAuthorizedConnectionRequest` is similar to calling `is_authorized_connection`
                for method in methods:
                    req: IsAuthorizedConnectionRequest = {
                        "method": method,
                        "details": ConnectionDetails(
                            conn_id=connection_id,
                            # Connections being created have no team entry; .get() yields None.
                            team_name=conn_id_to_team.get(connection_id),
                        ),
                    }
                    requests.append(req)

        _requires_access(
            # By calling `batch_is_authorized_connection`, we check the user has access to all connections provided in the request
            is_authorized_callback=lambda: get_auth_manager().batch_is_authorized_connection(
                requests=requests,
                user=user,
            )
        )

    return inner
def requires_access_configuration(method: ResourceMethod) -> Callable[[Request, BaseUser], None]:
    """Return a FastAPI dependency that enforces configuration access for ``method`` (403 on refusal)."""

    def inner(
        request: Request,
        user: GetUserDep,
    ) -> None:
        # The section may arrive either as a query parameter or as a path parameter.
        section: str | None = request.query_params.get("section") or request.path_params.get("section")
        details = ConfigurationDetails(section=section)

        def _check() -> bool:
            return get_auth_manager().is_authorized_configuration(method=method, details=details, user=user)

        _requires_access(is_authorized_callback=_check)

    return inner
class PermittedTeamFilter(OrmClause[set[str]]):
    """A parameter that filters the permitted teams for the user."""

    def to_orm(self, select: Select) -> Select:
        # A falsy value must become an empty set so the IN clause matches nothing.
        permitted = self.value or set()
        return select.where(Team.name.in_(permitted))
def permitted_team_filter_factory() -> Callable[[BaseUser, BaseAuthManager], PermittedTeamFilter]:
    """Create a callable for Depends in FastAPI that returns a filter of the permitted teams for the user."""

    def depends_permitted_teams_filter(
        user: GetUserDep,
        auth_manager: AuthManagerDep,
    ) -> PermittedTeamFilter:
        # Teams are only ever filtered for reading, so the method is fixed to "GET".
        return PermittedTeamFilter(auth_manager.get_authorized_teams(user=user, method="GET"))

    return depends_permitted_teams_filter
# FastAPI dependency yielding a filter restricted to the teams the user may read.
ReadableTeamsFilterDep = Annotated[PermittedTeamFilter, Depends(permitted_team_filter_factory())]
class PermittedVariableFilter(OrmClause[set[str]]):
    """A parameter that filters the permitted variables for the user."""

    def to_orm(self, select: Select) -> Select:
        # A falsy value must become an empty set so the IN clause matches nothing.
        permitted = self.value or set()
        return select.where(Variable.key.in_(permitted))
def permitted_variable_filter_factory(
    method: ResourceMethod,
) -> Callable[[BaseUser, BaseAuthManager], PermittedVariableFilter]:
    """
    Create a callable for Depends in FastAPI that returns a filter of the permitted variables for the user.

    :param method: whether filter readable or writable.
    """

    def depends_permitted_variables_filter(
        user: GetUserDep,
        auth_manager: AuthManagerDep,
    ) -> PermittedVariableFilter:
        # Ask the auth manager which variable keys this user may access with ``method``.
        return PermittedVariableFilter(auth_manager.get_authorized_variables(user=user, method=method))

    return depends_permitted_variables_filter
# FastAPI dependency yielding a filter restricted to the variables the user may read ("GET").
ReadableVariablesFilterDep = Annotated[
    PermittedVariableFilter, Depends(permitted_variable_filter_factory("GET"))
]
def requires_access_variable(method: ResourceMethod) -> Callable[[Request, BaseUser], None]:
    """Return a FastAPI dependency that enforces variable access for ``method`` (403 on refusal)."""

    def inner(
        request: Request,
        user: GetUserDep,
    ) -> None:
        variable_key: str | None = request.path_params.get("variable_key")
        # Resolve the owning team only when a variable key was supplied in the path.
        if variable_key:
            team_name = Variable.get_team_name(variable_key)
        else:
            team_name = None
        details = VariableDetails(key=variable_key, team_name=team_name)

        def _check() -> bool:
            return get_auth_manager().is_authorized_variable(method=method, details=details, user=user)

        _requires_access(is_authorized_callback=_check)

    return inner
def requires_access_variable_bulk() -> Callable[[BulkBody[VariableBody], BaseUser], None]:
    """Return a FastAPI dependency that checks the user may perform every action of a bulk variable request."""

    def inner(
        request: BulkBody[VariableBody],
        user: GetUserDep,
    ) -> None:
        # Build the list of variable keys provided as part of the request.
        # CREATE entities are skipped: they do not exist yet, so no team lookup is possible.
        # DELETE entities are bare key strings; other actions carry full VariableBody payloads.
        existing_variable_keys = [
            cast("str", entity) if action.action == BulkAction.DELETE else cast("VariableBody", entity).key
            for action in request.actions
            for entity in action.entities
            if action.action != BulkAction.CREATE
        ]
        # For each variable, find its associated team (if it exists)
        var_key_to_team = Variable.get_key_to_team_name_mapping(existing_variable_keys)

        requests: list[IsAuthorizedVariableRequest] = []
        for action in request.actions:
            # An action may imply several auth methods (e.g. CREATE + overwrite implies PUT too).
            methods = _get_resource_methods_from_bulk_request(action)
            for variable in action.entities:
                variable_key = (
                    cast("str", variable)
                    if action.action == BulkAction.DELETE
                    else cast("VariableBody", variable).key
                )
                # For each variable, build a `IsAuthorizedVariableRequest`
                # The list of `IsAuthorizedVariableRequest` will then be sent using `batch_is_authorized_variable`
                # Each `IsAuthorizedVariableRequest` is similar to calling `is_authorized_variable`
                for method in methods:
                    req: IsAuthorizedVariableRequest = {
                        "method": method,
                        "details": VariableDetails(
                            key=variable_key,
                            # Variables being created have no team entry; .get() yields None.
                            team_name=var_key_to_team.get(variable_key),
                        ),
                    }
                    requests.append(req)

        _requires_access(
            # By calling `batch_is_authorized_variable`, we check the user has access to all variables provided in the request
            is_authorized_callback=lambda: get_auth_manager().batch_is_authorized_variable(
                requests=requests,
                user=user,
            )
        )

    return inner
def requires_access_asset(method: ResourceMethod) -> Callable[[Request, BaseUser], None]:
    """Return a FastAPI dependency that enforces asset access for ``method`` (403 on refusal)."""

    def inner(
        request: Request,
        user: GetUserDep,
    ) -> None:
        details = AssetDetails(id=request.path_params.get("asset_id"))

        def _check() -> bool:
            return get_auth_manager().is_authorized_asset(method=method, details=details, user=user)

        _requires_access(is_authorized_callback=_check)

    return inner
def requires_access_view(access_view: AccessView) -> Callable[[Request, BaseUser], None]:
    """Return a FastAPI dependency that enforces access to ``access_view`` (403 on refusal)."""

    def inner(
        request: Request,
        user: GetUserDep,
    ) -> None:
        def _check() -> bool:
            return get_auth_manager().is_authorized_view(access_view=access_view, user=user)

        _requires_access(is_authorized_callback=_check)

    return inner
def requires_access_asset_alias(method: ResourceMethod) -> Callable[[Request, BaseUser], None]:
    """Return a FastAPI dependency that enforces asset-alias access for ``method`` (403 on refusal)."""

    def inner(
        request: Request,
        user: GetUserDep,
    ) -> None:
        asset_alias_id: str | None = request.path_params.get("asset_alias_id")
        details = AssetAliasDetails(id=asset_alias_id)

        def _check() -> bool:
            return get_auth_manager().is_authorized_asset_alias(method=method, details=details, user=user)

        _requires_access(is_authorized_callback=_check)

    return inner
def requires_authenticated() -> Callable:
    """Just ensure the user is authenticated - no need to check any specific permissions."""

    def inner(
        request: Request,
        user: GetUserDep,
    ) -> None:
        # Resolving ``GetUserDep`` already performed authentication; nothing more to verify.
        return None

    return inner
def _requires_access(
*,
is_authorized_callback: Callable[[], bool],
) -> None:
if not is_authorized_callback():
raise HTTPException(status.HTTP_403_FORBIDDEN, "Forbidden")
def is_safe_url(target_url: str, request: Request | None = None) -> bool:
    """
    Check that the URL is safe.

    Needs to belong to the same domain as base_url, use HTTP or HTTPS (no JavaScript/data schemes),
    is a valid normalized path.
    """
    parsed_bases: tuple[tuple[str, ParseResult], ...] = ()
    # Check if the target URL matches either the configured base URL, or the URL used to make the request
    if request is not None:
        url = str(request.base_url)
        parsed_bases += ((url, urlparse(url)),)
    if base_url := conf.get("api", "base_url", fallback=None):
        parsed_bases += ((base_url, urlparse(base_url)),)
    if not parsed_bases:
        # Can't enforce any security check.
        return True
    for base_url, parsed_base in parsed_bases:
        # Unquote before joining so percent-encoded sequences (e.g. %2e%2e) are normalized as well.
        parsed_target = urlparse(urljoin(base_url, unquote(target_url)))  # Resolves relative URLs
        # NOTE(review): Path.resolve() collapses ".." segments; for a relative path it resolves
        # against the current working directory — confirm target paths are always absolute here.
        target_path = Path(parsed_target.path).resolve()
        # Reject targets that escape the base path prefix; otherwise require http(s) on the same host:port.
        if target_path and parsed_base.path and not target_path.is_relative_to(parsed_base.path):
            continue
        if parsed_target.scheme in {"http", "https"} and parsed_target.netloc == parsed_base.netloc:
            return True
    return False
def _get_resource_methods_from_bulk_request(
    action: BulkCreateAction | BulkUpdateAction | BulkDeleteAction,
) -> list[ResourceMethod]:
    """Map a bulk action onto the list of auth methods that must be checked for it."""
    methods: list[ResourceMethod] = [MAP_BULK_ACTION_TO_AUTH_METHOD[action.action]]
    # If ``action_on_existence`` == ``overwrite``, a create request is actually an update request
    # when the resource already exists, so the user additionally needs ``PUT`` access.
    if action.action == BulkAction.CREATE and action.action_on_existence == BulkActionOnExistence.OVERWRITE:
        methods.append("PUT")
    return methods
# Copyright (c) 2013-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_odl.common import constants as odl_const
from networking_odl.ml2 import mech_driver
from oslo_config import cfg
from oslo_log import log
from neutron.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)

# Configuration options registered under the [ml2_odl] section.
# NOTE(review): ``_`` is presumably the gettext translation function injected
# into builtins by the project's i18n bootstrap — confirm it is installed
# before this module is imported.
odl_opts = [
    cfg.StrOpt('url',
               help=_("HTTP URL of OpenDaylight REST interface.")),
    cfg.StrOpt('username',
               help=_("HTTP username for authentication")),
    cfg.StrOpt('password', secret=True,
               help=_("HTTP password for authentication")),
    cfg.IntOpt('timeout', default=10,
               help=_("HTTP timeout in seconds.")),
    cfg.IntOpt('session_timeout', default=30,
               help=_("Tomcat session timeout in minutes.")),
]

cfg.CONF.register_opts(odl_opts, "ml2_odl")
class OpenDaylightMechanismDriver(api.MechanismDriver):
    """Mechanism Driver for OpenDaylight.

    This driver was a port from the NCS MechanismDriver. The API
    exposed by ODL is slightly different from the API exposed by NCS,
    but the general concepts are the same.
    """

    def initialize(self):
        """Read the [ml2_odl] options and construct the OpenDaylight REST driver."""
        self.url = cfg.CONF.ml2_odl.url
        self.timeout = cfg.CONF.ml2_odl.timeout
        self.username = cfg.CONF.ml2_odl.username
        self.password = cfg.CONF.ml2_odl.password
        required_opts = ('url', 'username', 'password')
        # Fail fast at plugin startup if a mandatory connection option is unset.
        for opt in required_opts:
            if not getattr(self, opt):
                raise cfg.RequiredOptError(opt, 'ml2_odl')
        self.odl_drv = mech_driver.OpenDaylightDriver()

    # Postcommit hooks are used to trigger synchronization.
    # Each hook simply forwards the operation name and resource collection
    # to the REST driver, which pushes the change to the ODL controller.

    def create_network_postcommit(self, context):
        self.odl_drv.synchronize('create', odl_const.ODL_NETWORKS, context)

    def update_network_postcommit(self, context):
        self.odl_drv.synchronize('update', odl_const.ODL_NETWORKS, context)

    def delete_network_postcommit(self, context):
        self.odl_drv.synchronize('delete', odl_const.ODL_NETWORKS, context)

    def create_subnet_postcommit(self, context):
        self.odl_drv.synchronize('create', odl_const.ODL_SUBNETS, context)

    def update_subnet_postcommit(self, context):
        self.odl_drv.synchronize('update', odl_const.ODL_SUBNETS, context)

    def delete_subnet_postcommit(self, context):
        self.odl_drv.synchronize('delete', odl_const.ODL_SUBNETS, context)

    def create_port_postcommit(self, context):
        self.odl_drv.synchronize('create', odl_const.ODL_PORTS, context)

    def update_port_postcommit(self, context):
        self.odl_drv.synchronize('update', odl_const.ODL_PORTS, context)

    def delete_port_postcommit(self, context):
        self.odl_drv.synchronize('delete', odl_const.ODL_PORTS, context)

    def bind_port(self, context):
        # Port binding is delegated entirely to the REST driver.
        self.odl_drv.bind_port(context)
// NOTE(review): this looks like generated output of a Next.js SWC transform
// that assigns debuggable names to hook callbacks: each callback is placed in
// an object literal keyed by "Component.hookName" and immediately read back,
// so the arrow function's inferred name becomes e.g. "Page.useEffect".
// The hooks (useLayoutEffect, useEffect, useCallback, useMemo) are assumed to
// be in scope via the surrounding test harness — confirm before reuse.
export default function Page() {
    useLayoutEffect({
        "Page.useLayoutEffect": ()=>{}
    }["Page.useLayoutEffect"]);
    useEffect({
        "Page.useEffect": ()=>{}
    }["Page.useEffect"]);
    const onClick = useCallback({
        "Page.useCallback[onClick]": ()=>[]
    }["Page.useCallback[onClick]"]);
    const computed = useMemo({
        "Page.useMemo[computed]": ()=>{}
    }["Page.useMemo[computed]"]);
}
# -*- coding: utf-8 -*-
"""
Test for lms courseware app, module render unit
"""
import ddt
import itertools
import json
from nose.plugins.attrib import attr
from functools import partial
from bson import ObjectId
from django.http import Http404, HttpResponse
from django.core.urlresolvers import reverse
from django.conf import settings
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.contrib.auth.models import AnonymousUser
from mock import MagicMock, patch, Mock
from opaque_keys.edx.keys import UsageKey, CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from pyquery import PyQuery
from courseware.module_render import hash_resource
from xblock.field_data import FieldData
from xblock.runtime import Runtime
from xblock.fields import ScopeIds
from xblock.core import XBlock
from xblock.fragment import Fragment
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from courseware import module_render as render
from courseware.courses import get_course_with_access, course_image_url, get_course_info_section
from courseware.field_overrides import OverrideFieldData
from courseware.model_data import FieldDataCache
from courseware.module_render import hash_resource, get_module_for_descriptor
from courseware.models import StudentModule
from courseware.tests.factories import StudentModuleFactory, UserFactory, GlobalStaffFactory
from courseware.tests.tests import LoginEnrollmentTestCase
from courseware.tests.test_submitting_problems import TestSubmittingProblems
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from lms.djangoapps.lms_xblock.field_data import LmsFieldData
from student.models import anonymous_id_for_user
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MIXED_TOY_MODULESTORE,
TEST_DATA_XML_MODULESTORE,
)
from xmodule.lti_module import LTIDescriptor
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import ItemFactory, CourseFactory, check_mongo_calls
from xmodule.x_module import XModuleDescriptor, XModule, STUDENT_VIEW, CombinedSystem
# Root directory containing the shared test course data (e.g. the "toy" course).
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
# Declare the runtime services this block requires so the LMS runtime provides them.
@XBlock.needs("field-data")
@XBlock.needs("i18n")
@XBlock.needs("fs")
@XBlock.needs("user")
class PureXBlock(XBlock):
    """
    Pure XBlock to use in tests.
    """
    pass
class EmptyXModule(XModule):  # pylint: disable=abstract-method
    """
    Empty XModule for testing with no dependencies.
    """
    pass
class EmptyXModuleDescriptor(XModuleDescriptor):  # pylint: disable=abstract-method
    """
    Empty XModule for testing with no dependencies.
    """
    # The descriptor instantiates EmptyXModule when rendered for a student.
    module_class = EmptyXModule
class GradedStatelessXBlock(XBlock):
    """
    This XBlock exists to test grade storage for blocks that don't store
    student state in a scoped field.
    """

    @XBlock.json_handler
    def set_score(self, json_data, suffix):  # pylint: disable=unused-argument
        """
        Set the score for this testing XBlock.
        """
        # Publishing a 'grade' event is the only side effect: the runtime is
        # responsible for persisting the score; the block itself keeps no state.
        self.runtime.publish(
            self,
            'grade',
            {
                'value': json_data['grade'],
                'max_value': 1
            }
        )
@attr('shard_1')
@ddt.ddt
class ModuleRenderTestCase(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Tests of courseware.module_render
    """
    # TODO: this test relies on the specific setup of the toy course.
    # It should be rewritten to build the course it needs and then test that.

    def setUp(self):
        """
        Set up the course and user context
        """
        super(ModuleRenderTestCase, self).setUp()

        self.course_key = self.create_toy_course()
        self.toy_course = modulestore().get_course(self.course_key)
        self.mock_user = UserFactory()
        self.mock_user.id = 1
        self.request_factory = RequestFactory()

        # Construct a mock module for the modulestore to return
        self.mock_module = MagicMock()
        self.mock_module.id = 1
        self.dispatch = 'score_update'

        # Construct a 'standard' xqueue_callback url
        self.callback_url = reverse(
            'xqueue_callback',
            kwargs=dict(
                course_id=self.course_key.to_deprecated_string(),
                userid=str(self.mock_user.id),
                mod_id=self.mock_module.id,
                dispatch=self.dispatch
            )
        )

    def test_get_module(self):
        """An invalid location should make get_module return None rather than raise."""
        self.assertEqual(
            None,
            render.get_module('dummyuser', None, 'invalid location', None)
        )

    def test_module_render_with_jump_to_id(self):
        """
        This test validates that the /jump_to_id/<id> shorthand for intracourse linking works as
        expected. Note there's a HTML element in the 'toy' course with the url_name 'toyjumpto' which
        defines this linkage
        """
        mock_request = MagicMock()
        mock_request.user = self.mock_user

        course = get_course_with_access(self.mock_user, 'load', self.course_key)

        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            self.course_key, self.mock_user, course, depth=2)

        module = render.get_module(
            self.mock_user,
            mock_request,
            self.course_key.make_usage_key('html', 'toyjumpto'),
            field_data_cache,
        )

        # get the rendered HTML output which should have the rewritten link
        html = module.render(STUDENT_VIEW).content

        # See if the url got rewritten to the target link
        # note if the URL mapping changes then this assertion will break
        self.assertIn('/courses/' + self.course_key.to_deprecated_string() + '/jump_to_id/vertical_test', html)

    def test_xqueue_callback_success(self):
        """
        Test for happy-path xqueue_callback
        """
        fake_key = 'fake key'
        xqueue_header = json.dumps({'lms_key': fake_key})
        data = {
            'xqueue_header': xqueue_header,
            'xqueue_body': 'hello world',
        }

        # Patch getmodule to return our mock module
        with patch('courseware.module_render.load_single_xblock', return_value=self.mock_module):
            # call xqueue_callback with our mocked information
            request = self.request_factory.post(self.callback_url, data)
            render.xqueue_callback(
                request,
                unicode(self.course_key),
                self.mock_user.id,
                self.mock_module.id,
                self.dispatch
            )

        # Verify that handle ajax is called with the correct data
        request.POST['queuekey'] = fake_key
        self.mock_module.handle_ajax.assert_called_once_with(self.dispatch, request.POST)

    def test_xqueue_callback_missing_header_info(self):
        """Requests without xqueue metadata must 404 instead of being processed."""
        data = {
            'xqueue_header': '{}',
            'xqueue_body': 'hello world',
        }

        with patch('courseware.module_render.load_single_xblock', return_value=self.mock_module):
            # Test with missing xqueue data
            with self.assertRaises(Http404):
                request = self.request_factory.post(self.callback_url, {})
                render.xqueue_callback(
                    request,
                    unicode(self.course_key),
                    self.mock_user.id,
                    self.mock_module.id,
                    self.dispatch
                )

            # Test with missing xqueue_header
            with self.assertRaises(Http404):
                request = self.request_factory.post(self.callback_url, data)
                render.xqueue_callback(
                    request,
                    unicode(self.course_key),
                    self.mock_user.id,
                    self.mock_module.id,
                    self.dispatch
                )

    def test_get_score_bucket(self):
        """Scores should bucket into incorrect / partial / correct, with out-of-range treated as incorrect."""
        self.assertEquals(render.get_score_bucket(0, 10), 'incorrect')
        self.assertEquals(render.get_score_bucket(1, 10), 'partial')
        self.assertEquals(render.get_score_bucket(10, 10), 'correct')
        # get_score_bucket calls error cases 'incorrect'
        self.assertEquals(render.get_score_bucket(11, 10), 'incorrect')
        self.assertEquals(render.get_score_bucket(-1, 10), 'incorrect')

    def test_anonymous_handle_xblock_callback(self):
        """An unauthenticated POST to an xblock handler must be rejected with 403."""
        dispatch_url = reverse(
            'xblock_handler',
            args=[
                self.course_key.to_deprecated_string(),
                quote_slashes(self.course_key.make_usage_key('videosequence', 'Toy_Videos').to_deprecated_string()),
                'xmodule_handler',
                'goto_position'
            ]
        )
        response = self.client.post(dispatch_url, {'position': 2})
        self.assertEquals(403, response.status_code)
        self.assertEquals('Unauthenticated', response.content)

    def test_missing_position_handler(self):
        """
        Test that sending POST request without or invalid position argument don't raise server error
        """
        self.client.login(username=self.mock_user.username, password="test")
        dispatch_url = reverse(
            'xblock_handler',
            args=[
                self.course_key.to_deprecated_string(),
                quote_slashes(self.course_key.make_usage_key('videosequence', 'Toy_Videos').to_deprecated_string()),
                'xmodule_handler',
                'goto_position'
            ]
        )

        # Every malformed 'position' value below must still yield a 200/success response.
        response = self.client.post(dispatch_url)
        self.assertEqual(200, response.status_code)
        self.assertEqual(json.loads(response.content), {'success': True})

        response = self.client.post(dispatch_url, {'position': ''})
        self.assertEqual(200, response.status_code)
        self.assertEqual(json.loads(response.content), {'success': True})

        response = self.client.post(dispatch_url, {'position': '-1'})
        self.assertEqual(200, response.status_code)
        self.assertEqual(json.loads(response.content), {'success': True})

        response = self.client.post(dispatch_url, {'position': "string"})
        self.assertEqual(200, response.status_code)
        self.assertEqual(json.loads(response.content), {'success': True})

        response = self.client.post(dispatch_url, {'position': u"Φυσικά"})
        self.assertEqual(200, response.status_code)
        self.assertEqual(json.loads(response.content), {'success': True})

        response = self.client.post(dispatch_url, {'position': None})
        self.assertEqual(200, response.status_code)
        self.assertEqual(json.loads(response.content), {'success': True})

    @ddt.data('pure', 'vertical')
    @XBlock.register_temp_plugin(PureXBlock, identifier='pure')
    def test_rebinding_same_user(self, block_type):
        """Binding the same descriptor to the same user twice must not raise."""
        request = self.request_factory.get('')
        request.user = self.mock_user
        course = CourseFactory()
        descriptor = ItemFactory(category=block_type, parent=course)
        field_data_cache = FieldDataCache([self.toy_course, descriptor], self.toy_course.id, self.mock_user)

        # This is verifying that caching doesn't cause an error during get_module_for_descriptor, which
        # is why it calls the method twice identically.
        render.get_module_for_descriptor(
            self.mock_user,
            request,
            descriptor,
            field_data_cache,
            self.toy_course.id,
            course=self.toy_course
        )
        render.get_module_for_descriptor(
            self.mock_user,
            request,
            descriptor,
            field_data_cache,
            self.toy_course.id,
            course=self.toy_course
        )

    @override_settings(FIELD_OVERRIDE_PROVIDERS=(
        'ccx.overrides.CustomCoursesForEdxOverrideProvider',
    ))
    def test_rebind_different_users_ccx(self):
        """
        This tests the rebinding a descriptor to a student does not result
        in overly nested _field_data when CCX is enabled.
        """
        request = self.request_factory.get('')
        request.user = self.mock_user
        course = CourseFactory.create(enable_ccx=True)

        descriptor = ItemFactory(category='html', parent=course)
        field_data_cache = FieldDataCache(
            [course, descriptor], course.id, self.mock_user
        )

        # grab what _field_data was originally set to
        original_field_data = descriptor._field_data  # pylint: disable=protected-access, no-member

        render.get_module_for_descriptor(
            self.mock_user, request, descriptor, field_data_cache, course.id, course=course
        )

        # check that _unwrapped_field_data is the same as the original
        # _field_data, but now _field_data as been reset.
        # pylint: disable=protected-access, no-member
        self.assertIs(descriptor._unwrapped_field_data, original_field_data)
        self.assertIsNot(descriptor._unwrapped_field_data, descriptor._field_data)

        # now bind this module to a few other students
        for user in [UserFactory(), UserFactory(), UserFactory()]:
            render.get_module_for_descriptor(
                user,
                request,
                descriptor,
                field_data_cache,
                course.id,
                course=course
            )

        # _field_data should now be wrapped by LmsFieldData
        # pylint: disable=protected-access, no-member
        self.assertIsInstance(descriptor._field_data, LmsFieldData)

        # the LmsFieldData should now wrap OverrideFieldData
        self.assertIsInstance(
            # pylint: disable=protected-access, no-member
            descriptor._field_data._authored_data._source,
            OverrideFieldData
        )

        # the OverrideFieldData should point to the original unwrapped field_data
        self.assertIs(
            # pylint: disable=protected-access, no-member
            descriptor._field_data._authored_data._source.fallback,
            descriptor._unwrapped_field_data
        )

    def test_hash_resource(self):
        """
        Ensure that the resource hasher works and does not fail on unicode,
        decoded or otherwise.
        """
        resources = ['ASCII text', u'❄ I am a special snowflake.', "❄ So am I, but I didn't tell you."]
        self.assertEqual(hash_resource(resources), 'a76e27c8e80ca3efd7ce743093aa59e0')
@attr('shard_1')
class TestHandleXBlockCallback(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test the handle_xblock_callback function
"""
def setUp(self):
super(TestHandleXBlockCallback, self).setUp()
self.course_key = self.create_toy_course()
self.location = self.course_key.make_usage_key('chapter', 'Overview')
self.toy_course = modulestore().get_course(self.course_key)
self.mock_user = UserFactory.create()
self.request_factory = RequestFactory()
# Construct a mock module for the modulestore to return
self.mock_module = MagicMock()
self.mock_module.id = 1
self.dispatch = 'score_update'
# Construct a 'standard' xqueue_callback url
self.callback_url = reverse(
'xqueue_callback', kwargs={
'course_id': self.course_key.to_deprecated_string(),
'userid': str(self.mock_user.id),
'mod_id': self.mock_module.id,
'dispatch': self.dispatch
}
)
def _mock_file(self, name='file', size=10):
"""Create a mock file object for testing uploads"""
mock_file = MagicMock(
size=size,
read=lambda: 'x' * size
)
# We can't use `name` as a kwarg to Mock to set the name attribute
# because mock uses `name` to name the mock itself
mock_file.name = name
return mock_file
def test_invalid_location(self):
request = self.request_factory.post('dummy_url', data={'position': 1})
request.user = self.mock_user
with self.assertRaises(Http404):
render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
'invalid Location',
'dummy_handler'
'dummy_dispatch'
)
def test_too_many_files(self):
request = self.request_factory.post(
'dummy_url',
data={'file_id': (self._mock_file(), ) * (settings.MAX_FILEUPLOADS_PER_INPUT + 1)}
)
request.user = self.mock_user
self.assertEquals(
render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
quote_slashes(self.location.to_deprecated_string()),
'dummy_handler'
).content,
json.dumps({
'success': 'Submission aborted! Maximum %d files may be submitted at once' %
settings.MAX_FILEUPLOADS_PER_INPUT
}, indent=2)
)
def test_too_large_file(self):
inputfile = self._mock_file(size=1 + settings.STUDENT_FILEUPLOAD_MAX_SIZE)
request = self.request_factory.post(
'dummy_url',
data={'file_id': inputfile}
)
request.user = self.mock_user
self.assertEquals(
render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
quote_slashes(self.location.to_deprecated_string()),
'dummy_handler'
).content,
json.dumps({
'success': 'Submission aborted! Your file "%s" is too large (max size: %d MB)' %
(inputfile.name, settings.STUDENT_FILEUPLOAD_MAX_SIZE / (1000 ** 2))
}, indent=2)
)
def test_xmodule_dispatch(self):
request = self.request_factory.post('dummy_url', data={'position': 1})
request.user = self.mock_user
response = render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
quote_slashes(self.location.to_deprecated_string()),
'xmodule_handler',
'goto_position',
)
self.assertIsInstance(response, HttpResponse)
def test_bad_course_id(self):
request = self.request_factory.post('dummy_url')
request.user = self.mock_user
with self.assertRaises(Http404):
render.handle_xblock_callback(
request,
'bad_course_id',
quote_slashes(self.location.to_deprecated_string()),
'xmodule_handler',
'goto_position',
)
def test_bad_location(self):
request = self.request_factory.post('dummy_url')
request.user = self.mock_user
with self.assertRaises(Http404):
render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
quote_slashes(self.course_key.make_usage_key('chapter', 'bad_location').to_deprecated_string()),
'xmodule_handler',
'goto_position',
)
def test_bad_xmodule_dispatch(self):
request = self.request_factory.post('dummy_url')
request.user = self.mock_user
with self.assertRaises(Http404):
render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
quote_slashes(self.location.to_deprecated_string()),
'xmodule_handler',
'bad_dispatch',
)
def test_missing_handler(self):
request = self.request_factory.post('dummy_url')
request.user = self.mock_user
with self.assertRaises(Http404):
render.handle_xblock_callback(
request,
self.course_key.to_deprecated_string(),
quote_slashes(self.location.to_deprecated_string()),
'bad_handler',
'bad_dispatch',
)
    @XBlock.register_temp_plugin(GradedStatelessXBlock, identifier='stateless_scorer')
    def test_score_without_student_state(self):
        """
        A stateless scoring block can publish a grade through its 'set_score'
        handler even though no StudentModule row exists beforehand; the handler
        call creates one recording the grade.
        """
        course = CourseFactory.create()
        block = ItemFactory.create(category='stateless_scorer', parent=course)
        request = self.request_factory.post(
            'dummy_url',
            data=json.dumps({"grade": 0.75}),
            content_type='application/json'
        )
        request.user = self.mock_user
        response = render.handle_xblock_callback(
            request,
            unicode(course.id),
            quote_slashes(unicode(block.scope_ids.usage_id)),
            'set_score',
            '',
        )
        self.assertEquals(response.status_code, 200)
        # The callback should have created a StudentModule for this user/block.
        student_module = StudentModule.objects.get(
            student=self.mock_user,
            module_state_key=block.scope_ids.usage_id,
        )
        self.assertEquals(student_module.grade, 0.75)
        self.assertEquals(student_module.max_grade, 1)
    @patch.dict('django.conf.settings.FEATURES', {'ENABLE_XBLOCK_VIEW_ENDPOINT': True})
    def test_xblock_view_handler(self):
        """
        The xblock_view endpoint returns JSON containing 'csrf_token', 'html'
        and 'resources' sections, and the html holds exactly one rendered
        student_view for the requested videosequence block.
        """
        args = [
            'edX/toy/2012_Fall',
            quote_slashes('i4x://edX/toy/videosequence/Toy_Videos'),
            'student_view'
        ]
        xblock_view_url = reverse(
            'xblock_view',
            args=args
        )
        request = self.request_factory.get(xblock_view_url)
        request.user = self.mock_user
        response = render.xblock_view(request, *args)
        self.assertEquals(200, response.status_code)
        expected = ['csrf_token', 'html', 'resources']
        content = json.loads(response.content)
        for section in expected:
            self.assertIn(section, content)
        # Exactly one student_view wrapper div for the requested block.
        doc = PyQuery(content['html'])
        self.assertEquals(len(doc('div.xblock-student_view-videosequence')), 1)
@attr('shard_1')
@ddt.ddt
class TestTOC(ModuleStoreTestCase):
    """Check the Table of Contents for a course"""
    def setup_modulestore(self, default_ms, num_finds, num_sends):
        """
        Create the toy course in the current default store, then load it to
        depth 2 while asserting the expected number of mongo finds/sends.
        """
        self.course_key = self.create_toy_course()
        self.chapter = 'Overview'
        chapter_url = '%s/%s/%s' % ('/courses', self.course_key, self.chapter)
        factory = RequestFactory()
        self.request = factory.get(chapter_url)
        self.request.user = UserFactory()
        self.modulestore = self.store._get_modulestore_for_courselike(self.course_key)  # pylint: disable=protected-access, attribute-defined-outside-init
        with self.modulestore.bulk_operations(self.course_key):
            with check_mongo_calls(num_finds, num_sends):
                self.toy_course = self.store.get_course(self.toy_loc, depth=2)
                self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
                    self.toy_loc, self.request.user, self.toy_course, depth=2
                )
    # Mongo makes 3 queries to load the course to depth 2:
    #     - 1 for the course
    #     - 1 for its children
    #     - 1 for its grandchildren
    # Split makes 6 queries to load the course to depth 2:
    #     - load the structure
    #     - load 5 definitions
    # Split makes 5 queries to render the toc:
    #     - it loads the active version at the start of the bulk operation
    #     - it loads 4 definitions, because it instantiates 4 VideoModules
    #       each of which access a Scope.content field in __init__
    @ddt.data((ModuleStoreEnum.Type.mongo, 3, 0, 0), (ModuleStoreEnum.Type.split, 6, 0, 5))
    @ddt.unpack
    def test_toc_toy_from_chapter(self, default_ms, setup_finds, setup_sends, toc_finds):
        # No active section: every section entry should have active=False.
        with self.store.default_store(default_ms):
            self.setup_modulestore(default_ms, setup_finds, setup_sends)
            expected = ([{'active': True, 'sections':
                          [{'url_name': 'Toy_Videos', 'display_name': u'Toy Videos', 'graded': True,
                            'format': u'Lecture Sequence', 'due': None, 'active': False},
                           {'url_name': 'Welcome', 'display_name': u'Welcome', 'graded': True,
                            'format': '', 'due': None, 'active': False},
                           {'url_name': 'video_123456789012', 'display_name': 'Test Video', 'graded': True,
                            'format': '', 'due': None, 'active': False},
                           {'url_name': 'video_4f66f493ac8f', 'display_name': 'Video', 'graded': True,
                            'format': '', 'due': None, 'active': False}],
                          'url_name': 'Overview', 'display_name': u'Overview'},
                         {'active': False, 'sections':
                          [{'url_name': 'toyvideo', 'display_name': 'toyvideo', 'graded': True,
                            'format': '', 'due': None, 'active': False}],
                          'url_name': 'secret:magic', 'display_name': 'secret:magic'}])
            course = self.store.get_course(self.toy_course.id, depth=2)
            with check_mongo_calls(toc_finds):
                actual = render.toc_for_course(
                    self.request.user, self.request, course, self.chapter, None, self.field_data_cache
                )
            for toc_section in expected:
                self.assertIn(toc_section, actual)
    # Mongo makes 3 queries to load the course to depth 2:
    #     - 1 for the course
    #     - 1 for its children
    #     - 1 for its grandchildren
    # Split makes 6 queries to load the course to depth 2:
    #     - load the structure
    #     - load 5 definitions
    # Split makes 5 queries to render the toc:
    #     - it loads the active version at the start of the bulk operation
    #     - it loads 4 definitions, because it instantiates 4 VideoModules
    #       each of which access a Scope.content field in __init__
    @ddt.data((ModuleStoreEnum.Type.mongo, 3, 0, 0), (ModuleStoreEnum.Type.split, 6, 0, 5))
    @ddt.unpack
    def test_toc_toy_from_section(self, default_ms, setup_finds, setup_sends, toc_finds):
        # With 'Welcome' as the active section, only that entry has active=True.
        with self.store.default_store(default_ms):
            self.setup_modulestore(default_ms, setup_finds, setup_sends)
            section = 'Welcome'
            expected = ([{'active': True, 'sections':
                          [{'url_name': 'Toy_Videos', 'display_name': u'Toy Videos', 'graded': True,
                            'format': u'Lecture Sequence', 'due': None, 'active': False},
                           {'url_name': 'Welcome', 'display_name': u'Welcome', 'graded': True,
                            'format': '', 'due': None, 'active': True},
                           {'url_name': 'video_123456789012', 'display_name': 'Test Video', 'graded': True,
                            'format': '', 'due': None, 'active': False},
                           {'url_name': 'video_4f66f493ac8f', 'display_name': 'Video', 'graded': True,
                            'format': '', 'due': None, 'active': False}],
                          'url_name': 'Overview', 'display_name': u'Overview'},
                         {'active': False, 'sections':
                          [{'url_name': 'toyvideo', 'display_name': 'toyvideo', 'graded': True,
                            'format': '', 'due': None, 'active': False}],
                          'url_name': 'secret:magic', 'display_name': 'secret:magic'}])
            with check_mongo_calls(toc_finds):
                actual = render.toc_for_course(
                    self.request.user, self.request, self.toy_course, self.chapter, section, self.field_data_cache
                )
            for toc_section in expected:
                self.assertIn(toc_section, actual)
@attr('shard_1')
@ddt.ddt
class TestHtmlModifiers(ModuleStoreTestCase):
    """
    Tests to verify that standard modifications to the output of XModule/XBlock
    student_view are taking place
    """
    def setUp(self):
        super(TestHtmlModifiers, self).setUp()
        self.user = UserFactory.create()
        self.request = RequestFactory().get('/')
        self.request.user = self.user
        self.request.session = {}
        self.course = CourseFactory.create()
        # html block content exercising each kind of link rewriting:
        # a /static/ link, a malformed /static// link, and a /course/ link.
        self.content_string = '<p>This is the content<p>'
        self.rewrite_link = '<a href="/static/foo/content">Test rewrite</a>'
        self.rewrite_bad_link = '<img src="/static//file.jpg" />'
        self.course_link = '<a href="/course/bar/content">Test course rewrite</a>'
        self.descriptor = ItemFactory.create(
            category='html',
            data=self.content_string + self.rewrite_link + self.rewrite_bad_link + self.course_link
        )
        self.location = self.descriptor.location
        self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            self.course.id,
            self.user,
            self.descriptor
        )
    def test_xmodule_display_wrapper_enabled(self):
        # With wrapping on, the xmodule wrapper div appears exactly once.
        module = render.get_module(
            self.user,
            self.request,
            self.location,
            self.field_data_cache,
            wrap_xmodule_display=True,
        )
        result_fragment = module.render(STUDENT_VIEW)
        self.assertEquals(len(PyQuery(result_fragment.content)('div.xblock.xblock-student_view.xmodule_HtmlModule')), 1)
    def test_xmodule_display_wrapper_disabled(self):
        # With wrapping off, no wrapper div is emitted.
        module = render.get_module(
            self.user,
            self.request,
            self.location,
            self.field_data_cache,
            wrap_xmodule_display=False,
        )
        result_fragment = module.render(STUDENT_VIEW)
        self.assertNotIn('div class="xblock xblock-student_view xmodule_display xmodule_HtmlModule"', result_fragment.content)
    def test_static_link_rewrite(self):
        # /static/foo/content should be rewritten to a /c4x/ asset path.
        module = render.get_module(
            self.user,
            self.request,
            self.location,
            self.field_data_cache,
        )
        result_fragment = module.render(STUDENT_VIEW)
        self.assertIn(
            '/c4x/{org}/{course}/asset/foo_content'.format(
                org=self.course.location.org,
                course=self.course.location.course,
            ),
            result_fragment.content
        )
    def test_static_badlink_rewrite(self):
        # A doubled slash (/static//file.jpg) still rewrites, with '_' prefix.
        module = render.get_module(
            self.user,
            self.request,
            self.location,
            self.field_data_cache,
        )
        result_fragment = module.render(STUDENT_VIEW)
        self.assertIn(
            '/c4x/{org}/{course}/asset/_file.jpg'.format(
                org=self.course.location.org,
                course=self.course.location.course,
            ),
            result_fragment.content
        )
    def test_static_asset_path_use(self):
        '''
        when a course is loaded with do_import_static=False (see xml_importer.py), then
        static_asset_path is set as an lms kv in course. That should make static paths
        not be mangled (ie not changed to c4x://).
        '''
        module = render.get_module(
            self.user,
            self.request,
            self.location,
            self.field_data_cache,
            static_asset_path="toy_course_dir",
        )
        result_fragment = module.render(STUDENT_VIEW)
        self.assertIn('href="/static/toy_course_dir', result_fragment.content)
    def test_course_image(self):
        # Default: image url is a /c4x/ asset; with static_asset_path set,
        # it becomes a plain /static/ path instead.
        url = course_image_url(self.course)
        self.assertTrue(url.startswith('/c4x/'))
        self.course.static_asset_path = "toy_course_dir"
        url = course_image_url(self.course)
        self.assertTrue(url.startswith('/static/toy_course_dir/'))
        self.course.static_asset_path = ""
    @override_settings(DEFAULT_COURSE_ABOUT_IMAGE_URL='test.png')
    @override_settings(STATIC_URL='static/')
    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_course_image_for_split_course(self, store):
        """
        for split courses if course_image is empty then course_image_url will be
        the default image url defined in settings
        """
        self.course = CourseFactory.create(default_store=store)
        self.course.course_image = ''
        url = course_image_url(self.course)
        self.assertEqual('static/test.png', url)
    def test_get_course_info_section(self):
        self.course.static_asset_path = "toy_course_dir"
        get_course_info_section(self.request, self.course, "handouts")
        # NOTE: check handouts output...right now test course seems to have no such content
        # at least this makes sure get_course_info_section returns without exception
    def test_course_link_rewrite(self):
        # /course/bar/content should be rewritten into the course's URL space.
        module = render.get_module(
            self.user,
            self.request,
            self.location,
            self.field_data_cache,
        )
        result_fragment = module.render(STUDENT_VIEW)
        self.assertIn(
            '/courses/{course_id}/bar/content'.format(
                course_id=self.course.id.to_deprecated_string()
            ),
            result_fragment.content
        )
class XBlockWithJsonInitData(XBlock):
    """
    Minimal XBlock used by tests to exercise JSON init-data injection.
    """
    # Class-level so a test can set the data before the block is rendered.
    the_json_data = None
    def student_view(self, context=None):  # pylint: disable=unused-argument
        """
        Render a trivial fragment whose JS initializer receives ``the_json_data``.
        """
        fragment = Fragment(u"Hello there!")
        fragment.add_javascript(u'alert("Hi!");')
        fragment.initialize_js('ThumbsBlock', self.the_json_data)
        return fragment
@attr('shard_1')
@ddt.ddt
class JsonInitDataTest(ModuleStoreTestCase):
    """Tests for JSON data injected into the JS init function."""
    # Each case: (init data handed to the block, exact JSON expected in the HTML).
    # The second case verifies that '</script>' inside the data is escaped so
    # it cannot break out of the script tag (XSS).
    @ddt.data(
        ({'a': 17}, '''{"a": 17}'''),
        ({'xss': '</script>alert("XSS")'}, r'''{"xss": "<\/script>alert(\"XSS\")"}'''),
    )
    @ddt.unpack
    @XBlock.register_temp_plugin(XBlockWithJsonInitData, identifier='withjson')
    def test_json_init_data(self, json_data, json_output):
        XBlockWithJsonInitData.the_json_data = json_data
        mock_user = UserFactory()
        mock_request = MagicMock()
        mock_request.user = mock_user
        course = CourseFactory()
        descriptor = ItemFactory(category='withjson', parent=course)
        field_data_cache = FieldDataCache([course, descriptor], course.id, mock_user)  # pylint: disable=no-member
        module = render.get_module_for_descriptor(
            mock_user,
            mock_request,
            descriptor,
            field_data_cache,
            course.id,  # pylint: disable=no-member
            course=course
        )
        html = module.render(STUDENT_VIEW).content
        self.assertIn(json_output, html)
        # No matter what data goes in, there should only be one close-script tag.
        self.assertEqual(html.count("</script>"), 1)
class ViewInStudioTest(ModuleStoreTestCase):
    """Tests for the 'View in Studio' link visiblity."""
    def setUp(self):
        """ Set up the user and request that will be used. """
        super(ViewInStudioTest, self).setUp()
        self.staff_user = GlobalStaffFactory.create()
        self.request = RequestFactory().get('/')
        self.request.user = self.staff_user
        self.request.session = {}
        # Populated by setup_mongo_course / setup_xml_course in subclasses.
        self.module = None
    def _get_module(self, course_id, descriptor, location):
        """
        Get the module from the course from which to pattern match (or not) the 'View in Studio' buttons
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course_id,
            self.staff_user,
            descriptor
        )
        return render.get_module(
            self.staff_user,
            self.request,
            location,
            field_data_cache,
        )
    def setup_mongo_course(self, course_edit_method='Studio'):
        """ Create a mongo backed course. """
        course = CourseFactory.create(
            course_edit_method=course_edit_method
        )
        # A vertical nested inside a vertical, so tests can check that the
        # Studio link appears only on the top-level vertical.
        descriptor = ItemFactory.create(
            category='vertical',
            parent_location=course.location,
        )
        child_descriptor = ItemFactory.create(
            category='vertical',
            parent_location=descriptor.location
        )
        self.module = self._get_module(course.id, descriptor, descriptor.location)
        # pylint: disable=attribute-defined-outside-init
        self.child_module = self._get_module(course.id, child_descriptor, child_descriptor.location)
    def setup_xml_course(self):
        """
        Define the XML backed course to use.
        Toy courses are already loaded in XML and mixed modulestores.
        """
        course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
        location = course_key.make_usage_key('chapter', 'Overview')
        descriptor = modulestore().get_item(location)
        self.module = self._get_module(course_key, descriptor, location)
@attr('shard_1')
class MongoViewInStudioTest(ViewInStudioTest):
    """Test the 'View in Studio' link visibility in a mongo backed course."""
    def test_view_in_studio_link_studio_course(self):
        """Regular Studio courses should see 'View in Studio' links."""
        self.setup_mongo_course()
        rendered = self.module.render(STUDENT_VIEW)
        self.assertIn('View Unit in Studio', rendered.content)
    def test_view_in_studio_link_only_in_top_level_vertical(self):
        """Regular Studio courses should not see 'View in Studio' for child verticals of verticals."""
        self.setup_mongo_course()
        rendered = self.module.render(STUDENT_VIEW)
        # Splitting on the vertical marker yields three pieces for two nested
        # verticals; the Studio link must appear only in the leading piece.
        pieces = rendered.content.split('data-block-type="vertical"')
        self.assertEqual(3, len(pieces), "Did not find two vertical blocks")
        self.assertIn('View Unit in Studio', pieces[0])
        for piece in pieces[1:]:
            self.assertNotIn('View Unit in Studio', piece)
    def test_view_in_studio_link_xml_authored(self):
        """Courses that change 'course_edit_method' setting can hide 'View in Studio' links."""
        self.setup_mongo_course(course_edit_method='XML')
        rendered = self.module.render(STUDENT_VIEW)
        self.assertNotIn('View Unit in Studio', rendered.content)
@attr('shard_1')
class MixedViewInStudioTest(ViewInStudioTest):
    """Test the 'View in Studio' link visibility in a mixed mongo backed course."""
    MODULESTORE = TEST_DATA_MIXED_TOY_MODULESTORE
    def test_view_in_studio_link_mongo_backed(self):
        """Mixed mongo courses that are mongo backed should see 'View in Studio' links."""
        self.setup_mongo_course()
        rendered = self.module.render(STUDENT_VIEW)
        self.assertIn('View Unit in Studio', rendered.content)
    def test_view_in_studio_link_xml_authored(self):
        """Courses that change 'course_edit_method' setting can hide 'View in Studio' links."""
        self.setup_mongo_course(course_edit_method='XML')
        rendered = self.module.render(STUDENT_VIEW)
        self.assertNotIn('View Unit in Studio', rendered.content)
    def test_view_in_studio_link_xml_backed(self):
        """Course in XML only modulestore should not see 'View in Studio' links."""
        self.setup_xml_course()
        rendered = self.module.render(STUDENT_VIEW)
        self.assertNotIn('View Unit in Studio', rendered.content)
@attr('shard_1')
class XmlViewInStudioTest(ViewInStudioTest):
    """Test the 'View in Studio' link visibility in an xml backed course."""
    MODULESTORE = TEST_DATA_XML_MODULESTORE
    def test_view_in_studio_link_xml_backed(self):
        """Course in XML only modulestore should not see 'View in Studio' links."""
        self.setup_xml_course()
        rendered = self.module.render(STUDENT_VIEW)
        self.assertNotIn('View Unit in Studio', rendered.content)
@attr('shard_1')
@patch.dict('django.conf.settings.FEATURES', {'DISPLAY_DEBUG_INFO_TO_STAFF': True, 'DISPLAY_HISTOGRAMS_TO_STAFF': True})
@patch('courseware.module_render.has_access', Mock(return_value=True))
class TestStaffDebugInfo(ModuleStoreTestCase):
    """Tests to verify that Staff Debug Info panel and histograms are displayed to staff."""
    def setUp(self):
        super(TestStaffDebugInfo, self).setUp()
        self.user = UserFactory.create()
        self.request = RequestFactory().get('/')
        self.request.user = self.user
        self.request.session = {}
        self.course = CourseFactory.create()
        # A scored capa problem: histograms are only rendered for scored modules.
        problem_xml = OptionResponseXMLFactory().build_xml(
            question_text='The correct answer is Correct',
            num_inputs=2,
            weight=2,
            options=['Correct', 'Incorrect'],
            correct_option='Correct'
        )
        self.descriptor = ItemFactory.create(
            category='problem',
            data=problem_xml,
            display_name='Option Response Problem'
        )
        self.location = self.descriptor.location
        self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            self.course.id,
            self.user,
            self.descriptor
        )
    @patch.dict('django.conf.settings.FEATURES', {'DISPLAY_DEBUG_INFO_TO_STAFF': False})
    def test_staff_debug_info_disabled(self):
        """No Staff Debug panel when the feature flag is off."""
        module = render.get_module(
            self.user,
            self.request,
            self.location,
            self.field_data_cache,
        )
        result_fragment = module.render(STUDENT_VIEW)
        self.assertNotIn('Staff Debug', result_fragment.content)
    def test_staff_debug_info_enabled(self):
        """Staff Debug panel renders when the feature flag is on."""
        module = render.get_module(
            self.user,
            self.request,
            self.location,
            self.field_data_cache,
        )
        result_fragment = module.render(STUDENT_VIEW)
        self.assertIn('Staff Debug', result_fragment.content)
    @patch.dict('django.conf.settings.FEATURES', {'DISPLAY_HISTOGRAMS_TO_STAFF': False})
    def test_histogram_disabled(self):
        """No histogram markup when the histogram feature flag is off."""
        module = render.get_module(
            self.user,
            self.request,
            self.location,
            self.field_data_cache,
        )
        result_fragment = module.render(STUDENT_VIEW)
        # BUG FIX: this previously asserted on the misspelled 'histrogram',
        # which could never occur in the output, so the check was vacuous.
        self.assertNotIn('histogram', result_fragment.content)
    def test_histogram_enabled_for_unscored_xmodules(self):
        """Histograms should not display for xmodules which are not scored."""
        html_descriptor = ItemFactory.create(
            category='html',
            data='Here are some course details.'
        )
        # BUG FIX: the cache must be built for the html block being rendered;
        # previously this mistakenly passed self.descriptor (the problem).
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            self.course.id,
            self.user,
            html_descriptor
        )
        with patch('openedx.core.lib.xblock_utils.grade_histogram') as mock_grade_histogram:
            mock_grade_histogram.return_value = []
            module = render.get_module(
                self.user,
                self.request,
                html_descriptor.location,
                field_data_cache,
            )
            module.render(STUDENT_VIEW)
            self.assertFalse(mock_grade_histogram.called)
    def test_histogram_enabled_for_scored_xmodules(self):
        """Histograms should display for xmodules which are scored."""
        # Seed a graded StudentModule so the problem counts as scored.
        StudentModuleFactory.create(
            course_id=self.course.id,
            module_state_key=self.location,
            student=UserFactory(),
            grade=1,
            max_grade=1,
            state="{}",
        )
        with patch('openedx.core.lib.xblock_utils.grade_histogram') as mock_grade_histogram:
            mock_grade_histogram.return_value = []
            module = render.get_module(
                self.user,
                self.request,
                self.location,
                self.field_data_cache,
            )
            module.render(STUDENT_VIEW)
            self.assertTrue(mock_grade_histogram.called)
# Descriptor types whose anonymous id is scoped per-course rather than per-student.
PER_COURSE_ANONYMIZED_DESCRIPTORS = (LTIDescriptor, )
# The set comprehension works around the bug that load_classes returns
# duplicates for multiply-declared classes.
PER_STUDENT_ANONYMIZED_DESCRIPTORS = {
    cls for (name, cls) in XModuleDescriptor.load_classes()
    if not issubclass(cls, PER_COURSE_ANONYMIZED_DESCRIPTORS)
}
@attr('shard_1')
@ddt.ddt
class TestAnonymousStudentId(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test that anonymous_student_id is set correctly across a variety of XBlock types
    """
    def setUp(self):
        super(TestAnonymousStudentId, self).setUp(create_user=False)
        self.user = UserFactory()
        self.course_key = self.create_toy_course()
        self.course = modulestore().get_course(self.course_key)
    @patch('courseware.module_render.has_access', Mock(return_value=True))
    def _get_anonymous_id(self, course_id, xblock_class):
        """
        Build a Mock descriptor of the given xblock_class, bind it for
        self.user in course_id, and return the runtime's anonymous_student_id.
        """
        location = course_id.make_usage_key('dummy_category', 'dummy_name')
        # A Mock standing in for a fully-constructed descriptor; the attributes
        # set here are the ones the binding machinery reads.
        descriptor = Mock(
            spec=xblock_class,
            _field_data=Mock(spec=FieldData, name='field_data'),
            location=location,
            static_asset_path=None,
            _runtime=Mock(
                spec=Runtime,
                resources_fs=None,
                mixologist=Mock(_mixins=(), name='mixologist'),
                name='runtime',
            ),
            scope_ids=Mock(spec=ScopeIds),
            name='descriptor',
            _field_data_cache={},
            _dirty_fields={},
            fields={},
            days_early_for_beta=None,
        )
        descriptor.runtime = CombinedSystem(descriptor._runtime, None)  # pylint: disable=protected-access
        # Use the xblock_class's bind_for_student method
        descriptor.bind_for_student = partial(xblock_class.bind_for_student, descriptor)
        if hasattr(xblock_class, 'module_class'):
            descriptor.module_class = xblock_class.module_class
        return render.get_module_for_descriptor_internal(
            user=self.user,
            descriptor=descriptor,
            student_data=Mock(spec=FieldData, name='student_data'),
            course_id=course_id,
            track_function=Mock(name='track_function'),  # Track Function
            xqueue_callback_url_prefix=Mock(name='xqueue_callback_url_prefix'),  # XQueue Callback Url Prefix
            request_token='request_token',
            course=self.course,
        ).xmodule_runtime.anonymous_student_id
    @ddt.data(*PER_STUDENT_ANONYMIZED_DESCRIPTORS)
    def test_per_student_anonymized_id(self, descriptor_class):
        # Per-student ids are the same regardless of course.
        for course_id in ('MITx/6.00x/2012_Fall', 'MITx/6.00x/2013_Spring'):
            self.assertEquals(
                # This value is set by observation, so that later changes to the student
                # id computation don't break old data
                '5afe5d9bb03796557ee2614f5c9611fb',
                self._get_anonymous_id(CourseKey.from_string(course_id), descriptor_class)
            )
    @ddt.data(*PER_COURSE_ANONYMIZED_DESCRIPTORS)
    def test_per_course_anonymized_id(self, descriptor_class):
        # Per-course ids differ between the two course runs.
        self.assertEquals(
            # This value is set by observation, so that later changes to the student
            # id computation don't break old data
            'e3b0b940318df9c14be59acb08e78af5',
            self._get_anonymous_id(SlashSeparatedCourseKey('MITx', '6.00x', '2012_Fall'), descriptor_class)
        )
        self.assertEquals(
            # This value is set by observation, so that later changes to the student
            # id computation don't break old data
            'f82b5416c9f54b5ce33989511bb5ef2e',
            self._get_anonymous_id(SlashSeparatedCourseKey('MITx', '6.00x', '2013_Spring'), descriptor_class)
        )
@attr('shard_1')
@patch('track.views.tracker')
class TestModuleTrackingContext(ModuleStoreTestCase):
    """
    Ensure correct tracking information is included in events emitted during XBlock callback handling.
    """
    def setUp(self):
        super(TestModuleTrackingContext, self).setUp()
        self.user = UserFactory.create()
        self.request = RequestFactory().get('/')
        self.request.user = self.user
        self.request.session = {}
        self.course = CourseFactory.create()
        self.problem_xml = OptionResponseXMLFactory().build_xml(
            question_text='The correct answer is Correct',
            num_inputs=2,
            weight=2,
            options=['Correct', 'Incorrect'],
            correct_option='Correct'
        )
    def test_context_contains_display_name(self, mock_tracker):
        problem_display_name = u'Option Response Problem'
        module_info = self.handle_callback_and_get_module_info(mock_tracker, problem_display_name)
        self.assertEquals(problem_display_name, module_info['display_name'])
    def handle_callback_and_get_module_info(self, mock_tracker, problem_display_name=None):
        """
        Creates a fake module, invokes the callback and extracts the 'module'
        metadata from the emitted problem_check event.
        """
        descriptor_kwargs = {
            'category': 'problem',
            'data': self.problem_xml
        }
        if problem_display_name:
            descriptor_kwargs['display_name'] = problem_display_name
        descriptor = ItemFactory.create(**descriptor_kwargs)
        render.handle_xblock_callback(
            self.request,
            self.course.id.to_deprecated_string(),
            quote_slashes(descriptor.location.to_deprecated_string()),
            'xmodule_handler',
            'problem_check',
        )
        # Exactly one event should have been emitted, and it must be the
        # problem_check event whose context carries the module metadata.
        self.assertEquals(len(mock_tracker.send.mock_calls), 1)
        mock_call = mock_tracker.send.mock_calls[0]
        event = mock_call[1][0]
        self.assertEquals(event['event_type'], 'problem_check')
        return event['context']['module']
    def test_missing_display_name(self, mock_tracker):
        # Without an explicit display_name, a generated one ('problem...') is used.
        actual_display_name = self.handle_callback_and_get_module_info(mock_tracker)['display_name']
        self.assertTrue(actual_display_name.startswith('problem'))
    def test_library_source_information(self, mock_tracker):
        """
        Check that XBlocks that are inherited from a library include the
        information about their library block source in events.
        We patch the modulestore to avoid having to create a library.
        """
        original_usage_key = UsageKey.from_string(u'block-v1:A+B+C+type@problem+block@abcd1234')
        original_usage_version = ObjectId()
        mock_get_original_usage = lambda _, key: (original_usage_key, original_usage_version)
        with patch('xmodule.modulestore.mixed.MixedModuleStore.get_block_original_usage', mock_get_original_usage):
            module_info = self.handle_callback_and_get_module_info(mock_tracker)
            self.assertIn('original_usage_key', module_info)
            self.assertEqual(module_info['original_usage_key'], unicode(original_usage_key))
            self.assertIn('original_usage_version', module_info)
            self.assertEqual(module_info['original_usage_version'], unicode(original_usage_version))
@attr('shard_1')
class TestXmoduleRuntimeEvent(TestSubmittingProblems):
    """
    Inherit from TestSubmittingProblems to get functionality that set up a course and problems structure
    """
    def setUp(self):
        super(TestXmoduleRuntimeEvent, self).setUp()
        self.homework = self.add_graded_section_to_course('homework')
        self.problem = self.add_dropdown_to_section(self.homework.location, 'p1', 1)
        # Payloads for publishing a grade and for deleting it (None values).
        self.grade_dict = {'value': 0.18, 'max_value': 32, 'user_id': self.student_user.id}
        self.delete_dict = {'value': None, 'max_value': None, 'user_id': self.student_user.id}
    def get_module_for_user(self, user):
        """Helper function to get useful module at self.location in self.course_id for user"""
        mock_request = MagicMock()
        mock_request.user = user
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            self.course.id, user, self.course, depth=2)
        return render.get_module(  # pylint: disable=protected-access
            user,
            mock_request,
            self.problem.location,
            field_data_cache,
        )._xmodule
    def set_module_grade_using_publish(self, grade_dict):
        """Publish the user's grade, takes grade_dict as input"""
        module = self.get_module_for_user(self.student_user)
        module.system.publish(module, 'grade', grade_dict)
        return module
    def test_xmodule_runtime_publish(self):
        """Tests the publish mechanism"""
        self.set_module_grade_using_publish(self.grade_dict)
        student_module = StudentModule.objects.get(student=self.student_user, module_state_key=self.problem.location)
        self.assertEqual(student_module.grade, self.grade_dict['value'])
        self.assertEqual(student_module.max_grade, self.grade_dict['max_value'])
    def test_xmodule_runtime_publish_delete(self):
        """Test deleting the grade using the publish mechanism"""
        module = self.set_module_grade_using_publish(self.grade_dict)
        module.system.publish(module, 'grade', self.delete_dict)
        student_module = StudentModule.objects.get(student=self.student_user, module_state_key=self.problem.location)
        self.assertIsNone(student_module.grade)
        self.assertIsNone(student_module.max_grade)
    @patch('courseware.module_render.SCORE_CHANGED.send')
    def test_score_change_signal(self, send_mock):
        """Test that a Django signal is generated when a score changes"""
        self.set_module_grade_using_publish(self.grade_dict)
        expected_signal_kwargs = {
            'sender': None,
            'points_possible': self.grade_dict['max_value'],
            'points_earned': self.grade_dict['value'],
            'user_id': self.student_user.id,
            'course_id': unicode(self.course.id),
            'usage_id': unicode(self.problem.location)
        }
        send_mock.assert_called_with(**expected_signal_kwargs)
@attr('shard_1')
class TestRebindModule(TestSubmittingProblems):
    """
    Tests to verify the functionality of rebinding a module.
    Inherit from TestSubmittingProblems to get functionality that set up a course structure
    """
    def setUp(self):
        super(TestRebindModule, self).setUp()
        self.homework = self.add_graded_section_to_course('homework')
        self.lti = ItemFactory.create(category='lti', parent=self.homework)
        self.user = UserFactory.create()
        self.anon_user = AnonymousUser()
    def get_module_for_user(self, user):
        """Helper function to get useful module at self.location in self.course_id for user"""
        mock_request = MagicMock()
        mock_request.user = user
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            self.course.id, user, self.course, depth=2)
        return render.get_module(  # pylint: disable=protected-access
            user,
            mock_request,
            self.lti.location,
            field_data_cache,
        )._xmodule
    def test_rebind_noauth_module_to_user_not_anonymous(self):
        """
        Tests that an exception is thrown when rebind_noauth_module_to_user is run from a
        module bound to a real user
        """
        module = self.get_module_for_user(self.user)
        user2 = UserFactory()
        user2.id = 2
        with self.assertRaisesRegexp(
            render.LmsModuleRenderError,
            "rebind_noauth_module_to_user can only be called from a module bound to an anonymous user"
        ):
            self.assertTrue(module.system.rebind_noauth_module_to_user(module, user2))
    def test_rebind_noauth_module_to_user_anonymous(self):
        """
        Tests that get_user_module_for_noauth succeeds when rebind_noauth_module_to_user is run from a
        module bound to AnonymousUser
        """
        module = self.get_module_for_user(self.anon_user)
        user2 = UserFactory()
        user2.id = 2
        module.system.rebind_noauth_module_to_user(module, user2)
        self.assertTrue(module)
        # After rebinding, all user-derived state must reflect user2.
        self.assertEqual(module.system.anonymous_student_id, anonymous_id_for_user(user2, self.course.id))
        self.assertEqual(module.scope_ids.user_id, user2.id)
        self.assertEqual(module.descriptor.scope_ids.user_id, user2.id)
    @patch('courseware.module_render.make_psychometrics_data_update_handler')
    @patch.dict(settings.FEATURES, {'ENABLE_PSYCHOMETRICS': True})
    def test_psychometrics_anonymous(self, psycho_handler):
        """
        Make sure that noauth modules with anonymous users don't have
        the psychometrics callback bound.
        """
        module = self.get_module_for_user(self.anon_user)
        module.system.rebind_noauth_module_to_user(module, self.anon_user)
        self.assertFalse(psycho_handler.called)
@attr('shard_1')
@ddt.ddt
class TestEventPublishing(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Tests of event publishing for both XModules and XBlocks.
    """
    def setUp(self):
        """
        Set up the course and user context
        """
        super(TestEventPublishing, self).setUp()
        self.mock_user = UserFactory()
        self.mock_user.id = 1
        self.request_factory = RequestFactory()
    @ddt.data('xblock', 'xmodule')
    @XBlock.register_temp_plugin(PureXBlock, identifier='xblock')
    @XBlock.register_temp_plugin(EmptyXModuleDescriptor, identifier='xmodule')
    @patch.object(render, 'make_track_function')
    def test_event_publishing(self, block_type, mock_track_function):
        """
        runtime.publish routes through the track function created for the
        current request, for both XBlock and XModule block types.
        """
        request = self.request_factory.get('')
        request.user = self.mock_user
        course = CourseFactory()
        descriptor = ItemFactory(category=block_type, parent=course)
        field_data_cache = FieldDataCache([course, descriptor], course.id, self.mock_user)  # pylint: disable=no-member
        block = render.get_module(self.mock_user, request, descriptor.location, field_data_cache)
        event_type = 'event_type'
        event = {'event': 'data'}
        block.runtime.publish(block, event_type, event)
        # The track function is built once for the request, then invoked with
        # the published event.
        mock_track_function.assert_called_once_with(request)
        mock_track_function.return_value.assert_called_once_with(event_type, event)
@attr('shard_1')
@ddt.ddt
class LMSXBlockServiceBindingTest(ModuleStoreTestCase):
    """
    Tests that the LMS Module System (XBlock Runtime) provides an expected set of services.
    """
    def setUp(self):
        """
        Set up the user and other fields that will be used to instantiate the runtime.
        """
        super(LMSXBlockServiceBindingTest, self).setUp()
        self.user = UserFactory()
        self.student_data = Mock()
        self.course = CourseFactory.create()
        self.track_function = Mock()
        self.xqueue_callback_url_prefix = Mock()
        self.request_token = Mock()
    @XBlock.register_temp_plugin(PureXBlock, identifier='pure')
    @ddt.data("user", "i18n", "fs", "field-data")
    def test_expected_services_exist(self, expected_service):
        """
        Tests that the 'user', 'i18n', 'fs', and 'field-data' services are
        provided by the LMS runtime.
        """
        descriptor = ItemFactory(category="pure", parent=self.course)
        runtime, _ = render.get_module_system_for_user(
            self.user,
            self.student_data,
            descriptor,
            self.course.id,
            self.track_function,
            self.xqueue_callback_url_prefix,
            self.request_token,
            course=self.course
        )
        service = runtime.service(descriptor, expected_service)
        self.assertIsNotNone(service)
    def test_beta_tester_fields_added(self):
        """
        Tests that the beta tester fields are set on LMS runtime.
        """
        descriptor = ItemFactory(category="pure", parent=self.course)
        descriptor.days_early_for_beta = 5
        runtime, _ = render.get_module_system_for_user(
            self.user,
            self.student_data,
            descriptor,
            self.course.id,
            self.track_function,
            self.xqueue_callback_url_prefix,
            self.request_token,
            course=self.course
        )
        # The test user is not a beta tester, but the descriptor's
        # early-access window is copied onto the runtime.
        self.assertFalse(getattr(runtime, u'user_is_beta_tester'))
        self.assertEqual(getattr(runtime, u'days_early_for_beta'), 5)
class PureXBlockWithChildren(PureXBlock):
    """
    Pure XBlock with children to use in tests.
    """
    # Advertise child support to the XBlock runtime.
    has_children = True
class EmptyXModuleWithChildren(EmptyXModule):  # pylint: disable=abstract-method
    """
    Empty XModule that declares child support, for use in tests.
    """
    has_children = True
class EmptyXModuleDescriptorWithChildren(EmptyXModuleDescriptor):  # pylint: disable=abstract-method
    """
    Descriptor for EmptyXModuleWithChildren; declares child support for tests.
    """
    module_class = EmptyXModuleWithChildren
    has_children = True
# Block categories exercised by the children-filtering tests below.
BLOCK_TYPES = ['xblock', 'xmodule']
# Two distinct users; each gets their own set of child blocks.
USER_NUMBERS = range(2)
@attr('shard_1')
@ddt.ddt
class TestFilteredChildren(ModuleStoreTestCase):
    """
    Tests that verify access to XBlock/XModule children work correctly
    even when those children are filtered by the runtime when loaded.
    """
    # pylint: disable=attribute-defined-outside-init, no-member
    def setUp(self):
        """
        Create one user per USER_NUMBERS entry and patch ``has_access`` so
        each user only sees the children created for them.
        """
        super(TestFilteredChildren, self).setUp()
        self.users = {number: UserFactory() for number in USER_NUMBERS}
        self.course = CourseFactory()
        # Keep the real implementation around so _has_access can delegate
        # every non-'load' check to it.
        self._old_has_access = render.has_access
        patcher = patch('courseware.module_render.has_access', self._has_access)
        patcher.start()
        self.addCleanup(patcher.stop)

    @ddt.data(*BLOCK_TYPES)
    @XBlock.register_temp_plugin(PureXBlockWithChildren, identifier='xblock')
    @XBlock.register_temp_plugin(EmptyXModuleDescriptorWithChildren, identifier='xmodule')
    def test_unbound(self, block_type):
        # An unbound block (no user) sees every child, unfiltered.
        block = self._load_block(block_type)
        self.assertUnboundChildren(block)

    @ddt.data(*itertools.product(BLOCK_TYPES, USER_NUMBERS))
    @ddt.unpack
    @XBlock.register_temp_plugin(PureXBlockWithChildren, identifier='xblock')
    @XBlock.register_temp_plugin(EmptyXModuleDescriptorWithChildren, identifier='xmodule')
    def test_unbound_then_bound_as_descriptor(self, block_type, user_number):
        # Binding after an unbound load must narrow children to the user's set.
        user = self.users[user_number]
        block = self._load_block(block_type)
        self.assertUnboundChildren(block)
        self._bind_block(block, user)
        self.assertBoundChildren(block, user)

    @ddt.data(*itertools.product(BLOCK_TYPES, USER_NUMBERS))
    @ddt.unpack
    @XBlock.register_temp_plugin(PureXBlockWithChildren, identifier='xblock')
    @XBlock.register_temp_plugin(EmptyXModuleDescriptorWithChildren, identifier='xmodule')
    def test_unbound_then_bound_as_xmodule(self, block_type, user_number):
        user = self.users[user_number]
        block = self._load_block(block_type)
        self.assertUnboundChildren(block)
        self._bind_block(block, user)

        # Validate direct XModule access as well
        if isinstance(block, XModuleDescriptor):
            self.assertBoundChildren(block._xmodule, user)  # pylint: disable=protected-access
        else:
            self.assertBoundChildren(block, user)

    @ddt.data(*itertools.product(BLOCK_TYPES, USER_NUMBERS))
    @ddt.unpack
    @XBlock.register_temp_plugin(PureXBlockWithChildren, identifier='xblock')
    @XBlock.register_temp_plugin(EmptyXModuleDescriptorWithChildren, identifier='xmodule')
    def test_bound_only_as_descriptor(self, block_type, user_number):
        # Binding immediately (no unbound access first) must also filter.
        user = self.users[user_number]
        block = self._load_block(block_type)
        self._bind_block(block, user)
        self.assertBoundChildren(block, user)

    @ddt.data(*itertools.product(BLOCK_TYPES, USER_NUMBERS))
    @ddt.unpack
    @XBlock.register_temp_plugin(PureXBlockWithChildren, identifier='xblock')
    @XBlock.register_temp_plugin(EmptyXModuleDescriptorWithChildren, identifier='xmodule')
    def test_bound_only_as_xmodule(self, block_type, user_number):
        user = self.users[user_number]
        block = self._load_block(block_type)
        self._bind_block(block, user)

        # Validate direct XModule access as well
        if isinstance(block, XModuleDescriptor):
            self.assertBoundChildren(block._xmodule, user)  # pylint: disable=protected-access
        else:
            self.assertBoundChildren(block, user)

    def _load_block(self, block_type):
        """
        Instantiate an XBlock of `block_type` with the appropriate set of children.
        """
        self.parent = ItemFactory(category=block_type, parent=self.course)

        # Create a child of each block type for each user
        self.children_for_user = {
            user: [
                ItemFactory(category=child_type, parent=self.parent).scope_ids.usage_id
                for child_type in BLOCK_TYPES
            ]
            for user in self.users.itervalues()
        }

        # Flatten the per-user child lists into the full child set.
        self.all_children = sum(self.children_for_user.values(), [])

        return modulestore().get_item(self.parent.scope_ids.usage_id)

    def _bind_block(self, block, user):
        """
        Bind `block` to the supplied `user`.
        """
        course_id = self.course.id
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course_id,
            user,
            block,
        )
        return get_module_for_descriptor(
            user,
            Mock(name='request', user=user),
            block,
            field_data_cache,
            course_id,
            course=self.course
        )

    def _has_access(self, user, action, obj, course_key=None):
        """
        Mock implementation of `has_access` used to control which blocks
        have access to which children during tests.
        """
        if action != 'load':
            # Delegate everything except child loading to the real check.
            return self._old_has_access(user, action, obj, course_key)

        if isinstance(obj, XBlock):
            key = obj.scope_ids.usage_id
        elif isinstance(obj, UsageKey):
            key = obj
        # NOTE(review): if `obj` is neither an XBlock nor a UsageKey, `key`
        # is never assigned and the lookup below raises NameError —
        # presumably unreachable for 'load' checks; confirm.

        if key == self.parent.scope_ids.usage_id:
            return True
        return key in self.children_for_user[user]

    def assertBoundChildren(self, block, user):
        """
        Ensure the bound children are indeed children.
        """
        self.assertChildren(block, self.children_for_user[user])

    def assertUnboundChildren(self, block):
        """
        Ensure unbound children are indeed children.
        """
        self.assertChildren(block, self.all_children)

    def assertChildren(self, block, child_usage_ids):
        """
        Used to assert that sets of children are equivalent.
        """
        self.assertEquals(set(child_usage_ids), set(child.scope_ids.usage_id for child in block.get_children()))
@attr('shard_1')
@ddt.ddt
class TestDisabledXBlockTypes(ModuleStoreTestCase):
    """
    Tests that verify disabled XBlock types are not loaded.
    """
    # pylint: disable=attribute-defined-outside-init, no-member
    def setUp(self):
        super(TestDisabledXBlockTypes, self).setUp()
        # Disable these block types in every underlying modulestore.
        for store in self.store.modulestores:
            store.disabled_xblock_types = ('combinedopenended', 'peergrading', 'video')

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_get_item(self, default_ms):
        with self.store.default_store(default_ms):
            course = CourseFactory()
            for block_type in ('peergrading', 'combinedopenended', 'video'):
                item = ItemFactory(category=block_type, parent=course)
                item = self.store.get_item(item.scope_ids.usage_id)
                # A disabled type must load as the raw fallback descriptor
                # rather than its real implementation class.
                self.assertEqual(item.__class__.__name__, 'RawDescriptorWithMixins')
# -*- coding: utf-8 -*-
"""
***************************************************************************
i_gensig.py
-----------
Date : March 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from .i import regroupRasters, exportSigFile
def processCommand(alg, parameters, context):
    """
    Run i.gensig with the signature file reduced to its basename, then
    restore the full path and export the generated signature file.
    """
    # GRASS wants only the basename of the signature file on the command line.
    full_sig_path = alg.parameterAsString(parameters, 'signaturefile', context)
    parameters['signaturefile'] = os.path.basename(full_sig_path)

    # Regroup the input rasters into the given group/subgroup, then run.
    group, subgroup = regroupRasters(alg, parameters, context, 'input', 'group', 'subgroup')
    alg.processCommand(parameters, context)

    # Restore the original full-path value and export the signature file.
    parameters['signaturefile'] = full_sig_path
    exportSigFile(alg, group, subgroup, full_sig_path)
"""
Selectors
"""
# top-level imports
from scrapy.selector.unified import Selector, SelectorList
# Public API of scrapy.selector, re-exported from scrapy.selector.unified.
__all__ = [
    "Selector",
    "SelectorList",
]
// RUN: clang-reorder-fields -record-name Foo -fields-order e,x,pi,s2,s1 %s -- -std=c++11 | FileCheck %s

// Lit test: verifies that clang-reorder-fields reorders both the field
// declarations of Foo and the matching constructor member-initializer list
// (including fields with in-class initializers). The trailing FileCheck
// patterns below describe the expected tool output line by line, so the
// code must not be reformatted.

class Foo {
public:
Foo();
private:
int x; // CHECK: {{^ double e = 2.71;}}
const char *s1; // CHECK-NEXT: {{^ int x;}}
const char *s2; // CHECK-NEXT: {{^ double pi = 3.14;}}
double pi = 3.14; // CHECK-NEXT: {{^ const char \*s2;}}
double e = 2.71; // CHECK-NEXT: {{^ const char \*s1;}}
};
Foo::Foo():
x(12), // CHECK: {{^ x\(12\)}},
s1("abc"), // CHECK-NEXT: {{^ s2\("def"\)}},
s2("def") // CHECK-NEXT: {{^ s1\("abc"\)}}
{}
int main() {
Foo foo;
return 0;
}
package daemon
import (
"testing"
containertypes "github.com/moby/moby/api/types/container"
"github.com/moby/moby/v2/daemon/container"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestGetInspectData(t *testing.T) {
c := &container.Container{
ID: "inspect-me",
HostConfig: &containertypes.HostConfig{},
State: &container.State{},
ExecCommands: container.NewExecStore(),
}
d := &Daemon{
linkIndex: newLinkIndex(),
}
if d.UsesSnapshotter() {
t.Skip("does not apply to containerd snapshotters, which don't have RWLayer set")
}
cfg := &configStore{}
d.configStore.Store(cfg)
_, _, err := d.getInspectData(&cfg.Config, c)
assert.Check(t, is.ErrorContains(err, "RWLayer of container inspect-me is unexpectedly nil"))
c.State.Dead = true
_, _, err = d.getInspectData(&cfg.Config, c)
assert.Check(t, err)
} | go | github | https://github.com/moby/moby | daemon/inspect_test.go |
"""
Certificate service
"""
import logging
from django.core.exceptions import ObjectDoesNotExist
from lms.djangoapps.utils import _get_key
from opaque_keys.edx.keys import CourseKey
from .models import GeneratedCertificate
log = logging.getLogger(__name__)
class CertificateService(object):
    """
    User Certificate service
    """

    def invalidate_certificate(self, user_id, course_key_or_id):
        """
        Invalidate the user certificate in a given course if it exists.
        """
        # Accept either a CourseKey or its string/id form.
        course_key = _get_key(course_key_or_id, CourseKey)
        try:
            certificate = GeneratedCertificate.objects.get(user=user_id, course_id=course_key)
            certificate.invalidate()
            log.info(
                u'Certificate invalidated for user %d in course %s',
                user_id,
                course_key
            )
        except ObjectDoesNotExist:
            # No certificate was ever generated for this user/course pair.
            log.warning(
                u'Invalidation failed because a certificate for user %d in course %s does not exist.',
                user_id,
                course_key
            )
//go:build !windows
package main
import (
"encoding/json"
"strings"
"testing"
"time"
"github.com/moby/moby/api/types/swarm"
"github.com/moby/moby/v2/integration-cli/checker"
"github.com/moby/moby/v2/internal/testutil"
"gotest.tools/v3/assert"
"gotest.tools/v3/poll"
)
func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *testing.T) {
ctx := testutil.GetContext(c)
d := s.AddDaemon(ctx, c, true, true)
out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top")
assert.NilError(c, err, out)
// Make sure task stays pending before plugin is available
poll.WaitOn(c, pollCheck(c, d.CheckServiceTasksInStateWithError(ctx, "top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
plugin := newVolumePlugin(c, "customvolumedriver")
defer plugin.Close()
// create a dummy volume to trigger lazy loading of the plugin
out, err = d.Cmd("volume", "create", "-d", "customvolumedriver", "hello")
assert.NilError(c, err, out)
// TODO(aaronl): It will take about 15 seconds for swarm to realize the
// plugin was loaded. Switching the test over to plugin v2 would avoid
// this long delay.
// make sure task has been deployed.
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount(ctx), checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout))
out, err = d.Cmd("ps", "-q")
assert.NilError(c, err)
containerID := strings.TrimSpace(out)
out, err = d.Cmd("inspect", "-f", "{{json .Mounts}}", containerID)
assert.NilError(c, err)
var mounts []struct {
Name string
Driver string
}
assert.NilError(c, json.NewDecoder(strings.NewReader(out)).Decode(&mounts))
assert.Equal(c, len(mounts), 1, out)
assert.Equal(c, mounts[0].Name, "my-volume")
assert.Equal(c, mounts[0].Driver, "customvolumedriver")
}
// Test network plugin filter in swarm
func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *testing.T) {
testRequires(c, IsAmd64)
ctx := testutil.GetContext(c)
d1 := s.AddDaemon(ctx, c, true, true)
d2 := s.AddDaemon(ctx, c, true, false)
// install plugin on d1 and d2
const pluginName = "aragunathan/global-net-plugin:latest"
_, err := d1.Cmd("plugin", "install", pluginName, "--grant-all-permissions")
assert.NilError(c, err)
_, err = d2.Cmd("plugin", "install", pluginName, "--grant-all-permissions")
assert.NilError(c, err)
// create network
const networkName = "globalnet"
_, err = d1.Cmd("network", "create", "--driver", pluginName, networkName)
assert.NilError(c, err)
// create a global service to ensure that both nodes will have an instance
const serviceName = "my-service"
_, err = d1.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, "busybox", "top")
assert.NilError(c, err)
// wait for tasks ready
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(2)), poll.WithTimeout(defaultReconciliationTimeout))
// remove service
_, err = d1.Cmd("service", "rm", serviceName)
assert.NilError(c, err)
// wait to ensure all containers have exited before removing the plugin. Else there's a
// possibility of container exits erroring out due to plugins being unavailable.
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount(ctx), d2.CheckActiveContainerCount(ctx)), checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout))
// disable plugin on worker
_, err = d2.Cmd("plugin", "disable", "-f", pluginName)
assert.NilError(c, err)
time.Sleep(20 * time.Second)
const imgName = "busybox:latest"
// create a new global service again.
_, err = d1.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, imgName, "top")
assert.NilError(c, err)
poll.WaitOn(c, pollCheck(c, d1.CheckRunningTaskImages(ctx), checker.DeepEquals(map[string]int{imgName: 1})), poll.WithTimeout(defaultReconciliationTimeout))
} | go | github | https://github.com/moby/moby | integration-cli/docker_cli_swarm_unix_test.go |
"""distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate a import library for its dll
# - create a def-file for python??.dll
# - create a import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
# --export-all-symbols because it doesn't worked reliable in some
# tested configurations. And because other windows compilers also
# need their symbols specified this no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead dllwrap doesn't work without -static because
# it tries to link against dlls instead their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
# this is windows standard and there are normally not the necessary symbols
# in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: cygwinccompiler.py 65835 2008-08-18 19:33:42Z amaury.forgeotdarc $"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
class CygwinCCompiler (UnixCCompiler):
    """UnixCCompiler subclass that drives GCC under Cygwin to build
    Windows objects, DLLs, and executables."""

    compiler_type = 'cygwin'
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".dll"
    static_lib_format = "lib%s%s"
    shared_lib_format = "%s%s"
    exe_extension = ".exe"

    def __init__ (self, verbose=0, dry_run=0, force=0):
        # Probe gcc/ld/dllwrap versions and pick linker + options accordingly.
        UnixCCompiler.__init__ (self, verbose, dry_run, force)

        (status, details) = check_config_h()
        self.debug_print("Python's GCC status: %s (details: %s)" %
                         (status, details))
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. "
                "Reason: %s. "
                "Compiling may fail because of undefined preprocessor macros."
                % details)

        self.gcc_version, self.ld_version, self.dllwrap_version = \
            get_versions()
        self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
                         (self.gcc_version,
                          self.ld_version,
                          self.dllwrap_version) )

        # ld_version >= "2.10.90" and < "2.13" should also be able to use
        # gcc -mdll instead of dllwrap
        # Older dllwraps had own version numbers, newer ones use the
        # same as the rest of binutils ( also ld )
        # dllwrap 2.10.90 is buggy
        if self.ld_version >= "2.10.90":
            self.linker_dll = "gcc"
        else:
            self.linker_dll = "dllwrap"

        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"

        # Hard-code GCC because that's what this is all about.
        # XXX optimization, warnings etc. should be customizable.
        self.set_executables(compiler='gcc -mcygwin -O -Wall',
                             compiler_so='gcc -mcygwin -mdll -O -Wall',
                             compiler_cxx='g++ -mcygwin -O -Wall',
                             linker_exe='gcc -mcygwin',
                             linker_so=('%s -mcygwin %s' %
                                        (self.linker_dll, shared_option)))

        # cygwin and mingw32 need different sets of libraries
        if self.gcc_version == "2.91.57":
            # cygwin shouldn't need msvcrt, but without the dlls will crash
            # (gcc version 2.91.57) -- perhaps something about initialization
            self.dll_libraries=["msvcrt"]
            self.warn(
                "Consider upgrading to a newer version of gcc")
        else:
            self.dll_libraries=[]

        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC 7.0 or 7.1.
        msc_pos = sys.version.find('MSC v.')
        if msc_pos != -1:
            msc_ver = sys.version[msc_pos+6:msc_pos+10]
            if msc_ver == '1300':
                # MSVC 7.0
                self.dll_libraries = ['msvcr70']
            elif msc_ver == '1310':
                # MSVC 7.1
                self.dll_libraries = ['msvcr71']

    # __init__ ()

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compile one source file; Windows resource files (.rc/.res) are
        turned into object files with windres rather than gcc."""
        if ext == '.rc' or ext == '.res':
            # gcc needs '.res' and '.rc' compiled to object files !!!
            try:
                self.spawn(["windres", "-i", src, "-o", obj])
            except DistutilsExecError, msg:
                raise CompileError, msg
        else: # for other files use the C-compiler
            try:
                self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                           extra_postargs)
            except DistutilsExecError, msg:
                raise CompileError, msg

    def link (self,
              target_desc,
              objects,
              output_filename,
              output_dir=None,
              libraries=None,
              library_dirs=None,
              runtime_library_dirs=None,
              export_symbols=None,
              debug=0,
              extra_preargs=None,
              extra_postargs=None,
              build_temp=None,
              target_lang=None):
        """Link object files; export symbols are written to a .def file so
        the resulting DLL exports exactly the requested names."""

        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])

        # Additional libraries
        libraries.extend(self.dll_libraries)

        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)

            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))

            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")
            lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")

            # Generate .def file
            contents = [
                "LIBRARY %s" % os.path.basename(output_filename),
                "EXPORTS"]
            for sym in export_symbols:
                contents.append(sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)

            # next add options for def-file and to creating import libraries
            # dllwrap uses different options than gcc/ld
            if self.linker_dll == "dllwrap":
                extra_preargs.extend(["--output-lib", lib_file])
                # for dllwrap we have to use a special option
                extra_preargs.extend(["--def", def_file])
            # we use gcc/ld here and can be sure ld is >= 2.9.10
            else:
                # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
                #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
                # for gcc/ld the def-file is specified as any object files
                objects.append(def_file)

        #end: if ((export_symbols is not None) and
        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):

        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let dllwrap/ld strip the output file
        # (On my machine: 10KB < stripped_file < ??100KB
        #   unstripped_file = stripped_file + XXX KB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")

        UnixCCompiler.link(self,
                           target_desc,
                           objects,
                           output_filename,
                           output_dir,
                           libraries,
                           library_dirs,
                           runtime_library_dirs,
                           None, # export_symbols, we do this in our def-file
                           debug,
                           extra_preargs,
                           extra_postargs,
                           build_temp,
                           target_lang)

    # link ()

    # -- Miscellaneous methods -----------------------------------------

    # overwrite the one from CCompiler to support rc and res-files
    def object_filenames (self,
                          source_filenames,
                          strip_dir=0,
                          output_dir=''):
        """Map source filenames to object filenames, keeping the original
        extension on .rc/.res files so _compile can recognize them."""
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            (base, ext) = os.path.splitext (os.path.normcase(src_name))
            if ext not in (self.src_extensions + ['.rc','.res']):
                raise UnknownFileError, \
                      "unknown file type '%s' (from '%s')" % \
                      (ext, src_name)
            if strip_dir:
                base = os.path.basename (base)
            if ext == '.res' or ext == '.rc':
                # these need to be compiled to object files
                obj_names.append (os.path.join (output_dir,
                                                base + ext + self.obj_extension))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names

    # object_filenames ()

# class CygwinCCompiler
# the same as cygwin plus some additional parameters
class Mingw32CCompiler (CygwinCCompiler):
    """CygwinCCompiler variant that runs GCC in -mno-cygwin (mingw32) mode,
    producing binaries without the Cygwin runtime."""

    compiler_type = 'mingw32'

    def __init__ (self,
                  verbose=0,
                  dry_run=0,
                  force=0):

        CygwinCCompiler.__init__ (self, verbose, dry_run, force)

        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"

        # A real mingw32 doesn't need to specify a different entry point,
        # but cygwin 2.91.57 in no-cygwin-mode needs it.
        if self.gcc_version <= "2.91.57":
            entry_point = '--entry _DllMain@12'
        else:
            entry_point = ''

        self.set_executables(compiler='gcc -mno-cygwin -O -Wall',
                             compiler_so='gcc -mno-cygwin -mdll -O -Wall',
                             compiler_cxx='g++ -mno-cygwin -O -Wall',
                             linker_exe='gcc -mno-cygwin',
                             linker_so='%s -mno-cygwin %s %s'
                                        % (self.linker_dll, shared_option,
                                           entry_point))
        # Maybe we should also append -mthreads, but then the finished
        # dlls need another dll (mingwm10.dll see Mingw32 docs)
        # (-mthreads: Support thread-safe exception handling on `Mingw32')

        # no additional libraries needed
        self.dll_libraries=[]

        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC 7.0 or 7.1.
        msc_pos = sys.version.find('MSC v.')
        if msc_pos != -1:
            msc_ver = sys.version[msc_pos+6:msc_pos+10]
            if msc_ver == '1300':
                # MSVC 7.0
                self.dll_libraries = ['msvcr70']
            elif msc_ver == '1310':
                # MSVC 7.1
                self.dll_libraries = ['msvcr71']

    # __init__ ()

# class Mingw32CCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using a unmodified
# version.
# Possible status values returned by check_config_h(), below.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
    """Check if the current Python installation (specifically, pyconfig.h)
    appears amenable to building extensions with GCC.  Returns a tuple
    (status, details), where 'status' is one of the following constants:
      CONFIG_H_OK
        all is well, go ahead and compile
      CONFIG_H_NOTOK
        doesn't look good
      CONFIG_H_UNCERTAIN
        not sure -- unable to read pyconfig.h
    'details' is a human-readable string explaining the situation.
    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """

    # XXX since this function also checks sys.version, it's not strictly a
    # "pyconfig.h" check -- should probably be renamed...

    from distutils import sysconfig
    import string
    # if sys.version contains GCC then python was compiled with
    # GCC, and the pyconfig.h file should be OK
    if string.find(sys.version,"GCC") >= 0:
        return (CONFIG_H_OK, "sys.version mentions 'GCC'")

    fn = sysconfig.get_config_h_filename()
    try:
        # It would probably better to read single lines to search.
        # But we do this only once, and it is fast enough
        f = open(fn)
        s = f.read()
        f.close()
    except IOError, exc:
        # if we can't read this file, we cannot say it is wrong
        # the compiler will complain later about this file as missing
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))
    else:
        # "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
        if string.find(s,"__GNUC__") >= 0:
            return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
        else:
            return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
    """ Try to find out the versions of gcc, ld and dllwrap.
        If not possible it returns None for it.
    """
    from distutils.version import LooseVersion
    from distutils.spawn import find_executable
    import re

    # Each tool is probed by running it with its version flag and parsing a
    # dotted version number out of the output; a missing tool yields None.
    gcc_exe = find_executable('gcc')
    if gcc_exe:
        out = os.popen(gcc_exe + ' -dumpversion','r')
        out_string = out.read()
        out.close()
        result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
        if result:
            gcc_version = LooseVersion(result.group(1))
        else:
            gcc_version = None
    else:
        gcc_version = None
    ld_exe = find_executable('ld')
    if ld_exe:
        out = os.popen(ld_exe + ' -v','r')
        out_string = out.read()
        out.close()
        result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
        if result:
            ld_version = LooseVersion(result.group(1))
        else:
            ld_version = None
    else:
        ld_version = None
    dllwrap_exe = find_executable('dllwrap')
    if dllwrap_exe:
        out = os.popen(dllwrap_exe + ' --version','r')
        out_string = out.read()
        out.close()
        # note the leading space: dllwrap prints "... <version>"
        result = re.search(' (\d+\.\d+(\.\d+)*)',out_string)
        if result:
            dllwrap_version = LooseVersion(result.group(1))
        else:
            dllwrap_version = None
    else:
        dllwrap_version = None
    return (gcc_version, ld_version, dllwrap_version)
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot;
import java.lang.Thread.State;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.awaitility.Awaitility;
import org.jspecify.annotations.Nullable;
import org.junit.jupiter.api.Test;
import org.mockito.InOrder;
import org.springframework.beans.factory.BeanCreationException;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.beans.factory.support.DefaultListableBeanFactory;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.support.AbstractApplicationContext;
import org.springframework.context.support.GenericApplicationContext;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException;
import static org.assertj.core.api.Assertions.assertThatIllegalStateException;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
/**
* Tests for {@link SpringApplicationShutdownHook}.
*
* @author Phillip Webb
* @author Andy Wilkinson
* @author Brian Clozel
*/
class SpringApplicationShutdownHookTests {
	@Test
	void shutdownHookIsNotAddedUntilContextIsRegistered() {
		TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
		shutdownHook.enableShutdownHookAddition();
		// Nothing registered yet, so no JVM shutdown hook should exist.
		assertThat(shutdownHook.isRuntimeShutdownHookAdded()).isFalse();
		ConfigurableApplicationContext context = new GenericApplicationContext();
		shutdownHook.registerApplicationContext(context);
		// Registering the first context installs the runtime hook.
		assertThat(shutdownHook.isRuntimeShutdownHookAdded()).isTrue();
	}
	@Test
	void shutdownHookIsNotAddedUntilHandlerIsRegistered() {
		TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
		shutdownHook.enableShutdownHookAddition();
		assertThat(shutdownHook.isRuntimeShutdownHookAdded()).isFalse();
		// Adding the first handler action also installs the runtime hook.
		shutdownHook.getHandlers().add(() -> {
		});
		assertThat(shutdownHook.isRuntimeShutdownHookAdded()).isTrue();
	}
	@Test
	void shutdownHookIsNotAddedUntilAdditionIsEnabled() {
		TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
		shutdownHook.getHandlers().add(() -> {
		});
		// Addition is still disabled, so registering a handler has no effect.
		assertThat(shutdownHook.isRuntimeShutdownHookAdded()).isFalse();
		shutdownHook.enableShutdownHookAddition();
		shutdownHook.getHandlers().add(() -> {
		});
		// Once enabled, the next registration installs the runtime hook.
		assertThat(shutdownHook.isRuntimeShutdownHookAdded()).isTrue();
	}
@Test
void runClosesContextsBeforeRunningHandlerActions() {
TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
List<Object> finished = new CopyOnWriteArrayList<>();
ConfigurableApplicationContext context = new TestApplicationContext(finished);
shutdownHook.registerApplicationContext(context);
context.refresh();
Runnable handlerAction = new TestHandlerAction(finished);
shutdownHook.getHandlers().add(handlerAction);
shutdownHook.run();
assertThat(finished).containsExactly(context, handlerAction);
}
@Test
void runWhenContextIsBeingClosedInAnotherThreadWaitsUntilContextIsInactive() throws InterruptedException {
// This situation occurs in the Spring Tools IDE. It triggers a context close via
// JMX and then stops the JVM. The two actions happen almost simultaneously
TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
List<Object> finished = new CopyOnWriteArrayList<>();
CountDownLatch closing = new CountDownLatch(1);
CountDownLatch proceedWithClose = new CountDownLatch(1);
ConfigurableApplicationContext context = new TestApplicationContext(finished, closing, proceedWithClose);
shutdownHook.registerApplicationContext(context);
context.refresh();
Runnable handlerAction = new TestHandlerAction(finished);
shutdownHook.getHandlers().add(handlerAction);
Thread contextThread = new Thread(context::close);
contextThread.start();
// Wait for context thread to begin closing the context
closing.await();
Thread shutdownThread = new Thread(shutdownHook);
shutdownThread.start();
// Shutdown thread should start waiting for context to become inactive
Awaitility.await().atMost(Duration.ofSeconds(30)).until(shutdownThread::getState, State.TIMED_WAITING::equals);
// Allow context thread to proceed, unblocking shutdown thread
proceedWithClose.countDown();
contextThread.join();
shutdownThread.join();
// Context should have been closed before handler action was run
assertThat(finished).containsExactly(context, handlerAction);
}
@Test
void runDueToExitDuringRefreshWhenContextHasBeenClosedDoesNotDeadlock() {
GenericApplicationContext context = new GenericApplicationContext();
TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
shutdownHook.registerApplicationContext(context);
context.registerBean(CloseContextAndExit.class, context, shutdownHook);
context.refresh();
}
@Test
void runWhenContextIsClosedDirectlyRunsHandlerActions() {
TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
List<Object> finished = new CopyOnWriteArrayList<>();
ConfigurableApplicationContext context = new TestApplicationContext(finished);
shutdownHook.registerApplicationContext(context);
context.refresh();
context.close();
Runnable handlerAction1 = new TestHandlerAction(finished);
Runnable handlerAction2 = new TestHandlerAction(finished);
shutdownHook.getHandlers().add(handlerAction1);
shutdownHook.getHandlers().add(handlerAction2);
shutdownHook.run();
assertThat(finished).contains(handlerAction1, handlerAction2);
}
@Test
@SuppressWarnings("NullAway") // Test null check
void addHandlerActionWhenNullThrowsException() {
TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
assertThatIllegalArgumentException().isThrownBy(() -> shutdownHook.getHandlers().add(null))
.withMessage("'action' must not be null");
}
@Test
void addHandlerActionWhenShuttingDownThrowsException() {
TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
shutdownHook.run();
Runnable handlerAction = new TestHandlerAction(new ArrayList<>());
assertThatIllegalStateException().isThrownBy(() -> shutdownHook.getHandlers().add(handlerAction))
.withMessage("Shutdown in progress");
}
@Test
@SuppressWarnings("NullAway") // Test null check
void removeHandlerActionWhenNullThrowsException() {
TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
assertThatIllegalArgumentException().isThrownBy(() -> shutdownHook.getHandlers().remove(null))
.withMessage("'action' must not be null");
}
@Test
void removeHandlerActionWhenShuttingDownThrowsException() {
TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
Runnable handlerAction = new TestHandlerAction(new ArrayList<>());
shutdownHook.getHandlers().add(handlerAction);
shutdownHook.run();
assertThatIllegalStateException().isThrownBy(() -> shutdownHook.getHandlers().remove(handlerAction))
.withMessage("Shutdown in progress");
}
@Test
void failsWhenDeregisterActiveContext() {
TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
ConfigurableApplicationContext context = new GenericApplicationContext();
shutdownHook.registerApplicationContext(context);
context.refresh();
assertThatIllegalStateException().isThrownBy(() -> shutdownHook.deregisterFailedApplicationContext(context));
assertThat(shutdownHook.isApplicationContextRegistered(context)).isTrue();
}
@Test
void deregistersFailedContext() {
TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
GenericApplicationContext context = new GenericApplicationContext();
shutdownHook.registerApplicationContext(context);
context.registerBean(FailingBean.class);
assertThatExceptionOfType(BeanCreationException.class).isThrownBy(context::refresh);
assertThat(shutdownHook.isApplicationContextRegistered(context)).isTrue();
shutdownHook.deregisterFailedApplicationContext(context);
assertThat(shutdownHook.isApplicationContextRegistered(context)).isFalse();
}
@Test
void handlersRunInDeterministicOrderFromLastRegisteredToFirst() {
TestSpringApplicationShutdownHook shutdownHook = new TestSpringApplicationShutdownHook();
Runnable r1 = mock(Runnable.class);
Runnable r2 = mock(Runnable.class);
Runnable r3 = mock(Runnable.class);
shutdownHook.getHandlers().add(r2);
shutdownHook.getHandlers().add(r1);
shutdownHook.getHandlers().add(r3);
shutdownHook.run();
InOrder ordered = inOrder(r1, r2, r3);
ordered.verify(r3).run();
ordered.verify(r1).run();
ordered.verify(r2).run();
ordered.verifyNoMoreInteractions();
}
static class TestSpringApplicationShutdownHook extends SpringApplicationShutdownHook {
private boolean runtimeShutdownHookAdded;
@Override
protected void addRuntimeShutdownHook() {
this.runtimeShutdownHookAdded = true;
}
boolean isRuntimeShutdownHookAdded() {
return this.runtimeShutdownHookAdded;
}
}
static class TestApplicationContext extends AbstractApplicationContext {
private final ConfigurableListableBeanFactory beanFactory = new DefaultListableBeanFactory();
private final List<Object> finished;
private final @Nullable CountDownLatch closing;
private final @Nullable CountDownLatch proceedWithClose;
TestApplicationContext(List<Object> finished) {
this(finished, null, null);
}
TestApplicationContext(List<Object> finished, @Nullable CountDownLatch closing,
@Nullable CountDownLatch proceedWithClose) {
this.finished = finished;
this.closing = closing;
this.proceedWithClose = proceedWithClose;
}
@Override
protected void refreshBeanFactory() {
}
@Override
protected void closeBeanFactory() {
}
@Override
protected void onClose() {
if (this.closing != null) {
this.closing.countDown();
}
if (this.proceedWithClose != null) {
try {
this.proceedWithClose.await(1, TimeUnit.MINUTES);
}
catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
}
this.finished.add(this);
}
@Override
public ConfigurableListableBeanFactory getBeanFactory() {
return this.beanFactory;
}
}
static class TestHandlerAction implements Runnable {
private final List<Object> finished;
TestHandlerAction(List<Object> finished) {
this.finished = finished;
}
@Override
public void run() {
this.finished.add(this);
}
}
static class CloseContextAndExit implements InitializingBean {
private final ConfigurableApplicationContext context;
private final Runnable shutdownHook;
CloseContextAndExit(ConfigurableApplicationContext context, SpringApplicationShutdownHook shutdownHook) {
this.context = context;
this.shutdownHook = shutdownHook;
}
@Override
public void afterPropertiesSet() throws Exception {
this.context.close();
// Simulate System.exit by running the hook on a separate thread and waiting
// for it to complete
Thread thread = new Thread(this.shutdownHook);
thread.start();
thread.join(15000);
assertThat(thread.isAlive()).isFalse();
}
}
static class FailingBean implements InitializingBean {
@Override
public void afterPropertiesSet() throws Exception {
throw new IllegalArgumentException("test failure");
}
}
} | java | github | https://github.com/spring-projects/spring-boot | core/spring-boot/src/test/java/org/springframework/boot/SpringApplicationShutdownHookTests.java |
import { EXAMPLE_TOOL_NAME, EXAMPLE_TOOL_URL } from "../lib/constants";
/**
 * Intro hero for the blog index page: a large "Blog." heading next to a
 * subtitle linking to Next.js and to the example tool whose name and URL
 * come from ../lib/constants.
 */
export default function Intro() {
  return (
    <section className="flex-col md:flex-row flex items-center md:justify-between mt-16 mb-16 md:mb-12">
      <h1 className="text-6xl md:text-8xl font-bold tracking-tighter leading-tight md:pr-8">
        Blog.
      </h1>
      {/* The explicit {" "} expressions preserve spaces around the inline links */}
      <h4 className="text-center md:text-left text-lg mt-5 md:pl-8">
        A statically generated blog example using{" "}
        <a
          href="https://nextjs.org/"
          className="underline hover:text-success duration-200 transition-colors"
        >
          Next.js
        </a>{" "}
        and{" "}
        <a
          href={EXAMPLE_TOOL_URL}
          className="underline hover:text-success duration-200 transition-colors"
        >
          {EXAMPLE_TOOL_NAME}
        </a>
        .
      </h4>
    </section>
  );
}
#[cfg(feature = "jvm-callback-support")]
use alloc::boxed::Box;
use core::ffi::c_void;
#[cfg(feature = "jvm-callback-support")]
use jni::{
objects::{GlobalRef, JObject},
sys::jlong,
JNIEnv,
};
/// Struct representing a callback from Rust into a foreign language
///
/// This is largely used internally by the Diplomat macro, and should not need to be constructed
/// manually outside of that context
///
/// Dropping this struct runs `destructor` on `data` (see the `Drop` impl
/// below), so the struct owns `data` for its entire lifetime.
#[repr(C)]
pub struct DiplomatCallback<ReturnType> {
    /// Any data required to run the callback; e.g. a pointer to the
    /// callback wrapper object in the foreign runtime + the runtime itself
    pub data: *mut c_void,
    /// Function to actually run the callback. Note the first param is mutable, but depending
    /// on if this is passed to a Fn or FnMut may not actually need to be.
    /// FFI-Callers of said functions should cast to mutable.
    ///
    /// Takes in `self.data` and any number of additional arguments.
    pub run_callback: unsafe extern "C" fn(*mut c_void, ...) -> ReturnType,
    /// Function to destroy this callback struct.
    ///
    /// Takes in `self.data`
    pub destructor: Option<unsafe extern "C" fn(*mut c_void)>,
}
impl<ReturnType> Drop for DiplomatCallback<ReturnType> {
    fn drop(&mut self) {
        // Give the foreign runtime a chance to release whatever `data`
        // points to; a `None` destructor means there is nothing to free.
        if let Some(destructor) = self.destructor {
            unsafe {
                (destructor)(self.data);
            }
        }
    }
}
// return a pointer to a JNI GlobalRef, which is a JVM GC root to the object provided.
// this can then be stored as a field in a struct, so that the struct
// is not deallocated until the JVM calls a destructor that unwraps
// the GlobalRef so it can be dropped.
#[cfg(feature = "jvm-callback-support")]
#[no_mangle]
extern "system" fn create_rust_jvm_cookie<'local>(
    env: JNIEnv<'local>,
    obj_to_ref: JObject<'local>,
) -> jlong {
    // NOTE(review): unwrap() panics if the JVM cannot allocate the global
    // ref; panicking across an `extern "system"` boundary is problematic —
    // confirm callers tolerate this.
    let global_ref = env.new_global_ref(obj_to_ref).unwrap();
    // Leak the Box: ownership is handed to the JVM as an opaque jlong and
    // reclaimed later by destroy_rust_jvm_cookie.
    Box::into_raw(Box::new(global_ref)) as jlong
}
#[cfg(feature = "jvm-callback-support")]
#[no_mangle]
extern "system" fn destroy_rust_jvm_cookie(global_ref_boxed: jlong) {
    // Reconstitute the Box leaked by create_rust_jvm_cookie and drop it,
    // releasing the JNI GlobalRef. Safety: `global_ref_boxed` must be a
    // value previously returned by create_rust_jvm_cookie, passed at most once.
    unsafe {
        drop(Box::from_raw(global_ref_boxed as *mut GlobalRef));
    }
}
try:
from enum import Enum
except ImportError:
# Use fallback package for python < 3.4
from flufl.enum import Enum
from sqlalchemy import Column
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy.schema import ForeignKey
from sqlalchemy.types import Enum as DbEnum
from sqlalchemy.types import Integer
from sqlalchemy.types import UnicodeText
from sandglass.time.models import BaseModel
from sandglass.time.models import create_index
class TAG(Enum):
    """Categories a tag can belong to (persisted in ``Tag.tag_type``)."""
    # Tags used by the system
    system = u'system'
    # Tags used by the accounting users
    accounting = u'accounting'
    # Tags used by employees
    activity = u'activity'


# Supported types of tags
TAG_TYPES = [item.value for item in TAG]
class Tag(BaseModel):
    """
    Model definition for tags.

    A tag always belongs to the user who created it and may be an alias of
    another tag via the ``original_id`` self-reference.
    """
    # Display name (up to 40 characters).
    name = Column(
        UnicodeText(40),
        nullable=False)
    # Optional free-form description.
    description = Column(
        UnicodeText())
    # Category of the tag; one of TAG_TYPES, stored as a plain string
    # (native_enum=False avoids a database-level ENUM type).
    tag_type = Column(
        DbEnum(*TAG_TYPES, native_enum=False),
        default=TAG.activity.value,
        nullable=False)
    original_id = Column(
        Integer,
        ForeignKey('time_tag.id'),
        doc="If current tag is an alias this is the ID of the original tag")
    user_id = Column(
        Integer,
        ForeignKey('time_user.id'),
        nullable=False,
        doc="User that created the tag")
    # Owning user (the other side of User.tags).
    user = relationship(
        "User",
        uselist=False,
        back_populates="tags")
    # Aliases pointing at this tag; the backref adds ``Tag.original`` on
    # each alias (remote_side pins the join to the original's id).
    aliases = relationship(
        "Tag",
        lazy=True,
        join_depth=1,
        backref=backref("original", remote_side=(lambda: Tag.id)))

    @declared_attr
    def __table_args__(cls):
        # Index the columns used for lookups.
        return (
            # Create field indexes
            create_index(cls, 'name'),
            create_index(cls, 'tag_type'),
        )
# -*- coding: utf-8 -*-
"""
Liquid is a form management tool for web frameworks.
Copyright (C) 2014, Bence Faludi (b.faludi@mito.hu)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, <see http://www.gnu.org/licenses/>.
"""
from . import widgets
# dict
def flattenAbsData( value_dict ):
    """
    Flat a multi-dimensional dictionary into one-dimensional dictionary.
    The keys of nested levels are joined with an underscore.

    @param value_dict: Multi-dimensional dictionary (or None)
    @type value_dict: dict

    @return: One-dimensional dictionary
    @rtype: dict
    """
    flat = {}
    for key, value in ( value_dict or {} ).items():
        if isinstance( value, dict ):
            # Recurse first, then prefix every flattened sub-key.
            for sub_key, sub_value in flattenAbsData( value ).items():
                flat[ u'{}_{}'.format( key, sub_key ) ] = sub_value
        else:
            flat[ key ] = value
    return flat
class Form( object ):
    """
    Form class to render HTML forms by Liquid.
    """

    # void
    def __init__( self, element, value = None, submit = u'Submit', \
        buttons = None, cls = None ):
        """
        Create a form for the given schema element. The element is cloned,
        so the schema object itself is never modified by the form.

        @param element: schema element object
        @type: elements.Element

        @param value: Initial value
        @type value: dict

        @param submit: Submit button's label
        @type submit: unicode

        @param buttons: list of Button widgets
        @type buttons: list<widgets.Button>

        @param cls: HTML class
        @type cls: unicode
        """
        # Work on a private clone, compute absolute names for every child
        # and load the initial values.
        self._element = element.clone()
        self._element.setAbsName()
        self._element.setValue( value )

        # The primary submit button always comes first, followed by any
        # caller-supplied buttons.
        self._buttons = [
            widgets.Button(
                submit,
                name = 'submit',
                is_primary = True
            )
        ]
        self._buttons.extend( buttons or [] )

        self._widget = widgets.Form()
        self._cls = cls
        self.valid = None

    # elements.Element
    def __getattr__( self, attr ):
        """
        Forward unknown attributes to the basic element, so the form can
        be used like a FieldSet (``form.field_name`` returns the element).

        @param attr: Attribute name
        @type attr: unicode

        @return: Field object
        @rtype: elements.Element
        """
        return getattr( self.getElement(), attr )

    # void
    def setErrorWidgets( self, widget ):
        """
        Define which widget renders the validation error messages of the
        form's element.

        @param widget: Error widget object
        @type widget: widgets.Widget
        """
        element = self.getElement()
        element.setErrorWidget( widget )

    # list<widgets.Button>
    def getButtons( self ):
        """
        Return every button of the form (submit button included).

        @return: List of all button
        @rtype: list<widgets.Button>
        """
        return self._buttons

    # elements.Element
    def getElement( self ):
        """
        Return the form's private clone of the schema element.

        @return: Basic element
        @rtype: elements.Element
        """
        return self._element

    # unicode
    def getClass( self ):
        """
        Return the HTML class of the form.

        @return: HTML class
        @rtype: unicode
        """
        return self._cls

    # tuple<bool,list>
    def isValid( self, return_list = False ):
        """
        Validate the form's element.

        @param return_list: Also return the list of errors
        @type return_list: bool

        @return: Validity of the form
        @rtype: bool (or tuple<bool,list<tuple<unicode,unicode>>>)
        """
        result = self.getElement().isValid()
        if return_list:
            return result

        self.valid = result[0]
        return self.valid

    # unicode
    def render( self ):
        """
        Render the form into HTML.

        @return: HTML
        @rtype: unicode
        """
        return self._widget.render( self )

    # dict
    def getValue( self ):
        """
        Return the current values of the basic element.

        @return: Element's values
        @rtype: dict
        """
        return self.getElement().getValue()

    # void
    def setValue( self, value ):
        """
        Replace the values of the element.

        @param value: New values of the element
        @type value: dict
        """
        self.getElement().setValue( value )

    # void
    def delValue( self ):
        """
        Empty the element.
        """
        self.getElement().delValue()

    # Value property
    value = property( getValue, setValue, delValue )
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import datetime
import os
import posixpath
import subprocess
import sys
import unittest
# Make the build_tools directory importable so generate_make/parse_dsc resolve.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)

import generate_make
import parse_dsc

# Minimal valid project description used as the baseline by every test
# below; individual tests mutate deep copies of it.
BASIC_DESC = {
  'TOOLS': ['clang-newlib', 'glibc'],
  'TARGETS': [
    {
      'NAME' : 'hello_world',
      'TYPE' : 'main',
      'SOURCES' : ['hello_world.c'],
    },
  ],
  'DEST' : 'examples/api'
}
class TestValidateFormat(unittest.TestCase):
  """Tests for parse_dsc.ValidateFormat using mutated copies of BASIC_DESC."""

  def _validate(self, src, expected_failure):
    """Validate |src|, checking it fails with message |expected_failure|.

    If |expected_failure| is None the description must validate cleanly.
    Otherwise ValidateFormat must raise a ValidationError whose message
    matches exactly.  (The previous implementation returned silently when
    an expected error was never raised, so such regressions went unnoticed.)
    """
    if not expected_failure:
      # Expected to be valid; any ValidationError propagates and fails the test.
      parse_dsc.ValidateFormat(src, parse_dsc.DSC_FORMAT)
      return
    try:
      parse_dsc.ValidateFormat(src, parse_dsc.DSC_FORMAT)
    except parse_dsc.ValidationError as e:
      self.assertEqual(str(e), expected_failure)
    else:
      self.fail('Expected ValidationError: %s' % expected_failure)

  def testGoodDesc(self):
    testdesc = copy.deepcopy(BASIC_DESC)
    self._validate(testdesc, None)

  def testMissingKey(self):
    testdesc = copy.deepcopy(BASIC_DESC)
    del testdesc['TOOLS']
    self._validate(testdesc, 'Missing required key TOOLS.')

    testdesc = copy.deepcopy(BASIC_DESC)
    del testdesc['TARGETS'][0]['NAME']
    self._validate(testdesc, 'Missing required key NAME.')

  def testNonEmpty(self):
    testdesc = copy.deepcopy(BASIC_DESC)
    testdesc['TOOLS'] = []
    self._validate(testdesc, 'Expected non-empty value for TOOLS.')

    testdesc = copy.deepcopy(BASIC_DESC)
    testdesc['TARGETS'] = []
    self._validate(testdesc, 'Expected non-empty value for TARGETS.')

    testdesc = copy.deepcopy(BASIC_DESC)
    testdesc['TARGETS'][0]['NAME'] = ''
    self._validate(testdesc, 'Expected non-empty value for NAME.')

  def testBadValue(self):
    testdesc = copy.deepcopy(BASIC_DESC)
    testdesc['TOOLS'] = ['clang-newlib', 'glibc', 'badtool']
    self._validate(testdesc, 'Value badtool not expected in TOOLS.')

  def testExpectStr(self):
    testdesc = copy.deepcopy(BASIC_DESC)
    testdesc['TOOLS'] = ['clang-newlib', True, 'glibc']
    self._validate(testdesc, 'Value True not expected in TOOLS.')

  def testExpectList(self):
    testdesc = copy.deepcopy(BASIC_DESC)
    testdesc['TOOLS'] = 'clang-newlib'
    self._validate(testdesc, 'Key TOOLS expects LIST not STR.')
# TODO(bradnelson): Add test which generates a real make and runs it.

# Allow running this suite directly as a script.
if __name__ == '__main__':
  unittest.main()
The built-in function traits are generic over a tuple of the function arguments.
If one uses angle-bracket notation (`Fn<(T,), Output=U>`) instead of parentheses
(`Fn(T) -> U`) to denote the function trait, the type parameter should be a
tuple. Otherwise function call notation cannot be used and the trait will not be
implemented by closures.
The most likely source of this error is using angle-bracket notation without
wrapping the function argument type into a tuple, for example:
```compile_fail,E0059
#![feature(unboxed_closures)]
fn foo<F: Fn<i32>>(f: F) -> F::Output { f(3) }
```
It can be fixed by adjusting the trait bound like this:
```
#![feature(unboxed_closures)]
fn foo<F: Fn<(i32,)>>(f: F) -> F::Output { f(3) }
```
Note that `(T,)` always denotes the type of a 1-tuple containing an element of
type `T`. The comma is necessary for syntactic disambiguation. | unknown | github | https://github.com/rust-lang/rust | compiler/rustc_error_codes/src/error_codes/E0059.md |
# (c) 2013-2014, Benno Joy <benno@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
from ansible.utils import template
from ansible import utils
from ansible import errors
from ansible.runner.return_data import ReturnData
class ActionModule(object):
    """
    Load variables from a YAML file on the control machine and return them
    to Ansible as ``ansible_facts`` (an ``include_vars``-style action).
    """

    # Everything happens locally; nothing is copied to the remote host.
    TRANSFERS_FILES = False

    def __init__(self, runner):
        # The runner supplies basedir, vault password and template context.
        self.runner = runner

    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        """
        Resolve ``module_args`` to a YAML file path, parse it and return its
        contents as facts.  Fails when no source is given, the file is
        missing, or the file's top level is not a dictionary.
        """
        if not module_args:
            result = dict(failed=True, msg="No source file given")
            return ReturnData(conn=conn, comm_ok=True, result=result)

        # module_args carries the (possibly templated) vars file name.
        source = module_args
        source = template.template(self.runner.basedir, source, inject)

        if '_original_file' in inject:
            # Included from another file: resolve relative to that file's
            # 'vars' directory.
            source = utils.path_dwim_relative(inject['_original_file'], 'vars', source, self.runner.basedir)
        else:
            source = utils.path_dwim(self.runner.basedir, source)

        if os.path.exists(source):
            # Vault-encrypted files are decrypted with the runner's password.
            data = utils.parse_yaml_from_file(source, vault_password=self.runner.vault_pass)
            if data and type(data) != dict:
                raise errors.AnsibleError("%s must be stored as a dictionary/hash" % source)
            elif data is None:
                # An empty vars file contributes no facts rather than failing.
                data = {}
            result = dict(ansible_facts=data)
            return ReturnData(conn=conn, comm_ok=True, result=result)
        else:
            result = dict(failed=True, msg="Source file not found.", file=source)
            return ReturnData(conn=conn, comm_ok=True, result=result)
import py
import pytest
from py._iniconfig import IniConfig, ParseError, __all__ as ALL
from py._iniconfig import iscommentline
from textwrap import dedent
def pytest_generate_tests(metafunc):
    # Parametrization hook for this module.
    # NOTE(review): metafunc.addcall and funcargnames are long-removed legacy
    # pytest APIs; this suite targets the old pytest version shipped with py.
    if 'input' in metafunc.funcargnames:
        # Tokenizer tests: one call per entry of the check_tokens table below.
        for name, (input, expected) in check_tokens.items():
            metafunc.addcall(id=name, funcargs={
                'input': input,
                'expected': expected,
            })
    elif hasattr(metafunc.function, 'multi'):
        # @py.test.mark.multi(...) tests: cartesian product of the keyword lists.
        kwargs = metafunc.function.multi.kwargs
        names, values = zip(*kwargs.items())
        values = cartesian_product(*values)
        for p in values:
            metafunc.addcall(funcargs=dict(zip(names, p)))
def cartesian_product(L, *lists):
    """Yield the cartesian product of the given iterables as tuples.

    Equivalent to ``itertools.product(L, *lists)`` — the hand-rolled
    recursive version (copied from http://bit.ly/cyIXjn) is replaced by the
    stdlib implementation, which yields tuples in the same lexicographic
    order (first iterable varies slowest).
    """
    from itertools import product
    for combination in product(L, *lists):
        yield combination
# Mapping: test-case name -> (ini source text, expected token list).
# Each expected token is (lineno, section, name, value); pytest_generate_tests
# turns every entry into one test_tokenize invocation.
check_tokens = {
    'section': (
        '[section]',
        [(0, 'section', None, None)]
    ),
    'value': (
        'value = 1',
        [(0, None, 'value', '1')]
    ),
    'value in section': (
        '[section]\nvalue=1',
        [(0, 'section', None, None), (1, 'section', 'value', '1')]
    ),
    'value with continuation': (
        'names =\n Alice\n Bob',
        [(0, None, 'names', 'Alice\nBob')]
    ),
    'value with aligned continuation': (
        'names = Alice\n'
        ' Bob',
        [(0, None, 'names', 'Alice\nBob')]
    ),
    'blank line':(
        '[section]\n\nvalue=1',
        [(0, 'section', None, None), (2, 'section', 'value', '1')]
    ),
    'comment': (
        '# comment',
        []
    ),
    # NOTE(review): this entry's input contains no comment at all — the
    # case name looks stale; confirm against upstream py/iniconfig.
    'comment on value': (
        'value = 1',
        [(0, None, 'value', '1')]
    ),
    'comment on section': (
        '[section] #comment',
        [(0, 'section', None, None)]
    ),
    'comment2': (
        '; comment',
        []
    ),
    'comment2 on section': (
        '[section] ;comment',
        [(0, 'section', None, None)]
    ),
    'pseudo section syntax in value': (
        'name = value []',
        [(0, None, 'name', 'value []')]
    ),
    'assignment in value': (
        'value = x = 3',
        [(0, None, 'value', 'x = 3')]
    ),
    'use of colon for name-values': (
        'name: y',
        [(0, None, 'name', 'y')]
    ),
    'use of colon without space': (
        'value:y=5',
        [(0, None, 'value', 'y=5')]
    ),
    'equality gets precedence': (
        'value=xyz:5',
        [(0, None, 'value', 'xyz:5')]
    ),
}
def parse(input):
    # only for testing purposes - _parse() does not use state except path
    # (object.__new__ deliberately skips IniConfig.__init__, which would
    # parse the data eagerly)
    ini = object.__new__(IniConfig)
    ini.path = "sample"
    return ini._parse(input.splitlines(True))


def parse_a_error(input):
    # Helper: parsing `input` must raise ParseError; returns the ExceptionInfo.
    return py.test.raises(ParseError, parse, input)
def test_tokenize(input, expected):
    # Parametrized by pytest_generate_tests from the check_tokens table.
    parsed = parse(input)
    assert parsed == expected


def test_parse_empty():
    parsed = parse("")
    assert not parsed
    ini = IniConfig("sample", "")
    assert not ini.sections


def test_ParseError():
    # Line numbers are 0-based internally but rendered 1-based.
    e = ParseError("filename", 0, "hello")
    assert str(e) == "filename:1: hello"


def test_continuation_needs_perceeding_token():
    # NOTE(review): "perceeding" is a typo for "preceding"; kept because the
    # function name is the test id.
    excinfo = parse_a_error(' Foo')
    assert excinfo.value.lineno == 0


def test_continuation_cant_be_after_section():
    excinfo = parse_a_error('[section]\n Foo')
    assert excinfo.value.lineno == 1


def test_section_cant_be_empty():
    # Raising ParseError is the whole assertion here.
    excinfo = parse_a_error('[]')


@py.test.mark.multi(line=[
    '!!',
])
def test_error_on_weird_lines(line):
    # Lines that are neither section, assignment, continuation nor comment
    # must be rejected.
    parse_a_error(line)
def test_iniconfig_from_file(tmpdir):
    path = tmpdir/'test.txt'
    path.write('[metadata]\nname=1')
    config = IniConfig(path=path)
    assert list(config.sections) == ['metadata']
    # An explicit data argument overrides the file contents.
    config = IniConfig(path, "[diff]")
    assert list(config.sections) == ['diff']
    # data may only be given together with a path.
    py.test.raises(TypeError, "IniConfig(data=path.read())")


def test_iniconfig_section_first(tmpdir):
    # A value before any section header is an error.
    excinfo = py.test.raises(ParseError, """
        IniConfig("x", data='name=1')
    """)
    assert excinfo.value.msg == "no section header defined"


def test_iniconig_section_duplicate_fails():
    # NOTE(review): "iniconig" is a typo for "iniconfig"; kept as the test id.
    excinfo = py.test.raises(ParseError, r"""
        IniConfig("x", data='[section]\n[section]')
    """)
    assert 'duplicate section' in str(excinfo.value)


def test_iniconfig_duplicate_key_fails():
    excinfo = py.test.raises(ParseError, r"""
        IniConfig("x", data='[section]\nname = Alice\nname = bob')
    """)
    assert 'duplicate name' in str(excinfo.value)
def test_iniconfig_lineof():
    # lineof() reports 1-based line numbers, or None for missing entries.
    config = IniConfig("x.ini", data=
        '[section]\n'
        'value = 1\n'
        '[section2]\n'
        '# comment\n'
        'value =2'
    )
    assert config.lineof('missing') is None
    assert config.lineof('section') == 1
    assert config.lineof('section2') == 3
    assert config.lineof('section', 'value') == 2
    assert config.lineof('section2','value') == 5
    assert config['section'].lineof('value') == 2
    assert config['section2'].lineof('value') == 5


def test_iniconfig_get_convert():
    config= IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
    # Values are strings unless a convert callable is supplied.
    assert config.get('section', 'int') == '1'
    assert config.get('section', 'int', convert=int) == 1


def test_iniconfig_get_missing():
    config= IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
    assert config.get('section', 'missing', default=1) == 1
    assert config.get('section', 'missing') is None


def test_section_get():
    config = IniConfig("x", data='[section]\nvalue=1')
    section = config['section']
    assert section.get('value', convert=int) == 1
    # The default is ignored when the key exists.
    assert section.get('value', 1) == "1"
    assert section.get('missing', 2) == 2


def test_missing_section():
    config = IniConfig("x", data='[section]\nvalue=1')
    py.test.raises(KeyError,'config["other"]')


def test_section_getitem():
    config = IniConfig("x", data='[section]\nvalue=1')
    # Repeated lookups must keep returning the same value.
    assert config['section']['value'] == '1'
    assert config['section']['value'] == '1'


def test_section_iter():
    config = IniConfig("x", data='[section]\nvalue=1')
    names = list(config['section'])
    assert names == ['value']
    items = list(config['section'].items())
    assert items==[('value', '1')]
def test_config_iter():
    config = IniConfig("x.ini", data=dedent('''
        [section1]
        value=1
        [section2]
        value=2
    '''))
    l = list(config)
    assert len(l) == 2
    assert l[0].name == 'section1'
    assert l[0]['value'] == '1'
    assert l[1].name == 'section2'
    assert l[1]['value'] == '2'


def test_config_contains():
    config = IniConfig("x.ini", data=dedent('''
        [section1]
        value=1
        [section2]
        value=2
    '''))
    assert 'xyz' not in config
    assert 'section1' in config
    assert 'section2' in config


def test_iter_file_order():
    # Sections and names must iterate in file order, not dict order.
    config = IniConfig("x.ini", data="""
[section2] #cpython dict ordered before section
value = 1
value2 = 2 # dict ordered before value
[section]
a = 1
b = 2
""")
    l = list(config)
    secnames = [x.name for x in l]
    assert secnames == ['section2', 'section']
    assert list(config['section2']) == ['value', 'value2']
    assert list(config['section']) == ['a', 'b']


def test_example_pypirc():
    # End-to-end example: a typical ~/.pypirc with a continuation value.
    config = IniConfig("pypirc", data=dedent('''
        [distutils]
        index-servers =
            pypi
            other
        [pypi]
        repository: <repository-url>
        username: <username>
        password: <password>
        [other]
        repository: http://example.com/pypi
        username: <username>
        password: <password>
    '''))
    distutils, pypi, other = list(config)
    assert distutils["index-servers"] == "pypi\nother"
    assert pypi['repository'] == '<repository-url>'
    assert pypi['username'] == '<username>'
    assert pypi['password'] == '<password>'
    assert ['repository', 'username', 'password'] == list(other)


def test_api_import():
    assert ALL == ['IniConfig', 'ParseError']


@pytest.mark.parametrize("line", [
    "#qwe",
    " #qwe",
    ";qwe",
    " ;qwe",
])
def test_iscommentline_true(line):
    # Leading whitespace before '#' or ';' still counts as a comment line.
    assert iscommentline(line)
#!/usr/bin/python
#
# Copyright 2011 Webdriver_name committers
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from subprocess import PIPE
import time
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
class Service(object):
    """
    Object that manages the starting and stopping of the ChromeDriver.
    """

    def __init__(self, executable_path, port=0, service_args=None,
                 log_path=None, env=None):
        """
        Creates a new instance of the Service.

        :Args:
         - executable_path : Path to the ChromeDriver
         - port : Port the service is running on (0 picks a free port)
         - service_args : List of args to pass to the chromedriver service
         - log_path : Path for the chromedriver service to log to
         - env : Environment mapping for the subprocess (defaults to os.environ)
        """
        self.port = port
        self.path = executable_path
        self.service_args = service_args or []
        if log_path:
            self.service_args.append('--log-path=%s' % log_path)
        if self.port == 0:
            self.port = utils.free_port()
        self.env = env
        # Initialised here so stop() is safe even if start() was never
        # called or failed to spawn the process (previously stop() raised
        # AttributeError in that case).
        self.process = None

    def start(self):
        """
        Starts the ChromeDriver Service.

        :Exceptions:
         - WebDriverException : Raised either when it can't start the service
           or when it can't connect to the service
        """
        env = self.env or os.environ
        try:
            self.process = subprocess.Popen(
                [self.path, "--port=%d" % self.port] + self.service_args,
                env=env, stdout=PIPE, stderr=PIPE)
        except OSError:
            # Only a failure to spawn the executable means chromedriver is
            # missing; the previous bare `except:` also masked unrelated bugs
            # behind this misleading message.
            raise WebDriverException(
                "ChromeDriver executable needs to be available in the path. "
                "Please download from http://chromedriver.storage.googleapis.com/index.html "
                "and read up at http://code.google.com/p/selenium/wiki/ChromeDriver")
        # Poll for up to 30 seconds until the service accepts connections.
        count = 0
        while not utils.is_connectable(self.port):
            count += 1
            time.sleep(1)
            if count == 30:
                raise WebDriverException("Can not connect to the ChromeDriver")

    @property
    def service_url(self):
        """
        Gets the url of the ChromeDriver Service.
        """
        return "http://localhost:%d" % self.port

    def stop(self):
        """
        Tells the ChromeDriver to stop and cleans up the process.
        """
        # If it's dead (or was never started) don't worry
        if self.process is None:
            return
        # Tell the server to shut itself down
        try:
            from urllib import request as url_request
        except ImportError:
            import urllib2 as url_request
        url_request.urlopen("http://127.0.0.1:%d/shutdown" % self.port)
        # Give it up to 30 seconds to release the port
        count = 0
        while utils.is_connectable(self.port):
            if count == 30:
                break
            count += 1
            time.sleep(1)
        # Tell the server to properly die in case it is still around
        try:
            if self.process:
                self.process.kill()
                self.process.wait()
        except OSError:
            # kill may not be available under windows environment
            pass
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Load a TensorFlow BERT checkpoint and save it as a PyTorch state dict.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint to convert.
        bert_config_file: JSON file describing the model architecture.
        pytorch_dump_path: Destination file passed to ``torch.save``.
    """
    # Build an empty PyTorch model from the declared architecture.
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Copy the TF variables into the PyTorch modules in place.
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Persist only the weights (state dict), not the full module object.
    print(f"Save PyTorch model to {pytorch_dump_path}")
    state_dict = model.state_dict()
    torch.save(state_dict, pytorch_dump_path)
# CLI entry point: parses the three required paths and runs the conversion.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
#!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: nios_aaaa_record
version_added: "2.6"
author: "Blair Rampling (@brampling)"
short_description: Configure Infoblox NIOS AAAA records
description:
- Adds and/or removes instances of AAAA record objects from
Infoblox NIOS servers. This module manages NIOS C(record:aaaa) objects
using the Infoblox WAPI interface over REST.
requirements:
- infoblox-client
extends_documentation_fragment: nios
options:
name:
description:
- Specifies the fully qualified hostname to add or remove from
the system
required: true
view:
description:
- Sets the DNS view to associate this AAAA record with. The DNS
view must already be configured on the system
required: true
default: default
aliases:
- dns_view
ipv6addr:
description:
- Configures the IPv6 address for this AAAA record.
required: true
aliases:
- ipv6
ttl:
description:
- Configures the TTL to be associated with this AAAA record
extattrs:
description:
- Allows for the configuration of Extensible Attributes on the
instance of the object. This argument accepts a set of key / value
pairs for configuration.
comment:
description:
- Configures a text string comment to be associated with the instance
of this object. The provided text string will be configured on the
object instance.
state:
description:
- Configures the intended state of the instance of the object on
the NIOS server. When this value is set to C(present), the object
is configured on the device and when this value is set to C(absent)
the value is removed (if necessary) from the device.
default: present
choices:
- present
- absent
'''
# Usage examples rendered by ansible-doc. AAAA records hold IPv6 addresses,
# so every example uses the ``ipv6`` alias (two examples previously said
# ``ipv4``, which this module does not accept).
EXAMPLES = '''
- name: configure an AAAA record
  nios_aaaa_record:
    name: aaaa.ansible.com
    ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
    state: present
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local
- name: add a comment to an existing AAAA record
  nios_aaaa_record:
    name: aaaa.ansible.com
    ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
    comment: this is a test comment
    state: present
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local
- name: remove an AAAA record from the system
  nios_aaaa_record:
    name: aaaa.ansible.com
    ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
    state: absent
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local
- name: update an AAAA record name
  nios_aaaa_record:
    name: {new_name: aaaa_new.ansible.com, old_name: aaaa.ansible.com}
    ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
    state: present
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.net_tools.nios.api import WapiModule
from ansible.module_utils.net_tools.nios.api import NIOS_AAAA_RECORD
def main():
    """Entry point: create, update or remove a NIOS AAAA record via the WAPI."""
    # Fields that map directly onto the NIOS record:aaaa object.
    ib_spec = {
        'name': dict(required=True, ib_req=True),
        'view': dict(default='default', aliases=['dns_view'], ib_req=True),
        'ipv6addr': dict(aliases=['ipv6'], ib_req=True),
        'ttl': dict(type='int'),
        'extattrs': dict(type='dict'),
        'comment': dict(),
    }

    # Module-level arguments, extended with the object fields and the
    # shared Infoblox provider/connection options.
    argument_spec = {
        'provider': dict(required=True),
        'state': dict(default='present', choices=['present', 'absent']),
    }
    argument_spec.update(ib_spec)
    argument_spec.update(WapiModule.provider_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    # WapiModule performs the actual create/update/delete against NIOS.
    wapi = WapiModule(module)
    result = wapi.run(NIOS_AAAA_RECORD, ib_spec)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import socket
import string, random
from time import time
from private_settings import SERVER_ADDRESS
ECHO_PORT = 7
LEN_PACKET = 127
N_PACKETS = 5000
TOT_BITS = float(LEN_PACKET * N_PACKETS * 8) * 2
MEGA = float(1024 * 1024)
UPDATE_STEP = (N_PACKETS/10)
class TCP_EchoClient:
    """Measures TCP echo round-trip throughput against a remote echo server."""
    def __init__(self, host):
        # Connect immediately; the peer is expected to run an echo service
        # on the well-known echo port (7).
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((host, ECHO_PORT))
        # One fixed random payload, reused for every round trip.
        self.packet = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(LEN_PACKET))
    def __packet(self):
        # Comment out the checks when measuring the throughput
        # self.packet = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(LEN_PACKET))
        # Send one payload and block until the echoed copy comes back.
        self.s.send(self.packet)
        data = self.s.recv(LEN_PACKET)
        # assert self.packet == data, "packet error:\n%s\n%s\n" % (self.packet, data)
    def test(self):
        # Run the full benchmark: progress every UPDATE_STEP packets, then
        # the achieved throughput in Mbits/s (Python 2 print statements).
        start = time()
        for i in range(N_PACKETS):
            if (i % UPDATE_STEP) == 0: print '%.2f%%' % ((float(i)/float(N_PACKETS)) * 100.)
            self.__packet()
        t = time() - start
        print 'Throughput: (%.2f)Mbits/s' % ((TOT_BITS / t)/MEGA)
    def __del__(self):
        # Best-effort socket cleanup when the client is garbage-collected.
        self.s.close()
# Run the benchmark forever against the configured server; each iteration
# opens a fresh connection (closed by TCP_EchoClient.__del__).
while True:
    e = TCP_EchoClient(SERVER_ADDRESS)
    e.test()
"""
sentry.tasks.fetch_source
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import itertools
import logging
import hashlib
import re
import urllib2
import zlib
import base64
from os.path import splitext
from collections import namedtuple
from simplejson import JSONDecodeError
from urlparse import urljoin, urlsplit
from sentry.constants import SOURCE_FETCH_TIMEOUT, MAX_CULPRIT_LENGTH
from sentry.utils.cache import cache
from sentry.utils.sourcemaps import sourcemap_to_index, find_source
from sentry.utils.strings import truncatechars
BAD_SOURCE = -1
# number of surrounding lines (on each side) to fetch
LINES_OF_CONTEXT = 5
CHARSET_RE = re.compile(r'charset=(\S+)')
DEFAULT_ENCODING = 'utf-8'
BASE64_SOURCEMAP_PREAMBLE = 'data:application/json;base64,'
BASE64_PREAMBLE_LENGTH = len(BASE64_SOURCEMAP_PREAMBLE)
CLEAN_MODULE_RE = re.compile(r"""^(?:(?:
(?:java)?scripts?|js|build|static|[_\.].*?| # common folder prefixes
v?(?:\d+\.)*\d+| # version numbers, v1, 1.0.0
[a-f0-9]{7,8}| # short sha
[a-f0-9]{32}| # md5
[a-f0-9]{40} # sha1
)/)+""", re.X | re.I)
UrlResult = namedtuple('UrlResult', ['url', 'headers', 'body'])
logger = logging.getLogger(__name__)
def trim_line(line):
    """Strip surrounding newlines and truncate overlong lines to ~150 chars."""
    cleaned = line.strip('\n')
    if len(cleaned) <= 150:
        return cleaned
    # Keep the first 140 characters and mark the cut explicitly.
    return cleaned[:140] + ' [... truncated]'
def get_source_context(source, lineno, context=LINES_OF_CONTEXT):
    """Return (pre_context, context_line, post_context) of trimmed source
    lines surrounding the 1-indexed *lineno* in the list *source*."""
    # lineno's in JS are 1-indexed
    # just in case. sometimes math is hard
    if lineno > 0:
        lineno -= 1
    # Clamp the context window to the bounds of the file.
    lower_bound = max(0, lineno - context)
    upper_bound = min(lineno + 1 + context, len(source))
    try:
        pre_context = map(trim_line, source[lower_bound:lineno])
    except IndexError:
        pre_context = []
    try:
        context_line = trim_line(source[lineno])
    except IndexError:
        # The reported line falls outside the fetched file; show nothing.
        context_line = ''
    try:
        post_context = map(trim_line, source[(lineno + 1):upper_bound])
    except IndexError:
        post_context = []
    return pre_context, context_line, post_context
def discover_sourcemap(result):
    """
    Given a UrlResult object, attempt to discover a sourcemap.

    Checks the (x-)sourcemap response headers first, then falls back to the
    ``//@``/``//#`` sourceMappingURL pragma near the top/bottom of the body.
    Returns an absolute URL, or a falsy value when nothing was found.
    """
    # When coercing the headers returned by urllib to a dict
    # all keys become lowercase so they're normalized
    sourcemap = result.headers.get('sourcemap', result.headers.get('x-sourcemap'))
    if not sourcemap:
        parsed_body = result.body.splitlines()
        # Source maps are only going to exist at either the top or bottom of the document.
        # Technically, there isn't anything indicating *where* it should exist, so we
        # are generous and assume it's somewhere either in the first or last 5 lines.
        # If it's somewhere else in the document, you're probably doing it wrong.
        if len(parsed_body) > 10:
            possibilities = set(parsed_body[:5] + parsed_body[-5:])
        else:
            possibilities = set(parsed_body)
        for line in possibilities:
            if line.startswith('//@ sourceMappingURL=') or line.startswith('//# sourceMappingURL='):
                # We want everything AFTER the indicator, which is 21 chars long
                sourcemap = line[21:].rstrip()
                break
    if sourcemap:
        # fix url so its absolute
        sourcemap = urljoin(result.url, sourcemap)
    return sourcemap
def fetch_url_content(url):
    """
    Pull down a URL, returning a tuple (url, headers, body).

    Returns BAD_SOURCE on any failure (network, decompression, decoding);
    the failure is logged with the exception attached.
    """
    import sentry
    try:
        opener = urllib2.build_opener()
        opener.addheaders = [
            ('Accept-Encoding', 'gzip'),
            ('User-Agent', 'Sentry/%s' % sentry.VERSION),
        ]
        req = opener.open(url, timeout=SOURCE_FETCH_TIMEOUT)
        headers = dict(req.headers)
        body = req.read()
        if headers.get('content-encoding') == 'gzip':
            # Content doesn't *have* to respect the Accept-Encoding header
            # and may send gzipped data regardless.
            # See: http://stackoverflow.com/questions/2423866/python-decompressing-gzip-chunk-by-chunk/2424549#2424549
            body = zlib.decompress(body, 16 + zlib.MAX_WBITS)
        try:
            content_type = headers['content-type']
        except KeyError:
            # If there is no content_type header at all, quickly assume default utf-8 encoding
            encoding = DEFAULT_ENCODING
        else:
            try:
                encoding = CHARSET_RE.search(content_type).group(1)
            except AttributeError:
                encoding = DEFAULT_ENCODING
        body = body.decode(encoding).rstrip('\n')
    except Exception:
        # Use the module-level ``logger`` (previously the root ``logging``
        # module was used directly, losing the module name/configuration).
        logger.info('Failed fetching %r', url, exc_info=True)
        return BAD_SOURCE
    return (url, headers, body)
def fetch_url(url):
    """
    Pull down a URL, returning a UrlResult object.
    Attempts to fetch from the cache.
    """
    # Cache key is versioned (v2) so entries in an older format simply miss.
    cache_key = 'fetch_url:v2:%s' % (
        hashlib.md5(url.encode('utf-8')).hexdigest(),)
    result = cache.get(cache_key)
    if result is None:
        result = fetch_url_content(url)
        # Cached for 30 seconds; failures (BAD_SOURCE) are cached too.
        cache.set(cache_key, result, 30)
    if result == BAD_SOURCE:
        return result
    return UrlResult(*result)
def fetch_sourcemap(url):
    """Fetch and parse the sourcemap at *url* (HTTP URL or inline data: URI).

    Returns a sourcemap index, or None when the map cannot be fetched or
    parsed.
    """
    if is_data_uri(url):
        # Inline map: the payload is base64 after the data-URI preamble.
        body = base64.b64decode(url[BASE64_PREAMBLE_LENGTH:])
    else:
        result = fetch_url(url)
        if result == BAD_SOURCE:
            return
        body = result.body
    # According to spec (https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.h7yy76c5il9v)
    # A SourceMap may be prepended with ")]}'" to cause a Javascript error.
    # If the file starts with that string, ignore the entire first line.
    if body.startswith(")]}'"):
        body = body.split('\n', 1)[1]
    try:
        index = sourcemap_to_index(body)
    except (JSONDecodeError, ValueError):
        return
    else:
        return index
def is_data_uri(url):
    """Return True when *url* is an inline base64 ``data:`` sourcemap URI."""
    return url.startswith(BASE64_SOURCEMAP_PREAMBLE)
def expand_javascript_source(data, **kwargs):
    """
    Attempt to fetch source code for javascript frames.
    Frames must match the following requirements:
    - lineno >= 0
    - colno >= 0
    - abs_path is the HTTP URI to the source
    - context_line is empty
    Mutates the input ``data`` with expanded context if available.
    """
    from sentry.interfaces import Stacktrace
    # Collect every stacktrace attached to the event's exception values.
    try:
        stacktraces = [
            Stacktrace(**e['stacktrace'])
            for e in data['sentry.interfaces.Exception']['values']
            if e.get('stacktrace')
        ]
    except KeyError:
        stacktraces = []
    if not stacktraces:
        logger.debug('No stacktrace for event %r', data['event_id'])
        return
    # build list of frames that we can actually grab source for
    frames = []
    for stacktrace in stacktraces:
        frames.extend([
            f for f in stacktrace.frames
            if f.lineno is not None
            and f.is_url()
        ])
    if not frames:
        logger.debug('Event %r has no frames with enough context to fetch remote source', data['event_id'])
        return data
    # Work queues: files still to download vs. already handled.
    pending_file_list = set()
    done_file_list = set()
    # Files for which a sourcemap lookup makes sense (we have a column).
    sourcemap_capable = set()
    # filename -> (list of source lines, sourcemap cache key or None)
    source_code = {}
    # NOTE(review): 'sourmap_idxs' looks like a typo of 'sourcemap_idxs';
    # it is purely local, so it is left untouched here.
    sourmap_idxs = {}
    for f in frames:
        pending_file_list.add(f.abs_path)
        if f.colno is not None:
            sourcemap_capable.add(f.abs_path)
    # Phase 1: download every referenced file (and files their sourcemaps
    # point at), populating source_code and the sourcemap index cache.
    while pending_file_list:
        filename = pending_file_list.pop()
        done_file_list.add(filename)
        # TODO: respect cache-contro/max-age headers to some extent
        logger.debug('Fetching remote source %r', filename)
        result = fetch_url(filename)
        if result == BAD_SOURCE:
            logger.debug('Bad source file %r', filename)
            continue
        # If we didn't have a colno, a sourcemap wont do us any good
        if filename not in sourcemap_capable:
            logger.debug('Not capable of sourcemap: %r', filename)
            source_code[filename] = (result.body.splitlines(), None)
            continue
        sourcemap = discover_sourcemap(result)
        # TODO: we're currently running splitlines twice
        if not sourcemap:
            source_code[filename] = (result.body.splitlines(), None)
            continue
        else:
            logger.debug('Found sourcemap %r for minified script %r', sourcemap[:256], result.url)
        # Key the cache by md5 so huge inline data: URIs make sane keys.
        sourcemap_key = hashlib.md5(sourcemap).hexdigest()
        source_code[filename] = (result.body.splitlines(), sourcemap_key)
        if sourcemap in sourmap_idxs:
            continue
        # pull down sourcemap
        index = fetch_sourcemap(sourcemap)
        if not index:
            logger.debug('Failed parsing sourcemap index: %r', sourcemap[:15])
            continue
        if is_data_uri(sourcemap):
            sourmap_idxs[sourcemap_key] = (index, result.url)
        else:
            sourmap_idxs[sourcemap_key] = (index, sourcemap)
        # queue up additional source files for download
        for source in index.sources:
            next_filename = urljoin(sourcemap, source)
            if next_filename not in done_file_list:
                if index.content:
                    # Inline content shipped with the map; no fetch needed.
                    source_code[next_filename] = (index.content[source], None)
                    done_file_list.add(next_filename)
                else:
                    pending_file_list.add(next_filename)
    # Phase 2: rewrite each frame using the fetched sources and sourcemaps.
    last_state = None
    state = None
    has_changes = False
    for frame in frames:
        try:
            source, sourcemap = source_code[frame.abs_path]
        except KeyError:
            # we must've failed pulling down the source
            continue
        # may have had a failure pulling down the sourcemap previously
        if sourcemap in sourmap_idxs and frame.colno is not None:
            index, relative_to = sourmap_idxs[sourcemap]
            last_state = state
            state = find_source(index, frame.lineno, frame.colno)
            abs_path = urljoin(relative_to, state.src)
            logger.debug('Mapping compressed source %r to mapping in %r', frame.abs_path, abs_path)
            try:
                source, _ = source_code[abs_path]
            except KeyError:
                frame.data = {
                    'sourcemap': sourcemap,
                }
                logger.debug('Failed mapping path %r', abs_path)
            else:
                # Store original data in annotation
                frame.data = {
                    'orig_lineno': frame.lineno,
                    'orig_colno': frame.colno,
                    'orig_function': frame.function,
                    'orig_abs_path': frame.abs_path,
                    'orig_filename': frame.filename,
                    'sourcemap': sourcemap,
                }
                # SourceMap's return zero-indexed lineno's
                frame.lineno = state.src_line + 1
                frame.colno = state.src_col
                # The offending function is always the previous function in the stack
                # Honestly, no idea what the bottom most frame is, so we're ignoring that atm
                frame.function = last_state.name if last_state else state.name
                frame.abs_path = abs_path
                frame.filename = state.src
                frame.module = generate_module(state.src) or '<unknown module>'
        elif sourcemap in sourmap_idxs:
            frame.data = {
                'sourcemap': sourcemap,
            }
        has_changes = True
        # TODO: theoretically a minified source could point to another mapped, minified source
        frame.pre_context, frame.context_line, frame.post_context = get_source_context(
            source=source, lineno=frame.lineno)
    if has_changes:
        logger.debug('Updating stacktraces with expanded source context')
        for exception, stacktrace in itertools.izip(data['sentry.interfaces.Exception']['values'], stacktraces):
            exception['stacktrace'] = stacktrace.serialize()
        # Attempt to fix the culrpit now that we have useful information
        culprit_frame = stacktraces[0].frames[-1]
        if culprit_frame.module and culprit_frame.function:
            data['culprit'] = truncatechars(generate_culprit(culprit_frame), MAX_CULPRIT_LENGTH)
def generate_module(src):
    """
    Converts a url into a made-up module name by doing the following:
    * Extract just the path name
    * Trimming off the initial /
    * Trimming off the file extension
    * Removes off useless folder prefixes
    e.g. http://google.com/js/v1.0/foo/bar/baz.js -> foo/bar/baz
    """
    path = urlsplit(src).path[1:]
    stem = splitext(path)[0]
    return CLEAN_MODULE_RE.sub('', stem)
def generate_culprit(frame):
    """Build the human-readable culprit string ``"<module> in <function>"``."""
    module, function = frame.module, frame.function
    return '%s in %s' % (module, function)
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Component\CssSelector;
use Symfony\Component\CssSelector\Parser\Shortcut\ClassParser;
use Symfony\Component\CssSelector\Parser\Shortcut\ElementParser;
use Symfony\Component\CssSelector\Parser\Shortcut\EmptyStringParser;
use Symfony\Component\CssSelector\Parser\Shortcut\HashParser;
use Symfony\Component\CssSelector\XPath\Extension\HtmlExtension;
use Symfony\Component\CssSelector\XPath\Translator;
/**
* CssSelectorConverter is the main entry point of the component and can convert CSS
* selectors to XPath expressions.
*
* @author Christophe Coevoet <stof@notk.org>
*/
class CssSelectorConverter
{
    private Translator $translator;
    // Per-instance reference to one of the two shared static caches below,
    // selected in the constructor based on the document type.
    private array $cache;
    // Process-wide memoization of compiled selectors, keyed by prefix + CSS.
    private static array $xmlCache = [];
    private static array $htmlCache = [];
    /**
     * @param bool $html Whether HTML support should be enabled. Disable it for XML documents
     */
    public function __construct(bool $html = true)
    {
        $this->translator = new Translator();
        if ($html) {
            // HTML mode registers extra pseudo-classes/case rules via the extension.
            $this->translator->registerExtension(new HtmlExtension($this->translator));
            $this->cache = &self::$htmlCache;
        } else {
            $this->cache = &self::$xmlCache;
        }
        // Shortcut parsers handle the most common selector shapes ('', 'tag',
        // '.class', '#id') without running the full selector parser.
        $this->translator
            ->registerParserShortcut(new EmptyStringParser())
            ->registerParserShortcut(new ElementParser())
            ->registerParserShortcut(new ClassParser())
            ->registerParserShortcut(new HashParser())
        ;
    }
    /**
     * Translates a CSS expression to its XPath equivalent.
     *
     * Optionally, a prefix can be added to the resulting XPath
     * expression with the $prefix parameter.
     */
    public function toXPath(string $cssExpr, string $prefix = 'descendant-or-self::'): string
    {
        // ??= stores the translation in the shared cache on first use.
        return $this->cache[$prefix][$cssExpr] ??= $this->translator->cssToXPath($cssExpr, $prefix);
    }
}
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the library page."""
import json
import logging
import string
from core.controllers import base
from core.domain import collection_services
from core.domain import exp_services
from core.domain import summary_services
from core.domain import user_services
from core.platform import models
import feconf
import utils
(base_models, exp_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.exploration])
current_user_services = models.Registry.import_current_user_services()
def get_matching_activity_dicts(query_string, search_cursor):
    """Given a query string and a search cursor, returns a list of activity
    dicts that satisfy the search query, together with the new search cursor.
    """
    # We only populate collections in the initial load, since the current
    # frontend search infrastructure is set up to only deal with one search
    # cursor at a time.
    # TODO(sll): Remove this special casing.
    collection_ids = []
    if not search_cursor:
        collection_ids, _ = (
            collection_services.get_collection_ids_matching_query(
                query_string))
    exp_ids, new_search_cursor = (
        exp_services.get_exploration_ids_matching_query(
            query_string, cursor=search_cursor))
    # Collections first, then explorations. (A redundant ``activity_list = []``
    # initialisation that was immediately overwritten has been removed.)
    activity_list = (
        summary_services.get_displayable_collection_summary_dicts_matching_ids(
            collection_ids))
    activity_list += (
        summary_services.get_displayable_exp_summary_dicts_matching_ids(
            exp_ids))
    # Hitting exactly the query limit usually means results were truncated.
    if len(activity_list) == feconf.DEFAULT_QUERY_LIMIT:
        logging.error(
            '%s activities were fetched to load the library page. '
            'You may be running up against the default query limits.'
            % feconf.DEFAULT_QUERY_LIMIT)
    return activity_list, new_search_cursor
class LibraryPage(base.BaseHandler):
    """The main library page. Used for both the default list of categories and
    for search results.
    """
    def get(self):
        """Handles GET requests."""
        # A '/search' URL means we render search results instead of the
        # default library index.
        search_mode = 'search' in self.request.url
        self.values.update({
            'meta_description': (
                feconf.SEARCH_PAGE_DESCRIPTION if search_mode
                else feconf.LIBRARY_PAGE_DESCRIPTION),
            'nav_mode': feconf.NAV_MODE_LIBRARY,
            # True only for logged-in users who completed signup.
            'has_fully_registered': bool(
                self.user_id and
                user_services.has_fully_registered(self.user_id)),
            'LANGUAGE_CODES_AND_NAMES': (
                utils.get_all_language_codes_and_names()),
            'search_mode': search_mode,
            'SEARCH_DROPDOWN_CATEGORIES': feconf.SEARCH_DROPDOWN_CATEGORIES,
        })
        self.render_template('pages/library/library.html')
class LibraryIndexHandler(base.BaseHandler):
    """Provides data for the default library index page."""
    def get(self):
        """Handles GET requests."""
        # TODO(sll): Support index pages for other language codes.
        summary_dicts_by_category = summary_services.get_library_groups([
            feconf.DEFAULT_LANGUAGE_CODE])
        recently_published_summary_dicts = (
            summary_services.get_recently_published_exploration_summary_dicts())
        top_rated_activity_summary_dicts = (
            summary_services.get_top_rated_exploration_summary_dicts(
                [feconf.DEFAULT_LANGUAGE_CODE]))
        featured_activity_summary_dicts = (
            summary_services.get_featured_activity_summary_dicts(
                [feconf.DEFAULT_LANGUAGE_CODE]))
        preferred_language_codes = [feconf.DEFAULT_LANGUAGE_CODE]
        if self.user_id:
            user_settings = user_services.get_user_settings(self.user_id)
            preferred_language_codes = user_settings.preferred_language_codes
        # Special groups are prepended in reverse-priority order so the final
        # ordering is: featured, top-rated, recently-published, then the rest.
        if recently_published_summary_dicts:
            summary_dicts_by_category.insert(0, {
                'activity_summary_dicts': recently_published_summary_dicts,
                'categories': [],
                'header_i18n_id': feconf.LIBRARY_CATEGORY_RECENTLY_PUBLISHED,
                'has_full_results_page': True,
            })
        if top_rated_activity_summary_dicts:
            summary_dicts_by_category.insert(0, {
                'activity_summary_dicts': top_rated_activity_summary_dicts,
                'categories': [],
                'header_i18n_id': (
                    feconf.LIBRARY_CATEGORY_TOP_RATED_EXPLORATIONS),
                'has_full_results_page': True,
            })
        if featured_activity_summary_dicts:
            summary_dicts_by_category.insert(0, {
                'activity_summary_dicts': featured_activity_summary_dicts,
                'categories': [],
                'header_i18n_id': feconf.LIBRARY_CATEGORY_FEATURED_ACTIVITIES,
                'has_full_results_page': False,
            })
        self.values.update({
            'activity_summary_dicts_by_category': (
                summary_dicts_by_category),
            'preferred_language_codes': preferred_language_codes,
        })
        self.render_json(self.values)
class SearchHandler(base.BaseHandler):
    """Provides data for activity search results."""
    def get(self):
        """Handles GET requests."""
        query_string = utils.unescape_encoded_uri_component(
            self.request.get('q'))
        # Remove all punctuation from the query string, and replace it with
        # spaces. See http://stackoverflow.com/a/266162 and
        # http://stackoverflow.com/a/11693937
        remove_punctuation_map = dict(
            (ord(char), None) for char in string.punctuation)
        query_string = query_string.translate(remove_punctuation_map)
        # Category/language filters are folded into the query string using
        # the search backend's field syntax.
        if self.request.get('category'):
            query_string += ' category=%s' % self.request.get('category')
        if self.request.get('language_code'):
            query_string += ' language_code=%s' % self.request.get(
                'language_code')
        search_cursor = self.request.get('cursor', None)
        activity_list, new_search_cursor = get_matching_activity_dicts(
            query_string, search_cursor)
        self.values.update({
            'activity_list': activity_list,
            'search_cursor': new_search_cursor,
        })
        self.render_json(self.values)
class LibraryRedirectPage(base.BaseHandler):
    """An old 'gallery' page that should redirect to the library index page."""
    def get(self):
        """Handles GET requests."""
        # The gallery's permanent home is now /library.
        self.redirect('/library')
class ExplorationSummariesHandler(base.BaseHandler):
    """Returns summaries corresponding to ids of public explorations. This
    controller supports returning private explorations for the given user.
    """
    def get(self):
        """Handles GET requests."""
        try:
            exp_ids = json.loads(self.request.get('stringified_exp_ids'))
        except Exception:
            # Malformed JSON in the query param is treated as a 404.
            raise self.PageNotFoundException
        include_private_exps_str = self.request.get(
            'include_private_explorations')
        include_private_exps = (
            include_private_exps_str.lower() == 'true'
            if include_private_exps_str else False)
        # Private explorations may only be included for a logged-in user.
        editor_user_id = self.user_id if include_private_exps else None
        if not editor_user_id:
            include_private_exps = False
        # NOTE: ``basestring`` is Python 2 only; this handler predates py3.
        if (not isinstance(exp_ids, list) or not all([
                isinstance(exp_id, basestring) for exp_id in exp_ids])):
            raise self.PageNotFoundException
        if include_private_exps:
            summaries = (
                summary_services.get_displayable_exp_summary_dicts_matching_ids(
                    exp_ids,
                    editor_user_id=editor_user_id))
        else:
            summaries = (
                summary_services.get_displayable_exp_summary_dicts_matching_ids(
                    exp_ids))
        self.values.update({
            'summaries': summaries
        })
        self.render_json(self.values)
import sys
import os
import signal
import argparse
import importlib
import six
import time
from .contrib import bottle
from .util import GlobCheckerThread
def parse_args():
    """Build and evaluate the ynm3k command-line interface.

    Returns the parsed ``argparse.Namespace``; the rest of the program reads
    every setting from it (usually via ``vars(...)``).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", default=8080,
                        help="Port to bind on.")
    parser.add_argument("--host", default="0.0.0.0",
                        help="Host to bind.")
    parser.add_argument("--echo", action="store_true",
                        help="Module echo, return the current http request detail.")
    parser.add_argument("--echo-prefix", default="/_y3k/",
                        help="The path prefix for echo module")
    parser.add_argument("--mock",
                        help="Enable mocking data via a json file.")
    parser.add_argument("--mock-prefix", default="",
                        help="The path prefix for all mocking data.")
    parser.add_argument("--mock-allow-host", default=False, action="store_true",
                        help="Allow Host header to be passed into upstream servers.")
    parser.add_argument("--static", default="",
                        help="Serve static files with specified directory.")
    parser.add_argument("--static-prefix", default="/",
                        help="The path prefix for serving static files with --static.")
    parser.add_argument("--static-serve-dir", action="store_true",
                        help="Serve directories as a list.")
    parser.add_argument("--zip", default="",
                        help="Serve a zip file as static directory.")
    parser.add_argument("--zip-prefix", default="/",
                        help="The path prefix for the serving file with --zip.")
    parser.add_argument("--zip-encoding", default="utf-8",
                        help="The encoding of zipfile")
    parser.add_argument("--interact", action="store_true",
                        help="Attach to a interactive console.")
    parser.add_argument("--interact-path", default="/_y3k/interact",
                        help="The path for interactive console.")
    parser.add_argument("--reload", default="",
                        help="Auto reload ynm3k server when watched files matching the glob pattern changed.")
    parser.add_argument("--server", default="auto",
                        help="Specify the web server for running ynm3k, "
                        "options available at https://bottlepy.org/docs/dev/deployment.html#switching-the-server-backend")
    parser.add_argument("--version", action='store_true', default=False,
                        help="Show version and exit.")
    args = parser.parse_args()
    return args
def run_server(args):
    """Instantiate the requested ynm3k modules and start the bottle server.

    ``args`` is a plain dict (``vars(Namespace)``). Each module registers
    its routes on the global bottle app as a side effect of construction,
    which is why the constructed objects are otherwise unused.
    """
    if args['version']:
        from . import __VERSION__
        print("ynm3k %s" % __VERSION__)
        return
    if args['echo']:
        from . import echo
        object_echo = echo.ModuleEcho(args['echo_prefix'])
    if args['interact']:
        from . import interact
        object_interact = interact.ModuleInteract(args['interact_path'])
    if args['static']:
        from . import static
        object_static = static.ModuleStatic(args['static_prefix'],
                                            path=args['static'],
                                            serve_dir=args['static_serve_dir'])
    if args['zip']:
        from .modules import zip_serve
        object_zip = zip_serve.ModuleZipServe(args['zip_prefix'],
                                              args['zip'],
                                              args['zip_encoding'])
    if args['mock']:
        from . import mock
        object_mock = mock.ModuleMock(args['mock_prefix'], args['mock'],
                                      allow_host=args['mock_allow_host'])
    # Blocks until the server exits.
    bottle.run(host=args['host'], port=int(args['port']),
               debug=True, server=args["server"])
def is_port_in_use(port):
    """Return True when TCP *port* on 127.0.0.1 cannot be bound.

    A successful bind+listen means the port is free. The socket is always
    closed (the original leaked it when ``bind`` raised).
    """
    import socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind(('127.0.0.1', port))
        sock.listen(1)
    except socket.error:
        return True
    finally:
        sock.close()
    return False
def main():
    """Entry point: run the server, optionally under a fork-based reloader.

    With ``--reload``, the parent process watches files matching the glob
    pattern and restarts the forked server child whenever one changes.
    """
    args = vars(parse_args())
    reload_pattern = args.pop('reload', None)
    reload_count = 0
    if not reload_pattern:
        run_server(args)
    else:
        while True:
            # Wait briefly for the previous child to release the port.
            for i in range(3):
                if not is_port_in_use(int(args.get('port'))):
                    break
                time.sleep(0.1)
            pid = os.fork()
            if pid == 0:
                # Child: run the server on a fresh bottle app context.
                bottle.app.push()
                run_server(args)
                break
            else:
                # Parent: watch for file changes; on change, kill the child
                # and loop to spawn a fresh one.
                gct = GlobCheckerThread(reload_pattern)
                gct.event_exit.clear()
                with gct:
                    try:
                        while True:
                            time.sleep(0.1)
                    except KeyboardInterrupt:
                        gct.event_exit.set()
                        if not gct.event_changed.is_set():
                            break
                        else:
                            reload_count += 1
                            print("Reloading ynm3k %s times."\
                                % reload_count)
                            time.sleep(1.0)
                    finally:
                        os.kill(pid, signal.SIGINT)
                        os.waitpid(pid, 0)
if __name__ == '__main__':
    # Python 2 only: make implicit str conversions default to UTF-8.
    if six.PY2:
        reload(sys)
        sys.setdefaultencoding('utf-8')
    main()
#!/usr/bin/env python
from __future__ import (print_function, unicode_literals, division,
absolute_import)
import argparse
import io
import json
import os
import re
import shutil
import subprocess
import sys
import tarfile
import zipfile
# Default install location of the cinder volume drivers (Python 2.7 distro
# packages) and the cinder configuration file to patch.
LOC = '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/'
ETC = "/etc/cinder/cinder.conf"
# Section-header matchers for cinder.conf.
# NOTE(review): these two compiled patterns are not referenced anywhere in
# the visible code -- possibly dead; confirm before removing.
ETC_DEFAULT_RE = re.compile(r"^\[DEFAULT\]\s*$")
ETC_SECTION_RE = re.compile(r"^\[[Dd]atera\]\s*$")
def unarchive(afile):
    """Extract archive *afile* into the current directory.

    Supports tar (any compression tarfile recognises) and zip archives.
    Returns the list of member names contained in the archive.
    Raises ValueError for any other format.
    """
    if tarfile.is_tarfile(afile):
        print("Archive is a tarfile")
        with tarfile.open(afile) as tar:
            tar.extractall()
            # BUG FIX: TarFile has no namelist(); getnames() is the
            # tarfile equivalent of ZipFile.namelist().
            return tar.getnames()
    elif zipfile.is_zipfile(afile):
        print("Archive is a zipfile")
        with zipfile.ZipFile(afile) as z:
            z.extractall()
            return z.namelist()
    else:
        raise ValueError("Unsupported archive format")
def _cinder_fix_enabled_backends(lines, index):
line = lines[index]
v = line.split('=')[-1]
parts = v.split(',')
parts.append('datera')
newline = 'enabled_backends = {}'.format(','.join(parts))
lines[index] = newline
def _cinder_add_enabled_backends(lines, index):
lines.insert(index, 'enabled_backends = datera')
def _cinder_fix_default_volume_type(lines, index):
lines[index] = 'default_volume_type = datera'
def _cinder_add_default_volume_type(lines, index):
lines.insert(index, 'default_volume_type = datera')
def _cinder_fix_debug(lines, index):
lines[index] = 'debug = True'
def _cinder_add_debug(lines, index):
lines.insert(index, 'debug = True')
def _cinder_add_san(lines, index, conf):
lines.insert(index+1, 'san_ip = {}'.format(conf['mgmt_ip']))
def _cinder_fix_san(lines, index, conf):
lines[index] = 'san_ip = {}'.format(conf['mgmt_ip'])
def _cinder_add_user(lines, index, conf):
lines.insert(index+1, 'san_login = {}'.format(conf['username']))
def _cinder_fix_user(lines, index, conf):
lines[index] = 'san_login = {}'.format(conf['username'])
def _cinder_add_pass(lines, index, conf):
lines.insert(index+1, 'san_password = {}'.format(conf['password']))
def _cinder_fix_pass(lines, index, conf):
lines[index] = 'san_password = {}'.format(conf['password'])
def _cinder_add_vbn(lines, index):
lines.insert(index+1, 'volume_backend_name = datera')
def _cinder_fix_vbn(lines, index):
lines[index] = 'volume_backend_name = datera'
def _cinder_add_datera_debug(lines, index):
lines.insert(index+1, 'datera_debug = True')
def _cinder_fix_datera_debug(lines, index):
lines[index] = 'datera_debug = True'
def _cinder_add_tenant(lines, index, conf):
lines.insert(index, 'datera_tenant_id = {}'.format(conf['tenant']))
def _cinder_fix_tenant(lines, index, conf):
lines[index] = 'datera_tenant_id = {}'.format(conf['tenant'])
def _discover_section(lines, conf, name):
start = None
end = None
matcher = re.compile("^\[{}\]\s*$".format(name))
for i, line in enumerate(lines):
if matcher.match(line):
start = i
break
if start is None:
raise EnvironmentError(
"[DEFAULT] section missing from ETC: {}".format(conf))
end = start
section_match = re.compile("^\[.*\]")
for i, line in enumerate(lines[start+1:]):
if section_match.match(line):
break
end += 1
return start, end
def cinder_volume(conf, etc_conf, inplace):
    """Patch a cinder.conf so the Datera backend is enabled and configured.

    conf     -- dict parsed from the Datera universal config; must provide
                the 'mgmt_ip', 'username', 'password' and 'tenant' keys.
    etc_conf -- path of the cinder.conf file to read and patch.
    inplace  -- when True write the result back to *etc_conf*, otherwise
                print the patched content to stdout.

    Raises EnvironmentError when the file or a required section is missing.
    """
    if not os.path.isfile(etc_conf):
        raise EnvironmentError(
            "cinder-volume ETC not found at: {}".format(etc_conf))
    with io.open(etc_conf, 'r') as f:
        lines = [elem.strip() for elem in f.readlines()]

    # ---- [DEFAULT] section ----
    default_start, default_end = _discover_section(lines, etc_conf, "DEFAULT")
    enabled_backends = None
    default_volume_type = None
    debug = None
    for i, line in enumerate(lines[default_start:default_end + 1]):
        if line.startswith("enabled_backends"):
            enabled_backends = default_start + i
        if line.startswith("default_volume_type"):
            default_volume_type = default_start + i
        if line.startswith("debug"):
            debug = default_start + i
    # ROBUSTNESS FIX: explicit None checks -- an option found at list
    # index 0 would be falsy and mistakenly treated as missing before.
    if enabled_backends is not None and \
            "datera" not in lines[enabled_backends]:
        _cinder_fix_enabled_backends(lines, enabled_backends)
    elif enabled_backends is None:
        _cinder_add_enabled_backends(lines, default_end)
    if default_volume_type is not None and \
            "datera" not in lines[default_volume_type]:
        _cinder_fix_default_volume_type(lines, default_volume_type)
    elif default_volume_type is None:
        _cinder_add_default_volume_type(lines, default_end)
    if debug is not None and 'True' not in lines[debug]:
        _cinder_fix_debug(lines, debug)
    elif debug is None:
        _cinder_add_debug(lines, default_end)

    # ---- [datera] section ----
    # BUG FIX: the hard-coded ETC path was previously reported in errors
    # even when the caller passed a different --conf file.
    # _discover_section raises when the section is absent, so the old
    # follow-up presence check (which also mis-fired for a section at
    # line 0) is unnecessary.
    dsection_start, dsection_end = _discover_section(lines, etc_conf,
                                                     "datera")
    san_check = None
    user_check = None
    pass_check = None
    vbn_check = None
    debug_check = None
    tenant_check = None
    for i, line in enumerate(lines[dsection_start:dsection_end + 1]):
        if 'san_ip' in line:
            san_check = dsection_start + i
        if 'san_login' in line:
            user_check = dsection_start + i
        if 'san_password' in line:
            pass_check = dsection_start + i
        if 'volume_backend_name' in line:
            vbn_check = dsection_start + i
        # NOTE(review): the trailing space presumably keeps this from
        # matching options that merely start with 'datera_debug' -- confirm.
        if 'datera_debug ' in line:
            debug_check = dsection_start + i
        if 'datera_tenant_id' in line:
            tenant_check = dsection_start + i
    if san_check is None:
        _cinder_add_san(lines, dsection_end, conf)
    else:
        _cinder_fix_san(lines, san_check, conf)
    if user_check is None:
        _cinder_add_user(lines, dsection_end, conf)
    else:
        _cinder_fix_user(lines, user_check, conf)
    if pass_check is None:
        _cinder_add_pass(lines, dsection_end, conf)
    else:
        _cinder_fix_pass(lines, pass_check, conf)
    if vbn_check is None:
        _cinder_add_vbn(lines, dsection_end)
    else:
        _cinder_fix_vbn(lines, vbn_check)
    if debug_check is None:
        _cinder_add_datera_debug(lines, dsection_end)
    else:
        _cinder_fix_datera_debug(lines, debug_check)
    if tenant_check is None:
        _cinder_add_tenant(lines, dsection_end, conf)
    else:
        _cinder_fix_tenant(lines, tenant_check, conf)

    data = '\n'.join(lines)
    if inplace:
        # BUG FIX: write back to the file that was read (etc_conf), not
        # the hard-coded ETC default path.
        with io.open(etc_conf, 'w+') as f:
            f.write(data)
    else:
        print(data)
# Install the Datera cinder driver and python SDK from the given archives,
# then patch cinder.conf.  Expects to run with permissions to write under
# args.dest (and sudo for pip).
# NOTE(review): indentation appears lost in this copy; nesting is inferred.
def main(args):
conf = None
with io.open(args.udc_file) as f:
conf = json.load(f)
print(conf)
if not args.just_conf:
# Locate the extracted driver source directory.
# NOTE(review): if no member ends with '/src/', src stays None and the
# copytree below will fail -- confirm the archive layout guarantees it.
src = None
print("Unarchiving: ", args.c_archive)
for name in unarchive(args.c_archive):
if name.endswith('/src/'):
src = os.path.join(name, 'datera')
dat_dir = os.path.join(args.dest, 'datera')
dat_file = os.path.join(args.dest, 'datera.py')
dat_file_2 = os.path.join(args.dest, 'datera.pyc')
# Remove any existing directory or files
try:
print("Removing:", dat_file)
os.remove(dat_file)
except OSError:
pass
try:
print("Removing:", dat_file_2)
os.remove(dat_file_2)
except OSError:
pass
try:
print("Removing:", dat_dir)
shutil.rmtree(dat_dir)
except OSError:
pass
print("Copying {} to {}".format(src, dat_dir))
shutil.copytree(src, dat_dir)
# Install the python SDK (editable) from the second archive.
print("Unarchiving: ", args.p_archive)
psdk = None
for name in unarchive(args.p_archive):
if name.endswith('/src/'):
psdk = "/".join(name.rstrip("/").split("/")[:-1])
cmd = ["sudo", "pip", "install", "-e", psdk]
print("Running command: ", " ".join(cmd))
print(subprocess.check_output(cmd))
# Always patch the configuration, even with --just-conf.
cinder_volume(conf, args.conf, args.inplace)
if __name__ == '__main__':
    # Command-line interface: two archives, the universal config file, and
    # optional overrides for the install/patch locations.
    parser = argparse.ArgumentParser()
    parser.add_argument('c_archive',
                        help='Tarball or zipfile archive of the Datera '
                             'cinder-driver github repository')
    parser.add_argument('p_archive',
                        help='Tarball or zipfile archive of the Datera python-'
                             'sdk')
    parser.add_argument('udc_file',
                        help='Datera Universal Config File')
    parser.add_argument('--dest', default=LOC,
                        help='Destination cinder/volume/drivers folder')
    parser.add_argument('--conf', default=ETC,
                        help='Location of cinder.conf file to modify')
    parser.add_argument('--just-conf', action='store_true')
    parser.add_argument('--inplace', action='store_true')
    main(parser.parse_args())
    sys.exit(0)
# coding: utf-8
import json
import requests
class YaDisk(object):
    """Minimal Yandex.Disk REST client authenticating with an OAuth token."""

    def __init__(self, token):
        # One shared session so the Authorization header rides every request.
        self.session = requests.session()
        self.session.headers.update({'Authorization': 'OAuth ' + str(token)})

    # The two OAuth helpers below never used instance state (they were
    # declared without *self*, which made them uncallable as methods), so
    # they are now proper staticmethods.
    @staticmethod
    def get_key_url(client_data):
        """Return the URL the user must visit to obtain a confirmation code."""
        format_url = "https://oauth.yandex.ru/authorize?response_type=code&client_id={}&device_id={}&device_name={}&force_confirm=yes"
        return format_url.format(client_data['client_id'],
                                 client_data['device_id'],
                                 client_data['device_name'])

    @staticmethod
    def get_token(key, client_data):
        """Exchange confirmation code *key* for an OAuth access token."""
        res = requests.post('https://oauth.yandex.ru/token', data={
            'grant_type': 'authorization_code',
            'code': key,
            'client_id': client_data['client_id'],
            'client_secret': client_data['client_secret'],
            'device_id': client_data['device_id'],
            # BUG FIX: key was misspelled 'deivce_name' (KeyError at runtime).
            'device_name': client_data['device_name'],
        })
        print(res.text)
        return res.json()['access_token']

    def _get(self, url, *args, **kwargs):
        return self.session.get(url, *args, **kwargs)

    def _post(self, url, data, *args, **kwargs):
        return self.session.post(url, {'data': json.dumps(data), },
                                 *args, **kwargs)

    def _put(self, url, *args, **kwargs):
        return self.session.put(url, *args, **kwargs)

    def list_files(self, dir_path):
        """Return the names of regular files under app:/<dir_path>."""
        file_list = []
        # First request (limit=0) only discovers the total entry count.
        res = self._get("https://cloud-api.yandex.net:443/v1/disk/resources",
                        params={"path": "app:/" + dir_path,
                                "limit": "0"})
        try:
            res.json()['_embedded']
        except (KeyError, ValueError):
            # BUG FIX: Response.text is a property, not a method; the old
            # res.text() raised TypeError inside this handler.
            print(res.text)
        # Second request fetches every entry.
        res = self._get("https://cloud-api.yandex.net:443/v1/disk/resources",
                        params={"path": "app:/" + dir_path,
                                "limit": res.json()['_embedded']['total']})
        try:
            res.json()['_embedded']
        except (KeyError, ValueError):
            print(res.text)
        for file in res.json()['_embedded']['items']:
            if file['type'] == 'file':
                file_list.append(file['name'])
        return file_list

    def direct_link(self, file_path):
        """Return a temporary direct download URL for app:/<file_path>."""
        response = self._get(
            "https://cloud-api.yandex.net:443/v1/disk/resources/download",
            params={"path": "app:/" + file_path, })
        return response.json()['href']

    def upload(self, file_path, file):
        """Upload *file* to app:/<file_path>, overwriting any existing file.

        Best effort: failures are reported on stdout, not raised.
        """
        response = self._get(
            "https://cloud-api.yandex.net:443/v1/disk/resources/upload",
            params={"path": "app:/" + file_path,
                    "overwrite": "true", })
        try:
            upload_url = response.json()['href']
            self._put(upload_url, data=file)
        except Exception:
            # Deliberately broad: upload is best-effort by design.
            print('upload error')
#!/usr/bin/env python
import subprocess
import sys
import csv
import matplotlib.pyplot as plt
import numpy as np
# Global matplotlib configuration: render text with LaTeX so plot labels
# match the surrounding document's Computer Modern fonts.
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 17
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
# Separate, smaller font used only for the legend (passed as prop= below).
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('x-small')
def get_last_row(csv_filename):
    """Return the final non-blank row of *csv_filename* as a list of strings.

    The whole file is scanned because CSV gives no way to seek straight to
    the last record.  Returns None for a file with no non-blank rows.
    """
    lastrow = None
    with open(csv_filename, 'r') as f:
        for row in csv.reader(f):
            if row:  # skip blank lines (e.g. trailing newline at EOF)
                lastrow = row
    return lastrow
# Run one MOOSE simulation with the given timestep and time integrator and
# return the L2 error at the final time (column 2 of scalar_out.csv).
# Exits the whole script (sys.exit(1)) if MOOSE fails to run.
# NOTE(review): indentation appears lost in this copy; nesting is inferred.
def run_moose(dt, time_integrator):
'''
Function which actually runs MOOSE.
'''
implicit_flag = 'true'
explicit_methods = ['ExplicitEuler', 'ExplicitMidpoint', 'Heun', 'Ralston']
# Set implicit_flag based on TimeIntegrator name
if (time_integrator in explicit_methods):
implicit_flag = 'false'
command_line_args = ['../../../moose_test-opt', '-i', 'scalar.i',
'Executioner/dt={}'.format(dt),
'Executioner/TimeIntegrator/type={}'.format(time_integrator),
'GlobalParams/implicit={}'.format(implicit_flag)]
try:
child = subprocess.Popen(command_line_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# communicate() waits for the process to terminate, so there's no
# need to wait() for it. It also sets the returncode attribute on
# child.
(stdoutdata, stderrdata) = child.communicate()
if (child.returncode != 0):
print('Running MOOSE failed: program output is below:')
print(stdoutdata)
# Bare 'raise' with no active exception: used here purely to jump
# into the except-branch below and exit.
raise
except:
print('Error executing moose_test')
sys.exit(1)
# Parse the last line of the output file to get the error at the final time.
last_row = get_last_row('scalar_out.csv')
return float(last_row[1])
#
# Main program: run every TimeIntegrator at several timesteps, then plot
# log-error vs log(1/dt) with the fitted convergence slope in the legend.
# NOTE(review): xrange below implies this script targets Python 2.
#
fig = plt.figure()
ax1 = fig.add_subplot(111)
# Lists of timesteps and TimeIntegrators to plot.
time_integrators = ['ImplicitEuler', 'ImplicitMidpoint', 'LStableDirk2', 'BDF2', 'CrankNicolson',
'LStableDirk3', 'LStableDirk4', 'AStableDirk4',
'ExplicitEuler', 'ExplicitMidpoint', 'Heun', 'Ralston']
dts = [.125, .0625, .03125, .015625]
# Plot colors
colors = ['maroon', 'blue', 'green', 'black', 'burlywood', 'olivedrab', 'midnightblue',
'tomato', 'darkmagenta', 'chocolate', 'lightslategray', 'skyblue']
# Plot line markers
markers = ['v', 'o', 'x', '^', 'H', 'h', '+', 'D', '*', '4', 'd', '8']
# Plot line styles
linestyles = [':', '-', '-.', '--', ':', '-.', '--', ':', '--', '-', '-.', '-']
for i in xrange(len(time_integrators)):
time_integrator = time_integrators[i]
# Place to store the results for this TimeIntegrator
results = []
# Call MOOSE to compute the results
for dt in dts:
results.append(run_moose(dt, time_integrator))
# Make plot
xdata = np.log10(np.reciprocal(dts))
ydata = np.log10(results)
# Compute linear fit of last three points (the asymptotic regime); the
# slope is the observed convergence order shown in the legend.
start_fit = len(xdata) - 3
end_fit = len(xdata)
fit = np.polyfit(xdata[start_fit:end_fit], ydata[start_fit:end_fit], 1)
# Make the plot -- unpack the user's additional plotting arguments
# from kwargs by prepending with **.
ax1.plot(xdata, ydata, label=time_integrator + ", $" + "{:.2f}".format(fit[0]) + "$",
color=colors[i], marker=markers[i], linestyle=linestyles[i])
# Set up the axis labels.
ax1.set_xlabel('$\log (\Delta t^{-1})$')
ax1.set_ylabel('$\log \|e(T)\|_{L^2}$')
# Add a legend
plt.legend(loc='lower left', prop=fontP)
# Save a PDF
plt.savefig('plot.pdf', format='pdf')
# Local Variables:
# python-indent: 2
# End:
"""
CodeHilite Extension for Python-Markdown
========================================
Adds code/syntax highlighting to standard Python-Markdown code blocks.
See <https://pythonhosted.org/Markdown/extensions/code_hilite.html>
for documentation.
Original code Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer
from pygments.formatters import get_formatter_by_name
pygments = True
except ImportError:
pygments = False
def parse_hl_lines(expr):
    """Support our syntax for emphasizing certain lines of code.

    *expr* should be like '1 2' to emphasize lines 1 and 2 of a code block.
    Returns a list of ints (the line numbers to emphasize); an empty or
    unparseable *expr* yields [].
    """
    if not expr:
        return []
    try:
        return [int(token) for token in expr.split()]
    except ValueError:
        return []
# ------------------ The Main CodeHilite Class ----------------------
# NOTE(review): indentation appears lost in this copy; nesting is inferred.
class CodeHilite(object):
"""
Determine language of source code, and pass it into pygments hilighter.
Basic Usage:
>>> code = CodeHilite(src = 'some text')
>>> html = code.hilite()
* src: Source string or any object with a .readline attribute.
* linenums: (Boolean) Set line numbering to 'on' (True),
'off' (False) or 'auto'(None). Set to 'auto' by default.
* guess_lang: (Boolean) Turn language auto-detection
'on' or 'off' (on by default).
* css_class: Set class name of wrapper div ('codehilite' by default).
* hl_lines: (List of integers) Lines to emphasize, 1-indexed.
Low Level Usage:
>>> code = CodeHilite()
>>> code.src = 'some text' # String or anything with a .readline attr.
>>> code.linenos = True # Turns line numbering on or of.
>>> html = code.hilite()
"""
def __init__(self, src=None, linenums=None, guess_lang=True,
css_class="codehilite", lang=None, style='default',
noclasses=False, tab_length=4, hl_lines=None, use_pygments=True):
self.src = src
self.lang = lang
self.linenums = linenums
self.guess_lang = guess_lang
self.css_class = css_class
self.style = style
self.noclasses = noclasses
self.tab_length = tab_length
self.hl_lines = hl_lines or []
self.use_pygments = use_pygments
def hilite(self):
"""
Pass code to the [Pygments](http://pygments.pocoo.org/) highliter with
optional line numbers. The output should then be styled with css to
your liking. No styles are applied by default - only styling hooks
(i.e.: <span class="k">).
returns : A string of html.
"""
self.src = self.src.strip('\n')
# A shebang/colon header line may set self.lang and self.linenums.
if self.lang is None:
self._parseHeader()
if pygments and self.use_pygments:
try:
lexer = get_lexer_by_name(self.lang)
except ValueError:
# Unknown language: optionally guess, else fall back to plain text.
try:
if self.guess_lang:
lexer = guess_lexer(self.src)
else:
lexer = get_lexer_by_name('text')
except ValueError:
lexer = get_lexer_by_name('text')
formatter = get_formatter_by_name('html',
linenos=self.linenums,
cssclass=self.css_class,
style=self.style,
noclasses=self.noclasses,
hl_lines=self.hl_lines)
return highlight(self.src, lexer, formatter)
else:
# just escape and build markup usable by JS highlighting libs
txt = self.src.replace('&', '&')
txt = txt.replace('<', '<')
txt = txt.replace('>', '>')
txt = txt.replace('"', '"')
classes = []
if self.lang:
classes.append('language-%s' % self.lang)
if self.linenums:
classes.append('linenums')
class_str = ''
if classes:
class_str = ' class="%s"' % ' '.join(classes)
return '<pre class="%s"><code%s>%s</code></pre>\n' % \
(self.css_class, class_str, txt)
def _parseHeader(self):
"""
Determines language of a code block from shebang line and whether said
line should be removed or left in place. If the sheband line contains a
path (even a single /) then it is assumed to be a real shebang line and
left alone. However, if no path is given (e.i.: #!python or :::python)
then it is assumed to be a mock shebang for language identifitation of
a code fragment and removed from the code block prior to processing for
code highlighting. When a mock shebang (e.i: #!python) is found, line
numbering is turned on. When colons are found in place of a shebang
(e.i.: :::python), line numbering is left in the current state - off
by default.
Also parses optional list of highlight lines, like:
:::python hl_lines="1 3"
"""
import re
# split text into lines
lines = self.src.split("\n")
# pull first line to examine
fl = lines.pop(0)
c = re.compile(r'''
(?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons
(?P<path>(?:/\w+)*[/ ])? # Zero or 1 path
(?P<lang>[\w+-]*) # The language
\s* # Arbitrary whitespace
# Optional highlight lines, single- or double-quote-delimited
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
''', re.VERBOSE)
# search first line for shebang
m = c.search(fl)
if m:
# we have a match
try:
self.lang = m.group('lang').lower()
except IndexError:
self.lang = None
if m.group('path'):
# path exists - restore first line
lines.insert(0, fl)
if self.linenums is None and m.group('shebang'):
# Overridable and Shebang exists - use line numbers
self.linenums = True
# Mock shebangs may carry hl_lines="..." for line emphasis.
self.hl_lines = parse_hl_lines(m.group('hl_lines'))
else:
# No match
lines.insert(0, fl)
self.src = "\n".join(lines).strip("\n")
# ------------------ The Markdown Extension -------------------------------
# NOTE(review): indentation appears lost in this copy; nesting is inferred.
class HiliteTreeprocessor(Treeprocessor):
""" Hilight source code in code blocks. """
def run(self, root):
""" Find code blocks and store in htmlStash. """
blocks = root.iter('pre')
for block in blocks:
# Only <pre> elements wrapping exactly one <code> child are treated
# as code blocks.
if len(block) == 1 and block[0].tag == 'code':
code = CodeHilite(
block[0].text,
linenums=self.config['linenums'],
guess_lang=self.config['guess_lang'],
css_class=self.config['css_class'],
style=self.config['pygments_style'],
noclasses=self.config['noclasses'],
tab_length=self.markdown.tab_length,
use_pygments=self.config['use_pygments']
)
# Stash the generated HTML so later serialization doesn't escape it.
placeholder = self.markdown.htmlStash.store(code.hilite(),
safe=True)
# Clear codeblock in etree instance
block.clear()
# Change to p element which will later
# be removed when inserting raw html
block.tag = 'p'
block.text = placeholder
# NOTE(review): indentation appears lost in this copy; nesting is inferred.
class CodeHiliteExtension(Extension):
""" Add source code hilighting to markdown codeblocks. """
def __init__(self, *args, **kwargs):
# define default configs; values are [default, help-text] pairs as
# required by the Extension config machinery.
self.config = {
'linenums': [None,
"Use lines numbers. True=yes, False=no, None=auto"],
'guess_lang': [True,
"Automatic language detection - Default: True"],
'css_class': ["codehilite",
"Set class name for wrapper <div> - "
"Default: codehilite"],
'pygments_style': ['default',
'Pygments HTML Formatter Style '
'(Colorscheme) - Default: default'],
'noclasses': [False,
'Use inline styles instead of CSS classes - '
'Default false'],
'use_pygments': [True,
'Use Pygments to Highlight code blocks. '
'Disable if using a JavaScript library. '
'Default: True']
}
super(CodeHiliteExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md, md_globals):
""" Add HilitePostprocessor to Markdown instance. """
hiliter = HiliteTreeprocessor(md)
hiliter.config = self.getConfigs()
# Run before inline processing so code contents are never inline-parsed.
md.treeprocessors.add("hilite", hiliter, "<inline")
md.registerExtension(self)
def makeExtension(*args, **kwargs):
    """Entry point used by Markdown to construct this extension."""
    return CodeHiliteExtension(*args, **kwargs)
"""Currency exchange rate support that comes from fixer.io."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_EXCHANGE_RATE = 'Exchange rate'
ATTR_TARGET = 'Target currency'
ATTRIBUTION = "Data provided by the European Central Bank (ECB)"
CONF_TARGET = 'target'
DEFAULT_BASE = 'USD'
DEFAULT_NAME = 'Exchange rate'
ICON = 'mdi:currency-usd'
SCAN_INTERVAL = timedelta(days=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_TARGET): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
# NOTE(review): indentation appears lost in this copy; nesting is inferred.
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Fixer.io sensor."""
from fixerio import Fixerio, exceptions
api_key = config.get(CONF_API_KEY)
name = config.get(CONF_NAME)
target = config.get(CONF_TARGET)
# Probe the API once so an unsupported currency fails setup early.
try:
Fixerio(symbols=[target], access_key=api_key).latest()
except exceptions.FixerioException:
_LOGGER.error("One of the given currencies is not supported")
return
data = ExchangeData(target, api_key)
# True => request an immediate state update before the first poll.
add_entities([ExchangeRateSensor(data, name, target)], True)
# NOTE(review): indentation appears lost in this copy; nesting is inferred.
class ExchangeRateSensor(Entity):
"""Representation of a Exchange sensor."""
def __init__(self, data, name, target):
"""Initialize the sensor."""
self.data = data
self._target = target
self._name = name
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
# The target currency code doubles as the unit (e.g. 'EUR').
return self._target
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
# Implicitly returns None until the first successful update.
if self.data.rate is not None:
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_EXCHANGE_RATE: self.data.rate['rates'][self._target],
ATTR_TARGET: self._target,
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data and updates the states."""
self.data.update()
self._state = round(self.data.rate['rates'][self._target], 3)
class ExchangeData:
    """Fetch and cache the latest exchange rates from Fixer.io."""

    def __init__(self, target_currency, api_key):
        """Create the Fixerio client; no network call happens here."""
        from fixerio import Fixerio
        self.api_key = api_key
        self.rate = None  # populated by update()
        self.target_currency = target_currency
        self.exchange = Fixerio(symbols=[self.target_currency],
                                access_key=self.api_key)

    def update(self):
        """Refresh self.rate with the latest data from Fixer.io."""
        self.rate = self.exchange.latest()
# Copyright (C) 2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2015 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2015 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import json
from django.core.urlresolvers import reverse
from .. import factories as f
pytestmark = pytest.mark.django_db
# NOTE(review): indentation appears lost in this copy; nesting is inferred.
def test_watch_user_story(client):
# Owner membership is required so the user may act on the project's API.
user = f.UserFactory.create()
user_story = f.create_userstory(owner=user)
f.MembershipFactory.create(project=user_story.project, user=user, is_owner=True)
url = reverse("userstories-watch", args=(user_story.id,))
client.login(user)
response = client.post(url)
assert response.status_code == 200
# NOTE(review): indentation appears lost in this copy; nesting is inferred.
def test_unwatch_user_story(client):
# Unwatching succeeds (200) even though the story was never watched.
user = f.UserFactory.create()
user_story = f.create_userstory(owner=user)
f.MembershipFactory.create(project=user_story.project, user=user, is_owner=True)
url = reverse("userstories-unwatch", args=(user_story.id,))
client.login(user)
response = client.post(url)
assert response.status_code == 200
# NOTE(review): indentation appears lost in this copy; nesting is inferred.
def test_list_user_story_watchers(client):
user = f.UserFactory.create()
user_story = f.UserStoryFactory(owner=user)
f.MembershipFactory.create(project=user_story.project, user=user, is_owner=True)
# Seed one watcher directly so the listing endpoint has data to return.
f.WatchedFactory.create(content_object=user_story, user=user)
url = reverse("userstory-watchers-list", args=(user_story.id,))
client.login(user)
response = client.get(url)
assert response.status_code == 200
assert response.data[0]['id'] == user.id
# NOTE(review): indentation appears lost in this copy; nesting is inferred.
def test_get_user_story_watcher(client):
user = f.UserFactory.create()
user_story = f.UserStoryFactory(owner=user)
f.MembershipFactory.create(project=user_story.project, user=user, is_owner=True)
watch = f.WatchedFactory.create(content_object=user_story, user=user)
# The detail endpoint is addressed by (story id, watching user id).
url = reverse("userstory-watchers-detail", args=(user_story.id, watch.user.id))
client.login(user)
response = client.get(url)
assert response.status_code == 200
assert response.data['id'] == watch.user.id
# NOTE(review): indentation appears lost in this copy; nesting is inferred.
def test_get_user_story_watchers(client):
# The story detail payload embeds the list of watcher user ids.
user = f.UserFactory.create()
user_story = f.UserStoryFactory(owner=user)
f.MembershipFactory.create(project=user_story.project, user=user, is_owner=True)
url = reverse("userstories-detail", args=(user_story.id,))
f.WatchedFactory.create(content_object=user_story, user=user)
client.login(user)
response = client.get(url)
assert response.status_code == 200
assert response.data['watchers'] == [user.id]
def test_get_user_story_is_watched(client):
    """A watch/unwatch round trip is reflected by the detail endpoint."""
    user = f.UserFactory.create()
    user_story = f.UserStoryFactory(owner=user)
    f.MembershipFactory.create(project=user_story.project, user=user,
                               is_owner=True)
    url_detail = reverse("userstories-detail", args=(user_story.id,))
    url_watch = reverse("userstories-watch", args=(user_story.id,))
    url_unwatch = reverse("userstories-unwatch", args=(user_story.id,))
    client.login(user)

    # Initially unwatched.
    response = client.get(url_detail)
    assert response.status_code == 200
    assert response.data['watchers'] == []
    assert response.data['is_watched'] == False

    # Watching adds the user to the watcher list.
    response = client.post(url_watch)
    assert response.status_code == 200
    response = client.get(url_detail)
    assert response.status_code == 200
    assert response.data['watchers'] == [user.id]
    assert response.data['is_watched'] == True

    # Unwatching removes the user again.
    response = client.post(url_unwatch)
    assert response.status_code == 200
    response = client.get(url_detail)
    assert response.status_code == 200
    assert response.data['watchers'] == []
    assert response.data['is_watched'] == False
intentional syntax error which should NOT be encountered | unknown | github | https://github.com/ansible/ansible | test/integration/targets/include_when_parent_is_dynamic/syntax_error.yml |
# -*- coding: utf-8 -*-
""" Sahana Eden Support Requests
@copyright: 2009-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3SupportModel"]
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
# NOTE(review): indentation appears lost in this copy; nesting is inferred.
class S3SupportModel(S3Model):
"""
Support Requests
@ToDo: Should project_task not be used for this instead?
- we could even sync between the on-instance tickets &
a central ticketing system
"""
names = ["support_req"]
def model(self):
T = current.T
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
# Option maps: integer codes stored in the DB, localized labels shown.
support_request_types = {
1 : T("Bug"),
2 : T("Feature Request")
}
support_status_opts = {
1 : T("Open"),
2 : T("Closed")
}
tablename = "support_req"
table = self.define_table(tablename,
Field("name", notnull=True,
label=T("Short Description")),
Field("type", "integer",
label=T("Type"),
requires=IS_IN_SET(support_request_types,
zero="%s..." % T("Please select")),
represent=lambda opt: \
support_request_types.get(opt,
UNKNOWN_OPT)),
Field("details", "text",
label = T("Details"),
comment = "%s%s" % (T("Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened."),
T("If a ticket was issued then please provide the Ticket ID."))),
Field("status", "integer",
label=T("Status"),
default=1,
requires=IS_IN_SET(support_status_opts),
represent=lambda opt: \
support_status_opts.get(opt, UNKNOWN_OPT)),
Field("actions", "text",
label = T("Actions"),
comment = T("Actions taken as a result of this request.")),
*s3_meta_fields())
# CRUD strings
ADD_REQUEST = T("New Support Request")
current.response.s3.crud_strings[tablename] = Storage(
title_create = ADD_REQUEST,
title_display = T("Request Details"),
title_list = T("Support Requests"),
title_update = T("Edit Request"),
title_search = T("Search Support Requests"),
subtitle_create = T("Add New Request"),
label_list_button = T("List Support Requests"),
label_create_button = ADD_REQUEST,
label_delete_button = T("Delete Request"),
msg_record_created = T("Request added"),
msg_record_modified = T("Request updated"),
msg_record_deleted = T("Request deleted"),
msg_list_empty = T("No Support Requests currently registered"))
# ---------------------------------------------------------------------
# Nothing exported to the global scope; return an empty Storage.
return Storage()
# END ========================================================================= | unknown | codeparrot/codeparrot-clean | ||
# Copyright (C) 2006-2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Model for languages."""
__all__ = [
'Language',
]
from mailman.database.model import Model
from mailman.interfaces.languages import ILanguage
from sqlalchemy import Column, Integer, Unicode
from zope.interface import implementer
@implementer(ILanguage)
class Language(Model):
    """See `ILanguage`."""
    __tablename__ = 'language'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Short language code (e.g. 'en') -- presumably an ISO 639 / IETF tag;
    # verify against the ILanguage interface and callers.
    code = Column(Unicode)
<?php
namespace Illuminate\Tests\Database;
use Illuminate\Database\Eloquent\Builder;
use Illuminate\Database\Eloquent\Collection;
use Illuminate\Database\Eloquent\Model;
use Illuminate\Database\Eloquent\Relations\HasMany;
use Illuminate\Database\Eloquent\Relations\HasOne;
use Illuminate\Database\Query\Builder as QueryBuilder;
use Mockery as m;
use PHPUnit\Framework\TestCase;
class EloquentHasOneOrManyDeprecationTest extends TestCase
{
    /**
     * A parent model whose local key is null must not be matched against
     * related rows, even when a related row has an empty/falsy foreign key.
     */
    public function testHasManyMatchWithNullLocalKey(): void
    {
        $relation = $this->getHasManyRelation();
        // Related rows: one with a real foreign key, one with an empty one.
        $result1 = new HasOneOrManyDeprecationModelStub;
        $result1->foreign_key = 1;
        $result2 = new HasOneOrManyDeprecationModelStub;
        $result2->foreign_key = '';
        // Parents: one with a real id, one with a null id.
        $model1 = new HasOneOrManyDeprecationModelStub;
        $model1->id = 1;
        $model2 = new HasOneOrManyDeprecationModelStub;
        $model2->id = null;
        // match() asks the related model to build the result collection.
        $relation->getRelated()->shouldReceive('newCollection')->andReturnUsing(function ($array) {
            return new Collection($array);
        });
        $models = $relation->match([$model1, $model2], new Collection([$result1, $result2]), 'foo');
        // Only the parent with id = 1 gets a (single-item) related collection;
        // the null-key parent's relation stays null.
        $this->assertCount(1, $models[0]->foo);
        $this->assertNull($models[1]->foo);
    }
    /**
     * Same null-local-key scenario for a HasOne relation: the null-key parent
     * must not be matched to the related row.
     */
    public function testHasOneMatchWithNullLocalKey(): void
    {
        $relation = $this->getHasOneRelation();
        $result1 = new HasOneOrManyDeprecationModelStub;
        $result1->foreign_key = 1;
        $model1 = new HasOneOrManyDeprecationModelStub;
        $model1->id = 1;
        $model2 = new HasOneOrManyDeprecationModelStub;
        $model2->id = null;
        $models = $relation->match([$model1, $model2], new Collection([$result1]), 'foo');
        $this->assertInstanceOf(HasOneOrManyDeprecationModelStub::class, $models[0]->foo);
        $this->assertNull($models[1]->foo);
    }
    /**
     * Build a HasMany relation around fully mocked builder/parent objects.
     * The shouldReceive() calls pin the constraints the relation is expected
     * to add for a parent whose 'id' attribute is 1.
     */
    protected function getHasManyRelation(): HasMany
    {
        $queryBuilder = m::mock(QueryBuilder::class);
        $builder = m::mock(Builder::class, [$queryBuilder]);
        $builder->shouldReceive('whereNotNull')->with('table.foreign_key');
        $builder->shouldReceive('where')->with('table.foreign_key', '=', 1);
        $related = m::mock(Model::class);
        $builder->shouldReceive('getModel')->andReturn($related);
        $parent = m::mock(Model::class);
        $parent->shouldReceive('getAttribute')->with('id')->andReturn(1);
        $parent->shouldReceive('getCreatedAtColumn')->andReturn('created_at');
        $parent->shouldReceive('getUpdatedAtColumn')->andReturn('updated_at');
        return new HasMany($builder, $parent, 'table.foreign_key', 'id');
    }
    /**
     * Build a HasOne relation with the same mocked collaborators as
     * getHasManyRelation().
     */
    protected function getHasOneRelation(): HasOne
    {
        $queryBuilder = m::mock(QueryBuilder::class);
        $builder = m::mock(Builder::class, [$queryBuilder]);
        $builder->shouldReceive('whereNotNull')->with('table.foreign_key');
        $builder->shouldReceive('where')->with('table.foreign_key', '=', 1);
        $related = m::mock(Model::class);
        $builder->shouldReceive('getModel')->andReturn($related);
        $parent = m::mock(Model::class);
        $parent->shouldReceive('getAttribute')->with('id')->andReturn(1);
        $parent->shouldReceive('getCreatedAtColumn')->andReturn('created_at');
        $parent->shouldReceive('getUpdatedAtColumn')->andReturn('updated_at');
        return new HasOne($builder, $parent, 'table.foreign_key', 'id');
    }
}
// Minimal model used as both the parent and the related model in the tests
// above. The keys are declared as plain public properties so the tests can
// assign them directly, bypassing Eloquent attribute handling.
class HasOneOrManyDeprecationModelStub extends Model
{
    public $foreign_key;
}
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2012 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required, permission_required
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.db.models import Count
from django.core.urlresolvers import reverse
from django.template.context import RequestContext
from django.utils.translation import ugettext as _
from dialer_contact.models import Contact
from dialer_contact.constants import CONTACT_STATUS
from dialer_campaign.function_def import date_range, user_dialer_setting, \
dialer_setting_limit
from frontend.function_def import calculate_date
from frontend.constants import SEARCH_TYPE
from frontend_notification.views import frontend_send_notification
from django_lets_go.common_functions import get_pagination_vars, ceil_strdate,\
percentage, getvar, unset_session_var
from mod_utils.helper import Export_choice
from mod_sms.models import SMSCampaign, SMSCampaignSubscriber, SMSMessage
from mod_sms.constants import SMS_CAMPAIGN_STATUS, SMS_CAMPAIGN_COLUMN_NAME,\
SMS_REPORT_COLUMN_NAME, COLOR_SMS_DISPOSITION, SMS_NOTIFICATION_NAME,\
SMS_SUBSCRIBER_STATUS, SMS_MESSAGE_STATUS
from mod_sms.forms import SMSCampaignForm, SMSDashboardForm, SMSSearchForm,\
SMSCampaignSearchForm, DuplicateSMSCampaignForm
from mod_sms.function_def import check_sms_dialer_setting, get_sms_notification_status
from datetime import datetime
from django.utils.timezone import utc
from dateutil.relativedelta import relativedelta
import tablib
import time
# Common post-action redirect target for all sms campaign views below.
redirect_url_to_smscampaign_list = '/sms_campaign/'
@login_required
def update_sms_campaign_status_admin(request, pk, status):
    """Change an SMS campaign's status (start|stop|pause|abort) from the
    admin interface (via the sms campaign changelist), then notify the
    affected user and return to the changelist."""
    campaign = SMSCampaign.objects.get(pk=pk)
    notified_user = campaign.common_sms_campaign_status(status)
    notification = get_sms_notification_status(int(status))
    frontend_send_notification(request, notification, notified_user)
    return HttpResponseRedirect(reverse("admin:mod_sms_smscampaign_changelist"))
@login_required
def update_sms_campaign_status_cust(request, pk, status):
    """Change an SMS campaign's status (start|stop|pause|abort) from the
    customer interface (via the sms campaign list), then notify the
    affected user and return to the campaign list."""
    campaign = SMSCampaign.objects.get(pk=pk)
    notified_user = campaign.common_sms_campaign_status(status)
    notification = get_sms_notification_status(int(status))
    frontend_send_notification(request, notification, notified_user)
    return HttpResponseRedirect(redirect_url_to_smscampaign_list)
# SMSCampaign
@permission_required('mod_sms.view_smscampaign', login_url='/')
@login_required
def sms_campaign_list(request):
    """List all sms campaigns for the logged in user.

    **Attributes**:

        * ``template`` - mod_sms/list.html

    **Logic Description**:

        * List the sms campaigns belonging to the logged in user, optionally
          filtered by phonebook and status, sorted and paginated.
    """
    form = SMSCampaignSearchForm(request.user, request.POST or None)
    sort_col_field_list = ['id', 'name', 'startingdate', 'status', 'totalcontact']
    pag_vars = get_pagination_vars(request, sort_col_field_list, default_sort_field='id')
    phonebook_id = ''
    status = 'all'
    post_var_with_page = 0
    if form.is_valid():
        # Fresh search submitted: drop the stored filters, then persist the
        # new ones in the session so paging/sorting keeps them.
        field_list = ['phonebook_id', 'status']
        unset_session_var(request, field_list)
        post_var_with_page = 1
        phonebook_id = getvar(request, 'phonebook_id', setsession=True)
        status = getvar(request, 'status', setsession=True)
    if request.GET.get('page') or request.GET.get('sort_by'):
        # Paging/sorting request: restore the filters from the session.
        post_var_with_page = 1
        phonebook_id = request.session.get('session_phonebook_id')
        status = request.session.get('session_status')
        form = SMSCampaignSearchForm(request.user, initial={'status': status,
                                                            'phonebook_id': phonebook_id})
    if post_var_with_page == 0:
        # default: no search/paging parameters -> clear any stale filters
        field_list = ['status', 'phonebook_id']
        unset_session_var(request, field_list)
    kwargs = {}
    if phonebook_id and phonebook_id != '0':
        kwargs['phonebook__id__in'] = [int(phonebook_id)]
    if status and status != 'all':
        kwargs['status'] = status
    # Apply the (possibly empty) filters once, then slice for pagination.
    # Previously the unfiltered branch skipped the slice and rendered the
    # entire queryset on a single page.
    all_smscampaign_list = SMSCampaign.objects.filter(user=request.user)\
        .filter(**kwargs).order_by(pag_vars['sort_order'])
    smscampaign_count = all_smscampaign_list.count()
    smscampaign_list = all_smscampaign_list[pag_vars['start_page']:pag_vars['end_page']]
    data = {
        'form': form,
        'smscampaign_list': smscampaign_list,
        'total_campaign': smscampaign_count,
        'SMS_CAMPAIGN_COLUMN_NAME': SMS_CAMPAIGN_COLUMN_NAME,
        'col_name_with_order': pag_vars['col_name_with_order'],
        'msg': request.session.get('msg'),
        'error_msg': request.session.get('error_msg'),
        'info_msg': request.session.get('info_msg'),
    }
    # Flash messages are one-shot: clear them after rendering once.
    request.session['msg'] = ''
    request.session['error_msg'] = ''
    request.session['info_msg'] = ''
    return render_to_response('mod_sms/list.html', data, context_instance=RequestContext(request))
@permission_required('mod_sms.add_smscampaign', login_url='/')
@login_required
def sms_campaign_add(request):
    """Add a new sms campaign for the logged in user.

    **Attributes**:

        * ``form`` - SMSCampaignForm
        * ``template`` - mod_sms/change.html

    **Logic Description**:

        * Before adding a sms campaign, check the dialer setting limit if
          applicable to the user.
        * Add the new sms campaign which will belong to the logged in user
          via SMSCampaignForm & get redirected to the sms campaign list.
    """
    # If dialer setting is not attached with user, redirect to sms campaign list
    if not user_dialer_setting(request.user):
        request.session['error_msg'] = \
            _("in order to add a sms campaign, you need to have your \
settings configured properly, please contact the admin.")
        return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    # Check dialer setting limit (only on the initial GET, not on submit)
    if request.user and request.method != 'POST':
        # check Max Number of running campaign
        if check_sms_dialer_setting(request, check_for="smscampaign"):
            msg = _("you have too many sms campaigns. Max allowed %(limit)s")\
                % {'limit': dialer_setting_limit(request, limit_for="smscampaign")}
            request.session['msg'] = msg
            # sms campaign limit reached
            frontend_send_notification(request, SMS_NOTIFICATION_NAME.sms_campaign_limit_reached)
            return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    form = SMSCampaignForm(request.user, request.POST or None)
    # Add sms campaign
    if form.is_valid():
        obj = form.save(commit=False)
        # request.user is already the authenticated User instance; the
        # previous User.objects.get(username=request.user) issued a
        # redundant query relying on str(User) == username.
        obj.user = request.user
        obj.stoppeddate = obj.expirationdate
        obj.save()
        # commit=False above skipped m2m fields; persist them now.
        form.save_m2m()
        request.session["msg"] = _('"%(name)s" is added.') % {'name': request.POST['name']}
        return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    data = {
        'form': form,
        'action': 'add',
    }
    return render_to_response('mod_sms/change.html', data, context_instance=RequestContext(request))
@permission_required('mod_sms.delete_smsmessage', login_url='/')
@login_required
def sms_campaign_del(request, object_id):
    """Delete/Stop sms campaign(s) for the logged in user.

    **Attributes**:

        * ``object_id`` - Selected sms campaign object; 0 means the ids come
          from the ``select`` POST list (bulk action).

    **Logic Description**:

        * Delete/Stop the selected sms campaign(s) from the sms campaign list.
    """
    stop_sms_campaign = request.GET.get('stop_sms_campaign', False)
    if int(object_id) != 0:
        # Single record: 404 if it does not exist or belongs to another user.
        sms_campaign = get_object_or_404(SMSCampaign, pk=object_id, user=request.user)
        if stop_sms_campaign:
            sms_campaign.status = SMS_CAMPAIGN_STATUS.END
            sms_campaign.save()
            request.session["msg"] = _('"%(name)s" is stopped.') % {'name': sms_campaign.name}
        else:
            request.session["msg"] = _('"%(name)s" is deleted.') % {'name': sms_campaign.name}
            sms_campaign.delete()
    else:
        # Bulk action. The previous implementation interpolated the raw POST
        # values into SQL via extra(where=['id IN (%s)' % values]) -- a SQL
        # injection vector -- and did not restrict the queryset to the
        # logged in user. Use a parameterized id__in filter instead and keep
        # only numeric ids.
        values = [v for v in request.POST.getlist('select') if str(v).isdigit()]
        sms_campaign_list = SMSCampaign.objects.filter(user=request.user, id__in=values)
        if sms_campaign_list:
            count = sms_campaign_list.count()
            if stop_sms_campaign:
                sms_campaign_list.update(status=SMS_CAMPAIGN_STATUS.END)
                request.session["msg"] = _('%(count)s sms campaign(s) are stopped.') % {'count': count}
            else:
                request.session["msg"] = _('%(count)s sms campaign(s) are deleted.') % {'count': count}
                sms_campaign_list.delete()
    return HttpResponseRedirect(redirect_url_to_smscampaign_list)
@permission_required('mod_sms.change_smsmessage', login_url='/')
@login_required
def sms_campaign_change(request, object_id):
    """Update/Delete sms campaign for the logged in user.

    **Attributes**:

        * ``object_id`` - Selected campaign object
        * ``form`` - SMSCampaignForm
        * ``template`` - mod_sms/change.html

    **Logic Description**:

        * Update/delete the selected sms campaign from the sms campaign list
          via SMSCampaignForm & get redirected to the sms campaign list.
    """
    # If dialer setting is not attached with user, redirect to sms campaign list
    if not user_dialer_setting(request.user):
        return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    sms_campaign = get_object_or_404(SMSCampaign, pk=object_id, user=request.user)
    form = SMSCampaignForm(request.user, request.POST or None, instance=sms_campaign)
    if form.is_valid():
        if request.POST.get('delete'):
            # Delete sms campaign (sets the flash message itself)
            sms_campaign_del(request, object_id)
        else:
            # Update sms campaign. form.save() already persists the instance;
            # the previous extra obj.save() issued a redundant second UPDATE.
            form.save()
            request.session["msg"] = _('"%(name)s" is updated.') % {'name': request.POST['name']}
        return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    data = {
        'form': form,
        'action': 'update',
    }
    return render_to_response('mod_sms/change.html', data, context_instance=RequestContext(request))
@login_required
def sms_campaign_duplicate(request, id):
    """
    Duplicate an sms campaign via DuplicateSMSCampaignForm.

    **Attributes**:

        * ``id`` - Selected sms campaign object
        * ``form`` - DuplicateSMSCampaignForm
        * ``template`` - mod_sms/sms_campaign_duplicate.html
    """
    # If dialer setting is not attached with user, redirect to sms campaign list
    if not user_dialer_setting(request.user):
        return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    form = DuplicateSMSCampaignForm(request.user, request.POST or None)
    request.session['error_msg'] = ''
    if request.method == 'POST':
        if form.is_valid():
            # Restrict the lookup to the logged in user: previously any
            # user's campaign could be duplicated by guessing its pk, and a
            # missing pk raised DoesNotExist (HTTP 500) instead of a 404.
            sms_campaign_obj = get_object_or_404(SMSCampaign, pk=id, user=request.user)
            now = datetime.utcnow().replace(tzinfo=utc)
            # Clearing the pk makes save() INSERT a fresh row.
            sms_campaign_obj.pk = None
            sms_campaign_obj.campaign_code = request.POST.get('campaign_code')
            sms_campaign_obj.name = request.POST.get('name')
            sms_campaign_obj.status = SMS_CAMPAIGN_STATUS.PAUSE
            sms_campaign_obj.startingdate = now
            sms_campaign_obj.expirationdate = now + relativedelta(days=+1)
            sms_campaign_obj.stoppeddate = now + relativedelta(days=+1)
            sms_campaign_obj.imported_phonebook = ''
            sms_campaign_obj.totalcontact = 0
            sms_campaign_obj.save()
            # Many to many field: can only be copied once the new pk exists.
            for pb in request.POST.getlist('phonebook'):
                sms_campaign_obj.phonebook.add(pb)
            return HttpResponseRedirect(redirect_url_to_smscampaign_list)
        else:
            request.session['error_msg'] = True
    data = {
        'sms_campaign_id': id,
        'form': form,
        'err_msg': request.session.get('error_msg'),
    }
    request.session['error_msg'] = ''
    return render_to_response('mod_sms/sms_campaign_duplicate.html', data, context_instance=RequestContext(request))
@login_required
def sms_campaign_text_message(request, object_id):
    """Display the text message of one of the user's sms campaigns.

    **Attributes**:

        * ``object_id`` - Selected sms campaign object
        * ``template`` - mod_sms/sms_campaign_text_message.html
    """
    # Users without a dialer setting are sent back to the campaign list.
    if not user_dialer_setting(request.user):
        return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    campaign = get_object_or_404(SMSCampaign, pk=object_id, user=request.user)
    request.session['error_msg'] = ''
    context = {'sms_campaign': campaign}
    return render_to_response('mod_sms/sms_campaign_text_message.html', context, context_instance=RequestContext(request))
@permission_required('mod_sms.view_sms_dashboard', login_url='/')
@login_required
def sms_dashboard(request, on_index=None):
    """SMS dashboard gives the following information
    * No of SMSCampaigns for logged in user
    * Total phonebook contacts
    * Total SMSCampaigns contacts
    * Amount of contact reached today
    * Disposition of sms via pie chart
    * SMS count shown on graph by days/hours
    **Attributes**:
        * ``template`` - mod_sms/sms_dashboard.html
        * ``form`` - SMSDashboardForm
    """
    # All sms_campaign for logged in User
    sms_campaign_id_list = SMSCampaign.objects.values_list('id', flat=True).filter(user=request.user).order_by('id')
    # Contacts count which are active and belong to those phonebook(s) which is
    # associated with all sms campaign
    pb_active_contact_count = Contact.objects.filter(
        phonebook__smscampaign__in=sms_campaign_id_list,
        status=CONTACT_STATUS.ACTIVE).count()
    # Form used to pick the campaign and the time window shown on the graphs.
    form = SMSDashboardForm(request.user, request.POST or None)
    # Running totals per SMS disposition, filled from the grouped query below.
    total_record = dict()
    total_sms_count = 0
    total_unsent = 0
    total_sent = 0
    total_delivered = 0
    total_failed = 0
    total_no_route = 0
    total_unauthorized = 0
    select_graph_for = 'sms count' # default
    search_type = SEARCH_TYPE.D_Last_24_hours # default Last 24 hours
    selected_sms_campaign = ''
    if sms_campaign_id_list:
        selected_sms_campaign = sms_campaign_id_list[0] # default sms campaign id
    # selected_sms_campaign should not be empty
    if selected_sms_campaign:
        if form.is_valid():
            selected_sms_campaign = request.POST['smscampaign']
            search_type = request.POST['search_type']
        end_date = datetime.utcnow().replace(tzinfo=utc)
        start_date = calculate_date(search_type)
        # date_length is the prefix length of the stringified send_date used
        # for grouping: 10 = per-day, 13 = per-hour, 16 = per-minute buckets.
        if int(search_type) >= SEARCH_TYPE.B_Last_7_days: # all options except 30 days
            date_length = 13
            if int(search_type) == SEARCH_TYPE.C_Yesterday: # yesterday
                now = datetime.utcnow().replace(tzinfo=utc)
                start_date = datetime(now.year, now.month, now.day, 0, 0, 0, 0).replace(tzinfo=utc) \
                    - relativedelta(days=1)
                end_date = datetime(now.year, now.month, now.day, 23, 59, 59, 999999).replace(tzinfo=utc) \
                    - relativedelta(days=1)
            if int(search_type) >= SEARCH_TYPE.E_Last_12_hours:
                date_length = 16
        else:
            date_length = 10 # Last 30 days option
        # Truncate send_date in SQL so annotate() below groups per time bucket.
        select_data = {
            "send_date": "SUBSTR(CAST(send_date as CHAR(30)),1," + str(date_length) + ")"}
        # This list (grouped by bucket AND status) feeds the pie chart totals.
        list_sms = SMSMessage.objects.filter(
            sender=request.user,
            sms_campaign_id=selected_sms_campaign,
            send_date__range=(start_date, end_date))\
            .extra(select=select_data)\
            .values('send_date', 'status')\
            .annotate(Count('send_date'))\
            .order_by('send_date')
        for i in list_sms:
            # Tally each disposition into its running total.
            if i['status'] == 'Unsent':
                total_unsent += i['send_date__count']
            elif i['status'] == 'Sent':
                total_sent += i['send_date__count']
            elif i['status'] == 'Delivered':
                total_delivered += i['send_date__count']
            elif i['status'] == 'Failed':
                total_failed += i['send_date__count']
            elif i['status'] == 'No_Route':
                total_no_route += i['send_date__count']
            else:
                total_unauthorized += i['send_date__count'] # Unauthorized
            total_sms_count += i['send_date__count']
        # Re-query grouped by bucket only (status ignored) for the time series.
        list_sms = SMSMessage.objects.filter(
            sender=request.user,
            sms_campaign_id=selected_sms_campaign,
            send_date__range=(start_date, end_date))\
            .extra(select=select_data).values('send_date')\
            .annotate(Count('send_date')).order_by('send_date')
        mintime = start_date
        maxtime = end_date
        sms_dict = {}
        sms_dict_with_min = {}
        # Parse each truncated send_date string back into an aware datetime
        # and index the counts by integer YYYYMMDDHH[MM] keys.
        for data in list_sms:
            if int(search_type) >= SEARCH_TYPE.B_Last_7_days:
                ctime = datetime(int(data['send_date'][0:4]),
                                 int(data['send_date'][5:7]),
                                 int(data['send_date'][8:10]),
                                 int(data['send_date'][11:13]),
                                 0, 0, 0).replace(tzinfo=utc)
                if int(search_type) >= SEARCH_TYPE.E_Last_12_hours:
                    ctime = datetime(int(data['send_date'][0:4]),
                                     int(data['send_date'][5:7]),
                                     int(data['send_date'][8:10]),
                                     int(data['send_date'][11:13]),
                                     int(data['send_date'][14:16]),
                                     0, 0).replace(tzinfo=utc)
            else:
                ctime = datetime(int(data['send_date'][0:4]),
                                 int(data['send_date'][5:7]),
                                 int(data['send_date'][8:10]),
                                 0, 0, 0, 0).replace(tzinfo=utc)
            # Widen [mintime, maxtime] to cover every observed bucket.
            if ctime > maxtime:
                maxtime = ctime
            elif ctime < mintime:
                mintime = ctime
            # all options except 30 days
            if int(search_type) >= SEARCH_TYPE.B_Last_7_days:
                sms_dict[int(ctime.strftime("%Y%m%d%H"))] = {
                    'sms_count': data['send_date__count']
                }
                sms_dict_with_min[int(ctime.strftime("%Y%m%d%H%M"))] = {
                    'sms_count': data['send_date__count']
                }
            else:
                # Last 30 days option
                sms_dict[int(ctime.strftime("%Y%m%d"))] = {
                    'sms_count': data['send_date__count']
                }
        # Walk every bucket in [mintime, maxtime] so empty buckets plot as 0.
        dateList = date_range(mintime, maxtime, q=search_type)
        i = 0
        total_record = {}
        for date in dateList:
            inttime = int(date.strftime("%Y%m%d"))
            # last 7 days | yesterday | last 24 hrs -> one graph point per hour
            if (int(search_type) == SEARCH_TYPE.B_Last_7_days
               or int(search_type) == SEARCH_TYPE.C_Yesterday
               or int(search_type) == SEARCH_TYPE.D_Last_24_hours):
                for option in range(0, 24):
                    day_time = int(str(inttime) + str(option).zfill(2))
                    graph_day = datetime(int(date.strftime("%Y")),
                                         int(date.strftime("%m")),
                                         int(date.strftime("%d")),
                                         int(str(option).zfill(2))).replace(tzinfo=utc)
                    # Keys are millisecond timestamps (1000 * epoch seconds).
                    dt = int(1000 * time.mktime(graph_day.timetuple()))
                    total_record[dt] = {'sms_count': 0}
                    if day_time in sms_dict.keys():
                        total_record[dt]['sms_count'] += sms_dict[day_time]['sms_count']
            # last 12 hrs | last 6 hrs | last 1 hrs -> one point per minute
            elif (int(search_type) == SEARCH_TYPE.E_Last_12_hours
                or int(search_type) == SEARCH_TYPE.F_Last_6_hours
                or int(search_type) == SEARCH_TYPE.G_Last_hour):
                for hour in range(0, 24):
                    for minute in range(0, 60):
                        hr_time = int(str(inttime) + str(hour).zfill(2) + str(minute).zfill(2))
                        graph_day = datetime(int(date.strftime("%Y")),
                                             int(date.strftime("%m")),
                                             int(date.strftime("%d")),
                                             int(str(hour).zfill(2)),
                                             int(str(minute).zfill(2))).replace(tzinfo=utc)
                        dt = int(1000 * time.mktime(graph_day.timetuple()))
                        total_record[dt] = {'sms_count': 0}
                        if hr_time in sms_dict_with_min.keys():
                            total_record[dt]['sms_count'] += sms_dict_with_min[hr_time]['sms_count']
            else:
                # Last 30 days option -> one point per day
                graph_day = datetime(int(date.strftime("%Y")),
                                     int(date.strftime("%m")),
                                     int(date.strftime("%d"))).replace(tzinfo=utc)
                dt = int(1000 * time.mktime(graph_day.timetuple()))
                total_record[dt] = {'sms_count': 0}
                if inttime in sms_dict.keys():
                    total_record[dt]['sms_count'] += sms_dict[inttime]['sms_count']
        # sorting on date col
        total_record = total_record.items()
        total_record = sorted(total_record, key=lambda k: k[0])
        # lineWithFocusChart
        final_charttype = "lineWithFocusChart"
        xdata = []
        ydata = []
        for i in total_record:
            xdata.append(i[0])
            ydata.append(i[1]['sms_count'])
        tooltip_date = "%d %b %y %H:%M %p"
        extra_serie1 = {
            "tooltip": {"y_start": "", "y_end": " SMS"},
            "date_format": tooltip_date
        }
        final_chartdata = {
            'x': xdata,
            'name1': 'SMS', 'y1': ydata, 'extra1': extra_serie1,
        }
    # NOTE(review): if the user has no campaigns, final_chartdata and
    # final_charttype are never assigned and the context build below raises
    # NameError -- confirm and guard if reachable.
    # Contacts which are successfully messaged for running sms campaign
    reached_contact = 0
    if sms_campaign_id_list:
        # "Reached today": subscribers completed within today's UTC bounds.
        now = datetime.utcnow().replace(tzinfo=utc)
        start_date = datetime(now.year, now.month, now.day, 0, 0, 0, 0).replace(tzinfo=utc)
        end_date = datetime(now.year, now.month, now.day, 23, 59, 59, 999999).replace(tzinfo=utc)
        sms_campaign_subscriber = SMSCampaignSubscriber.objects.filter(
            sms_campaign_id__in=sms_campaign_id_list,
            status=SMS_SUBSCRIBER_STATUS.COMPLETE,
            updated_date__range=(start_date, end_date)).count()
        reached_contact += sms_campaign_subscriber
    # PieChart
    sms_analytic_charttype = "pieChart"
    xdata = []
    ydata = []
    sms_analytic_chartdata = {'x': xdata, 'y1': ydata}
    if total_sms_count != 0:
        for i in SMS_MESSAGE_STATUS:
            xdata.append(i[0].upper())
        # Y-axis order depend upon SMS_MESSAGE_STATUS
        # 'UNSENT', 'SENT', 'DELIVERED', 'FAILED', 'NO_ROUTE', 'UNAUTHORIZED'
        ydata = [
            percentage(total_unsent, total_sms_count),
            percentage(total_sent, total_sms_count),
            percentage(total_delivered, total_sms_count),
            percentage(total_failed, total_sms_count),
            percentage(total_no_route, total_sms_count),
            percentage(total_unauthorized, total_sms_count),
        ]
        # Slice colours in the same disposition order as ydata above.
        color_list = [
            COLOR_SMS_DISPOSITION['UNSENT'],
            COLOR_SMS_DISPOSITION['SENT'],
            COLOR_SMS_DISPOSITION['DELIVERED'],
            COLOR_SMS_DISPOSITION['FAILED'],
            COLOR_SMS_DISPOSITION['NO_ROUTE'],
            COLOR_SMS_DISPOSITION['UNAUTHORIZED'],
        ]
        extra_serie = {
            "tooltip": {"y_start": "", "y_end": " %"},
            "color_list": color_list
        }
        kwargs1 = {}
        kwargs1['resize'] = True
        sms_analytic_chartdata = {
            'x': xdata, 'y1': ydata, 'extra1': extra_serie,
            'kwargs1': kwargs1,
        }
    # Template context: counters, disposition colours and both chart payloads.
    data = {
        'form': form,
        'SEARCH_TYPE': SEARCH_TYPE,
        'pb_active_contact_count': pb_active_contact_count,
        'reached_contact': reached_contact,
        'total_record': total_record,
        'select_graph_for': select_graph_for,
        'total_sms_count': total_sms_count,
        'total_unsent': total_unsent,
        'total_sent': total_sent,
        'total_delivered': total_delivered,
        'total_failed': total_failed,
        'total_no_route': total_no_route,
        'total_unauthorized': total_unauthorized,
        'unsent_color': COLOR_SMS_DISPOSITION['UNSENT'],
        'sent_color': COLOR_SMS_DISPOSITION['SENT'],
        'delivered_color': COLOR_SMS_DISPOSITION['DELIVERED'],
        'failed_color': COLOR_SMS_DISPOSITION['FAILED'],
        'no_route_color': COLOR_SMS_DISPOSITION['NO_ROUTE'],
        'unauthorized_color': COLOR_SMS_DISPOSITION['UNAUTHORIZED'],
        'final_chartcontainer': 'lineplusbarwithfocuschart_container',
        'final_chartdata': final_chartdata,
        'final_charttype': final_charttype,
        'final_extra': {
            'x_is_date': True,
            'x_axis_format': '%d %b %Y',
            'tag_script_js': True,
            'jquery_on_ready': False,
        },
        'sms_analytic_chartcontainer': 'piechart_container',
        'sms_analytic_charttype': sms_analytic_charttype,
        'sms_analytic_chartdata': sms_analytic_chartdata,
        'sms_analytic_extra': {
            'x_is_date': False,
            'x_axis_format': '',
            'tag_script_js': True,
            'jquery_on_ready': False,
        },
    }
    # When embedded on the index page, return the raw context instead of HTML.
    if on_index == 'yes':
        return data
    return render_to_response('mod_sms/sms_dashboard.html', data, context_instance=RequestContext(request))
@login_required
@permission_required('mod_sms.view_sms_report', login_url='/')
def sms_report(request):
    """SMS Report
    **Attributes**:
        * ``form`` - SMSSearchForm
        * ``template`` - mod_sms/sms_report.html
    **Logic Description**:
        * Get SMS list according to search parameters for logged-in user
    **Important variable**:
        * ``request.session['sms_record_kwargs']`` - stores sms kwargs
    """
    sort_col_field_list = ['send_date', 'recipient_number', 'uuid', 'status', 'status_message', 'gateway']
    pag_vars = get_pagination_vars(request, sort_col_field_list, default_sort_field='send_date')
    from_date = ''
    to_date = ''
    status = 'all'
    smscampaign = ''
    form = SMSSearchForm(request.user, request.POST or None)
    action = 'tabs-1'
    kwargs = {}
    post_var_with_page = 0
    if form.is_valid():
        # Fresh search submitted: reset, then persist the filters in the
        # session (dates are stored as 'YYYY-MM-DD' strings).
        post_var_with_page = 1
        field_list = ['start_date', 'end_date', 'status', 'smscampaign']
        unset_session_var(request, field_list)
        from_date = getvar(request, 'from_date')
        to_date = getvar(request, 'to_date')
        start_date = ceil_strdate(str(from_date), 'start')
        end_date = ceil_strdate(str(to_date), 'end')
        converted_start_date = start_date.strftime('%Y-%m-%d')
        converted_end_date = end_date.strftime('%Y-%m-%d')
        request.session['session_start_date'] = converted_start_date
        request.session['session_end_date'] = converted_end_date
        status = getvar(request, 'status', setsession=True)
        smscampaign = getvar(request, 'smscampaign', setsession=True)
    if request.GET.get('page') or request.GET.get('sort_by'):
        # Paging/sorting request: rebuild the filters from the session.
        post_var_with_page = 1
        start_date = request.session.get('session_start_date')
        end_date = request.session.get('session_end_date')
        start_date = ceil_strdate(start_date, 'start')
        end_date = ceil_strdate(end_date, 'end')
        status = request.session.get('session_status')
        smscampaign = request.session.get('session_smscampaign')
        form = SMSSearchForm(request.user,
                             initial={'from_date': start_date.strftime('%Y-%m-%d'),
                                      'to_date': end_date.strftime('%Y-%m-%d'),
                                      'status': status,
                                      'smscampaign': smscampaign})
    if post_var_with_page == 0:
        # default: no search/paging parameters -> report on today (UTC).
        tday = datetime.utcnow().replace(tzinfo=utc)
        from_date = tday.strftime('%Y-%m-%d')
        to_date = tday.strftime('%Y-%m-%d')
        start_date = datetime(tday.year, tday.month, tday.day, 0, 0, 0, 0).replace(tzinfo=utc)
        end_date = datetime(tday.year, tday.month, tday.day, 23, 59, 59, 999999).replace(tzinfo=utc)
        status = 'all'
        smscampaign = ''
        form = SMSSearchForm(request.user, initial={'from_date': from_date, 'to_date': to_date,
                                                    'status': status, 'smscampaign': smscampaign})
        # Reset the stored search state to the defaults.
        # NOTE(review): this stores datetime objects in the session while the
        # search branch stores strings -- ceil_strdate() on the next paging
        # request would then receive a datetime; confirm this path.
        request.session['session_start_date'] = start_date
        request.session['session_end_date'] = end_date
        request.session['session_status'] = status
        request.session['session_smscampaign'] = smscampaign
    # Build the ORM filter from the selected search options.
    kwargs['sender'] = request.user
    if start_date and end_date:
        kwargs['send_date__range'] = (start_date, end_date)
    # NOTE(review): the two branches below look dead -- start_date/end_date
    # are datetimes in every path above and never compare equal to '' .
    if start_date and end_date == '':
        kwargs['send_date__gte'] = start_date
    if start_date == '' and end_date:
        kwargs['send_date__lte'] = end_date
    if status and status != 'all':
        kwargs['status__exact'] = status
    if smscampaign and smscampaign != '0':
        kwargs['sms_campaign_id'] = smscampaign
    smslist = SMSMessage.objects.filter(**kwargs)
    all_sms_list = smslist.values_list('id', flat=True)
    # Current page slice for the report table.
    sms_list = smslist.order_by(pag_vars['sort_order'])[pag_vars['start_page']:pag_vars['end_page']]
    # Session variable is used to get record set with searched option
    # into export file (see export_sms_report).
    request.session['sms_record_kwargs'] = kwargs
    # Truncate send_date to the day (first 10 chars) for the daily summary.
    select_data = {"send_date": "SUBSTR(CAST(send_date as CHAR(30)),1,10)"}
    # Get total records from the SMSMessage table for the Daily SMS Report.
    total_data = all_sms_list.extra(select=select_data).values('send_date')\
        .annotate(Count('send_date')).order_by('-send_date')
    # Sum the per-day counts to get the overall total.
    if total_data.count() != 0:
        total_sms = sum([x['send_date__count'] for x in total_data])
    else:
        total_sms = 0
    data = {
        'form': form,
        'from_date': from_date,
        'all_sms_list': all_sms_list,
        'sms_list': sms_list,
        'sms_count': all_sms_list.count() if all_sms_list else 0,
        'SMS_REPORT_COLUMN_NAME': SMS_REPORT_COLUMN_NAME,
        'col_name_with_order': pag_vars['col_name_with_order'],
        'start_date': start_date,
        'end_date': end_date,
        'to_date': to_date,
        'action': action,
        'status': status,
        'total_data': total_data.reverse(),
        'total_sms': total_sms,
    }
    return render_to_response('mod_sms/sms_report.html', data, context_instance=RequestContext(request))
@login_required
def export_sms_report(request):
    """Export the last searched SMS records as CSV/XLS/JSON.

    **Important variable**:

        * ``request.session['sms_record_kwargs']`` - filter kwargs stored by
          ``sms_report`` for the most recent search

    **Exported fields**: ['sender', 'recipient_number', 'send_date', 'uuid',
        'status', 'status_message', 'gateway']
    """
    format_type = request.GET['format']
    # Stream the export back as a file download.
    response = HttpResponse(content_type='text/%s' % format_type)
    response['Content-Disposition'] = 'attachment;filename=sms_export.%s' % format_type
    # Re-use the filters from the last sms_report search (the previous
    # `kwargs = {}` line was a dead assignment, immediately overwritten).
    kwargs = request.session['sms_record_kwargs']
    qs = SMSMessage.objects.filter(**kwargs)
    headers = ('sender', 'recipient_number', 'send_date', 'uuid',
               'status', 'status_message', 'gateway')
    list_val = []
    for i in qs:
        send_date = i.send_date
        if format_type in (Export_choice.JSON, Export_choice.XLS):
            # datetimes are not JSON/XLS serializable -> stringify them.
            send_date = str(i.send_date)
        gateway = i.gateway.name if i.gateway else ''
        list_val.append([
            i.sender.username,
            i.recipient_number,
            send_date,
            str(i.uuid),
            i.status,
            i.status_message,
            gateway,
        ])
    data = tablib.Dataset(*list_val, headers=headers)
    if format_type == Export_choice.XLS:
        response.write(data.xls)
    elif format_type == Export_choice.CSV:
        response.write(data.csv)
    elif format_type == Export_choice.JSON:
        response.write(data.json)
    return response
//// [tests/cases/compiler/binopAssignmentShouldHaveType.ts] ////
//// [binopAssignmentShouldHaveType.ts]
declare var console;
"use strict";
namespace Test {
export class Bug {
getName():string {
return "name";
}
bug() {
var name:string= null;
if ((name= this.getName()).length > 0) {
console.log(name);
}
}
}
}
//// [binopAssignmentShouldHaveType.js]
"use strict";
"use strict";
var Test;
(function (Test) {
class Bug {
getName() {
return "name";
}
bug() {
var name = null;
if ((name = this.getName()).length > 0) {
console.log(name);
}
}
}
Test.Bug = Bug;
})(Test || (Test = {})); | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/binopAssignmentShouldHaveType.js |
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import PGEmbedding
from langchain_community.vectorstores.pgembedding import (
CollectionStore,
EmbeddingStore,
QueryResult,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CollectionStore": "langchain_community.vectorstores.pgembedding",
"EmbeddingStore": "langchain_community.vectorstores.pgembedding",
"QueryResult": "langchain_community.vectorstores.pgembedding",
"PGEmbedding": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CollectionStore",
"EmbeddingStore",
"PGEmbedding",
"QueryResult",
] | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/vectorstores/pgembedding.py |
import type {
CallExpression,
Expression,
Identifier,
ImportDefaultSpecifier,
ImportNamespaceSpecifier,
ImportSpecifier,
Node,
StringLiteral,
TSMethodSignature,
TSPropertySignature,
} from '@babel/types'
import path from 'path'
export const UNKNOWN_TYPE = 'Unknown'
export function resolveObjectKey(
node: Node,
computed: boolean,
): string | undefined {
switch (node.type) {
case 'StringLiteral':
case 'NumericLiteral':
return String(node.value)
case 'Identifier':
if (!computed) return node.name
}
return undefined
}
export function concatStrings(
strs: Array<string | null | undefined | false>,
): string {
return strs.filter((s): s is string => !!s).join(', ')
}
export function isLiteralNode(node: Node): boolean {
return node.type.endsWith('Literal')
}
export function isCallOf(
node: Node | null | undefined,
test: string | ((id: string) => boolean) | null | undefined,
): node is CallExpression {
return !!(
node &&
test &&
node.type === 'CallExpression' &&
node.callee.type === 'Identifier' &&
(typeof test === 'string'
? node.callee.name === test
: test(node.callee.name))
)
}
export function toRuntimeTypeString(types: string[]): string {
return types.length > 1 ? `[${types.join(', ')}]` : types[0]
}
export function getImportedName(
specifier:
| ImportSpecifier
| ImportDefaultSpecifier
| ImportNamespaceSpecifier,
): string {
if (specifier.type === 'ImportSpecifier')
return specifier.imported.type === 'Identifier'
? specifier.imported.name
: specifier.imported.value
else if (specifier.type === 'ImportNamespaceSpecifier') return '*'
return 'default'
}
export function getId(node: Identifier | StringLiteral): string
export function getId(node: Expression): string | null
export function getId(node: Expression) {
return node.type === 'Identifier'
? node.name
: node.type === 'StringLiteral'
? node.value
: null
}
export function getStringLiteralKey(
node: TSPropertySignature | TSMethodSignature,
): string | null {
return node.computed
? node.key.type === 'TemplateLiteral' && !node.key.expressions.length
? node.key.quasis.map(q => q.value.cooked).join('')
: null
: node.key.type === 'Identifier'
? node.key.name
: node.key.type === 'StringLiteral'
? node.key.value
: node.key.type === 'NumericLiteral'
? String(node.key.value)
: null
}
const identity = (str: string) => str
const fileNameLowerCaseRegExp = /[^\u0130\u0131\u00DFa-z0-9\\/:\-_\. ]+/g
const toLowerCase = (str: string) => str.toLowerCase()
function toFileNameLowerCase(x: string) {
return fileNameLowerCaseRegExp.test(x)
? x.replace(fileNameLowerCaseRegExp, toLowerCase)
: x
}
/**
* We need `getCanonicalFileName` when creating ts module resolution cache,
* but TS does not expose it directly. This implementation is replicated from
* the TS source code.
*/
export function createGetCanonicalFileName(
useCaseSensitiveFileNames: boolean,
): (str: string) => string {
return useCaseSensitiveFileNames ? identity : toFileNameLowerCase
}
// in the browser build, the polyfill doesn't expose posix, but defaults to
// posix behavior.
const normalize = (path.posix || path).normalize
const windowsSlashRE = /\\/g
export function normalizePath(p: string): string {
return normalize(p.replace(windowsSlashRE, '/'))
}
export const joinPaths: (...paths: string[]) => string = (path.posix || path)
.join
/**
* key may contain symbols
* e.g. onUpdate:modelValue -> "onUpdate:modelValue"
*/
export const propNameEscapeSymbolsRE: RegExp =
/[ !"#$%&'()*+,./:;<=>?@[\\\]^`{|}~\-]/
export function getEscapedPropName(key: string): string {
return propNameEscapeSymbolsRE.test(key) ? JSON.stringify(key) : key
}
export const isJS = (...langs: (string | null | undefined)[]): boolean =>
langs.some(lang => lang === 'js' || lang === 'jsx')
export const isTS = (...langs: (string | null | undefined)[]): boolean =>
langs.some(lang => lang === 'ts' || lang === 'tsx') | typescript | github | https://github.com/vuejs/core | packages/compiler-sfc/src/script/utils.ts |
win_xml = """<domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
<uuid>{uuid}</uuid>
<name>{name}</name>
<description>{desc}</description>
<memory unit='KiB'>{memory}</memory>
<currentMemory unit='KiB'>{memory}</currentMemory>
<vcpu placement='static'>{cpu}</vcpu>
<numatune>
<memory mode='preferred' nodeset='0'/>
</numatune>
<os>
<type arch='x86_64' machine='pc-i440fx-2.6'>hvm</type>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
<hap/>
<viridian/>
<hyperv>
<relaxed state='on'/>
<vapic state='on'/>
<spinlocks state='on' retries='4095'/>
</hyperv>
</features>
<cpu mode='host-model'>
<model fallback='allow'/>
<topology sockets='{sockets}' cores='{cores}' threads='{threads}'/>
</cpu>
<clock offset='localtime'>
<timer name='rtc' tickpolicy='catchup' track='guest'/>
<timer name='pit' tickpolicy='delay'/>
<timer name='hpet' present='no'/>
<timer name='hypervclock' present='yes'/>
</clock>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
{disk_xml}
<controller type='usb' index='0' model='ich9-ehci1'>
</controller>
<controller type='usb' index='0' model='ich9-uhci1'>
</controller>
<controller type='usb' index='0' model='ich9-uhci2'>
</controller>
<controller type='usb' index='0' model='ich9-uhci3'>
</controller>
<controller type='ide' index='0'>
</controller>
<controller type='pci' index='0' model='pci-root'/>
<controller type='virtio-serial' index='0'>
</controller>
<interface type='bridge'>
<mac address='{mac}'/>
<source bridge='br0'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<channel type='unix'>
<source mode='bind' path='/var/lib/libvirt/qemu/channel/target/{name}.guest-to-host.0'/>
<target type='virtio' name='org.qemu.guest_agent.0'/>
</channel>
<input type='tablet' bus='usb'>
<address type='usb' bus='0' port='1'/>
</input>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<graphics type='vnc' port='{vnc_port}' autoport='yes' listen='0'>
<listen type='address' address='0'/>
</graphics>
<video>
<model type='cirrus' vram='16384' heads='1' primary='yes'/>
</video>
<memballoon model='virtio'>
</memballoon>
</devices>
</domain>
"""
linux_xml = """<domain type='kvm'>
<uuid>{uuid}</uuid>
<name>{name}</name>
<description>{desc}</description>
<memory unit='KiB'>{memory}</memory>
<currentMemory unit='KiB'>{memory}</currentMemory>
<vcpu placement='static'>{cpu}</vcpu>
<numatune>
<memory mode='preferred' nodeset='0'/>
</numatune>
<os>
<type arch='x86_64' machine='pc-i440fx-2.6'>hvm</type>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
<hap/>
<viridian/>
</features>
<cpu mode='host-model'>
<model fallback='allow'/>
<topology sockets='{sockets}' cores='{cores}' threads='{threads}'/>
</cpu>
<clock offset='utc'>
<timer name='rtc' tickpolicy='catchup'/>
<timer name='pit' tickpolicy='delay'/>
<timer name='hpet' present='no'/>
</clock>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
{disk_xml}
<controller type='usb' index='0' model='ich9-ehci1'>
</controller>
<controller type='usb' index='0' model='ich9-uhci1'>
</controller>
<controller type='usb' index='0' model='ich9-uhci2'>
</controller>
<controller type='usb' index='0' model='ich9-uhci3'>
</controller>
<controller type='ide' index='0'>
</controller>
<controller type='pci' index='0' model='pci-root'/>
<controller type='virtio-serial' index='0'>
</controller>
<interface type='bridge'>
<mac address='{mac}'/>
<source bridge='br0'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<channel type='unix'>
<source mode='bind' path='/var/lib/libvirt/qemu/channel/target/{name}.guest-to-host.0'/>
<target type='virtio' name='org.qemu.guest_agent.0'/>
</channel>
<input type='tablet' bus='usb'>
<address type='usb' bus='0' port='1'/>
</input>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<graphics type='vnc' port='-1' autoport='yes' listen='0'>
<listen type='address' address='0'/>
</graphics>
<video>
<model type='cirrus' vram='16384' heads='1' primary='yes'/>
</video>
<memballoon model='virtio'>
</memballoon>
</devices>
</domain>
"""
disk_xml_device1 = """
<disk type='block' device='disk'>
<driver name='qemu' type='raw' cache='writeback' io='threads'/>
<source dev='/dev/vg0/{name}-phy{num}'/>
<target dev='vd{dev_alpha}' bus='virtio'/>
</disk>
"""
disk_xml_device0 = """
<disk type='file' device='disk'>
<driver name='qemu' type='raw' cache='writeback' io='threads'/>
<source file='{STORAGEDIR}/{name}.phy{num}.raw'/>
<target dev='vd{dev_alpha}' bus='virtio'/>
</disk>
""" | unknown | codeparrot/codeparrot-clean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.