prompt listlengths 1 1 | compression_prompt listlengths 1 1 | target stringlengths 1.03k 828k |
|---|---|---|
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\nimport sys\nsys.path.append(\"..\")\nimport pygame\nimport core\nimport widgets\n\n# Text label example.\n\nWINDOW_WIDTH = 1024\nWINDOW_HEIGHT = 728\n\npygame.init()\npygame.font.init\nscreen = pygame.display.set_mode((WINDOW_WIDTH... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\nimport sys\nsys.path.append(\"..\")\nimport pygame\nimport core\nimport widgets\n\n# Text label example.\n\nWINDOW_WIDTH = 1024\nWINDOW_HEIGHT = 728\n\npygame.init()\npygame.font.init\nscreen = pygame.display.set_mo... | ```python
import sys
sys.path.append("..")
import pygame
import core
import widgets
# Text label example.
WINDOW_WIDTH = 1024
WINDOW_HEIGHT = 728
pygame.init()
pygame.font.init
screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
clock = pygame.time.Clock()
FPS = 60
running = True
if __name__ == "__main__":
panel = widgets.Panel(core.Grid((3, 10), (WINDOW_WIDTH, WINDOW_HEIGHT)), None, None, (0, 0))
panel.set_color((155, 155, 155, 255))
text = widgets.TextLabel(panel, (1, 2), core.Text(
"""
Lorem ipsum dolor sit amet,
consectetur adipiscing elit,
sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis
nostrud exercitation ullamco laboris
nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in
reprehenderit in voluptate velit
esse cillum dolore eu fugiat
nulla pariatur. Excepteur sint
occaecat cupidatat non proident,
sunt in culpa qui officia deserunt
mollit anim id est laborum.""", 13, core.BLACK)
)
text.set_color(core.WHITE) # This is the color of the widget, not to be confused with the color of its text.
text.set_span((0, 5))
text.set_border(core.BLACK, 8)
text.set_margin(10) # Altering the margin because of the border.
def redraw():
pygame.display.flip()
screen.fill((0, 0, 0))
panel.draw(screen)
text.draw(screen)
while (running):
clock.tick(FPS)
redraw()
for e in pygame.event.get():
if e.type == pygame.QUIT:
sys.exit()
``` |
[
{
"content": "```python\nimport time\nimport signal\nimport sys\nimport smbus\nimport robot_data_pb2\nfrom oled_display import OledDisplay\n\nclass RobotDriver:\n SERVO_STOP = 90\n\n def __init__(self, i2c_address=0x04, i2c_bus=1, oled_display=None):\n self.i2c_address = i2c_address\n self.i... | [
{
"content": "<|memory_start|>```python\nimport time\nimport signal\nimport sys\nimport smbus\nimport robot_data_pb2\nfrom oled_display import OledDisplay\n\nclass RobotDriver:\n SERVO_STOP = 90\n\n def __init__(self, i2c_address=0x04, i2c_bus=1, oled_display=None):\n self.i2c_address = i2c_address... | ```python
import time
import signal
import sys
import smbus
import robot_data_pb2
from oled_display import OledDisplay
class RobotDriver:
SERVO_STOP = 90
def __init__(self, i2c_address=0x04, i2c_bus=1, oled_display=None):
self.i2c_address = i2c_address
self.i2c_bus = smbus.SMBus(i2c_bus)
self.oled_display = oled_display
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
self.current_state = robot_data_pb2.RobotData()
self.set_state(s0_pos=90, s1_pos=90, led_pattern=robot_data_pb2.RobotData.OFF)
def exit_gracefully(self, signum, frame):
print('Exiting.')
self.set_state(s0_pos=90, s1_pos=90, led_pattern=robot_data_pb2.RobotData.OFF)
if self.oled_display:
self.oled_display.clear()
sys.exit(0)
def get_state(self):
try:
data_length = self.i2c_bus.read_byte(self.i2c_address)
#print('Length: {}'.format(data_length))
i = 0;
data = []
while i < data_length:
data.append(self.i2c_bus.read_byte(self.i2c_address))
i+=1
rd = robot_data_pb2.RobotData()
rd.ParseFromString("".join(map(chr, data)))
print(rd)
if self.oled_display:
oled_text = ['RobotState:',
's0: {}, s1: {}'.format(rd.s0_pos, rd.s1_pos),
'sF: {}, sB: {}'.format(rd.sonarf, rd.sonarb),
]
self.oled_display.display_text('\n'.join(oled_text))
except Exception as e:
print('Error getting state from robot.')
def set_state(self, s0_pos, s1_pos, led_pattern):
try:
self.current_state.s0_pos=s0_pos
self.current_state.s1_pos=s1_pos
self.current_state.led_pattern=led_pattern
self.current_state.sonarf=0
self.current_state.sonarb=0
data = self.current_state.SerializeToString()
data_size = len(data)
# write header
self.i2c_bus.write_byte(self.i2c_address, (data_size >> 8) & 0xFF)
self.i2c_bus.write_byte(self.i2c_address, data_size & 0xFF)
# write data
for c in data:
self.i2c_bus.write_byte(self.i2c_address, ord(c))
except Exception as e:
print(e)
if __name__ == '__main__':
oled = OledDisplay()
driver = RobotDriver(oled_display=oled)
while True:
for i in range(90, 40, -5):
driver.set_state(s0_pos=i, s1_pos=i, led_pattern=robot_data_pb2.RobotData.RAINBOW)
time.sleep(.5)
driver.get_state()
for i in range(40, 90, 5):
driver.set_state(s0_pos=i, s1_pos=i, led_pattern=robot_data_pb2.RobotData.RAINBOW)
time.sleep(.5)
driver.get_state()
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n# vim:fileencoding=utf-8\nfrom __future__ import (unicode_literals, division, absolute_import,\n print_function)\n\n__license__ = 'GPL v3'\n__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'\n\nimport os\nfrom functools import par... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n# vim:fileencoding=utf-8\nfrom __future__ import (unicode_literals, division, absolute_import,\n print_function)\n\n__license__ = 'GPL v3'\n__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'\n\nimport os\nfrom func... | ```python
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import os
from functools import partial
from calibre import prepare_string_for_xml, force_unicode
from calibre.ebooks.metadata import fmt_sidx
from calibre.ebooks.metadata.sources.identify import urls_from_identifiers
from calibre.constants import filesystem_encoding
from calibre.library.comments import comments_to_html
from calibre.utils.icu import sort_key
from calibre.utils.formatter import EvalFormatter
from calibre.utils.date import is_date_undefined
from calibre.utils.localization import calibre_langcode_to_name
default_sort = ('title', 'title_sort', 'authors', 'author_sort', 'series', 'rating', 'pubdate', 'tags', 'publisher', 'identifiers')
def field_sort(mi, name):
try:
title = mi.metadata_for_field(name)['name']
except:
title = 'zzz'
return {x:(i, None) for i, x in enumerate(default_sort)}.get(name, (10000, sort_key(title)))
def displayable_field_keys(mi):
for k in mi.all_field_keys():
try:
m = mi.metadata_for_field(k)
except:
continue
if (
m is not None and m['kind'] == 'field' and m['datatype'] is not None and
k not in ('au_map', 'marked', 'ondevice', 'cover', 'series_sort') and
not k.endswith('_index')
):
yield k
def get_field_list(mi):
for field in sorted(displayable_field_keys(mi), key=partial(field_sort, mi)):
yield field, True
def mi_to_html(mi, field_list=None, default_author_link=None, use_roman_numbers=True, rating_font='Liberation Serif'):
if field_list is None:
field_list = get_field_list(mi)
ans = []
comment_fields = []
isdevice = not hasattr(mi, 'id')
row = u'<td class="title">%s</td><td class="value">%s</td>'
p = prepare_string_for_xml
a = partial(prepare_string_for_xml, attribute=True)
for field in (field for field, display in field_list if display):
try:
metadata = mi.metadata_for_field(field)
except:
continue
if not metadata:
continue
if field == 'sort':
field = 'title_sort'
if metadata['datatype'] == 'bool':
isnull = mi.get(field) is None
else:
isnull = mi.is_null(field)
if isnull:
continue
name = metadata['name']
if not name:
name = field
name += ':'
if metadata['datatype'] == 'comments' or field == 'comments':
val = getattr(mi, field)
if val:
val = force_unicode(val)
comment_fields.append(comments_to_html(val))
elif metadata['datatype'] == 'rating':
val = getattr(mi, field)
if val:
val = val/2.0
ans.append((field,
u'<td class="title">%s</td><td class="rating value" '
'style=\'font-family:"%s"\'>%s</td>'%(
name, rating_font, u'\u2605'*int(val))))
elif metadata['datatype'] == 'composite' and \
metadata['display'].get('contains_html', False):
val = getattr(mi, field)
if val:
val = force_unicode(val)
ans.append((field,
row % (name, comments_to_html(val))))
elif field == 'path':
if mi.path:
path = force_unicode(mi.path, filesystem_encoding)
scheme = u'devpath' if isdevice else u'path'
url = prepare_string_for_xml(path if isdevice else
unicode(mi.id), True)
pathstr = _('Click to open')
extra = ''
if isdevice:
durl = url
if durl.startswith('mtp:::'):
durl = ':::'.join((durl.split(':::'))[2:])
extra = '<br><span style="font-size:smaller">%s</span>'%(
prepare_string_for_xml(durl))
link = u'<a href="%s:%s" title="%s">%s</a>%s' % (scheme, url,
prepare_string_for_xml(path, True), pathstr, extra)
ans.append((field, row % (name, link)))
elif field == 'formats':
if isdevice:
continue
path = ''
if mi.path:
h, t = os.path.split(mi.path)
path = '/'.join((os.path.basename(h), t))
data = ({
'fmt':x, 'path':a(path or ''), 'fname':a(mi.format_files.get(x, '')),
'ext':x.lower(), 'id':mi.id
} for x in mi.formats)
fmts = [u'<a title="{path}/{fname}.{ext}" href="format:{id}:{fmt}">{fmt}</a>'.format(**x) for x in data]
ans.append((field, row % (name, u', '.join(fmts))))
elif field == 'identifiers':
urls = urls_from_identifiers(mi.identifiers)
links = [u'<a href="%s" title="%s:%s">%s</a>' % (a(url), a(id_typ), a(id_val), p(name))
for name, id_typ, id_val, url in urls]
links = u', '.join(links)
if links:
ans.append((field, row % (_('Ids')+':', links)))
elif field == 'authors' and not isdevice:
authors = []
formatter = EvalFormatter()
for aut in mi.authors:
link = ''
if mi.author_link_map[aut]:
link = mi.author_link_map[aut]
elif default_author_link:
vals = {'author': aut.replace(' ', '+')}
try:
vals['author_sort'] = mi.author_sort_map[aut].replace(' ', '+')
except:
vals['author_sort'] = aut.replace(' ', '+')
link = formatter.safe_format(
default_author_link, vals, '', vals)
aut = p(aut)
if link:
authors.append(u'<a calibre-data="authors" title="%s" href="%s">%s</a>'%(a(link), a(link), aut))
else:
authors.append(aut)
ans.append((field, row % (name, u' & '.join(authors))))
elif field == 'languages':
if not mi.languages:
continue
names = filter(None, map(calibre_langcode_to_name, mi.languages))
ans.append((field, row % (name, u', '.join(names))))
else:
val = mi.format_field(field)[-1]
if val is None:
continue
val = p(val)
if metadata['datatype'] == 'series':
sidx = mi.get(field+'_index')
if sidx is None:
sidx = 1.0
val = _('Book %(sidx)s of <span class="series_name">%(series)s</span>')%dict(
sidx=fmt_sidx(sidx, use_roman=use_roman_numbers),
series=p(getattr(mi, field)))
elif metadata['datatype'] == 'datetime':
aval = getattr(mi, field)
if is_date_undefined(aval):
continue
ans.append((field, row % (name, val)))
dc = getattr(mi, 'device_collections', [])
if dc:
dc = u', '.join(sorted(dc, key=sort_key))
ans.append(('device_collections',
row % (_('Collections')+':', dc)))
def classname(field):
try:
dt = mi.metadata_for_field(field)['datatype']
except:
dt = 'text'
return 'datatype_%s'%dt
ans = [u'<tr id="%s" class="%s">%s</tr>'%(field.replace('#', '_'),
classname(field), html) for field, html in ans]
# print '\n'.join(ans)
return u'<table class="fields">%s</table>'%(u'\n'.join(ans)), comment_fields
``` |
[
{
"content": "Here is the code content:\n```python\n# $Id$\nimport sys\nimport os.path\nimport math\nfrom itcc.molecule import read, write\nfrom itcc.molecule.tools import neighbours, is_pyramid\nfrom itcc.molecule import relalist\n\ntry:\n sorted\nexcept:\n from itcc.core.tools import sorted_ as sorted\n... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\n# $Id$\nimport sys\nimport os.path\nimport math\nfrom itcc.molecule import read, write\nfrom itcc.molecule.tools import neighbours, is_pyramid\nfrom itcc.molecule import relalist\n\ntry:\n sorted\nexcept:\n from itcc.core.tools import sor... | ```python
# $Id$
import sys
import os.path
import math
from itcc.molecule import read, write
from itcc.molecule.tools import neighbours, is_pyramid
from itcc.molecule import relalist
try:
sorted
except:
from itcc.core.tools import sorted_ as sorted
def mirrormol():
if len(sys.argv) != 2:
sys.stderr.write('Usage: %s <xyzfname>\n' % os.path.basename(sys.argv[0]))
sys.exit(1)
mol = read.readxyz(file(sys.argv[1]))
mol.coords = -mol.coords
write.writexyz(mol)
def printbonds():
if len(sys.argv) != 2:
sys.stderr.write('Usage: %s <xyzfname>\n' % os.path.basename(sys.argv[0]))
sys.exit(1)
mol = read.readxyz(file(sys.argv[1]))
a = relalist.Relalist(mol)
print a
def detailcmp():
from optparse import OptionParser
usage = '%prog [options] <xyzfname1> <xyzfname2>'
parser = OptionParser(usage=usage)
parser.add_option('-a', "--atoms", dest="atoms",
help="only compare selected atoms, 1-based",
metavar="STRING")
parser.add_option('-A', "--atomsfile", dest="atomsfile",
help="read the selected atoms from file",
metavar="FILE")
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("incorrect number of arguments")
if options.atoms and options.atomsfile:
parser.error("options conflict")
if options.atomsfile:
options.atoms = file(options.atomsfile).read()
atoms = None
if options.atoms:
atoms = [int(x)-1 for x in options.atoms.split()]
mol1 = read.readxyz(file(args[0]))
mol2 = read.readxyz(file(args[1]))
r1 = relalist.Relalist(mol1)
bonds_data = []
for i,j in r1.bonds:
if atoms is not None and (i not in atoms or j not in atoms): continue
l1 = mol1.calclen(i,j)
l2 = mol2.calclen(i,j)
bonds_data.append((abs(l1-l2), (i+1,j+1), l1, l2))
angles_data = []
for i,j,k in r1.angles:
if atoms is not None \
and (i not in atoms \
or j not in atoms \
or k not in atoms):
continue
a1 = math.degrees(mol1.calcang(i,j,k))
a2 = math.degrees(mol2.calcang(i,j,k))
angles_data.append((abs(a1-a2), (i+1,j+1,k+1), a1, a2))
torsions_data = []
for i,j,k,l in r1.torsions:
if atoms is not None \
and (i not in atoms \
or j not in atoms \
or k not in atoms
or l not in atoms):
continue
t1 = math.degrees(mol1.calctor(i,j,k,l))
t2 = math.degrees(mol2.calctor(i,j,k,l))
torsions_data.append((180-abs(abs(t1-t2)-180), (i+1,j+1,k+1,l+1), t1, t2))
print 'bonds:'
for x in sorted(bonds_data):
print x
print
print 'angles:'
for x in sorted(angles_data):
print x
print
print 'torsions:'
for x in sorted(torsions_data):
print x[1][0], x[1][1], x[1][2], x[1][3], x[2], x[3], x[0]
def rg():
if len(sys.argv) < 2:
sys.stderr.write('Usage: %s XYZFNAME...\n' % os.path.basename(sys.argv[0]))
sys.exit(1)
from itcc.molecule import radius_of_gyration
for fname in sys.argv[1:]:
ifile = sys.stdin
if fname != '-':
ifile = file(fname)
mol = read.readxyz(ifile)
print ifile.name, radius_of_gyration(mol)
def sub_pyramid_check(fname, atoms):
mol = read.readxyz(file(fname))
if atoms is None:
atoms = range(len(mol))
res = []
for atom in atoms:
neis = neighbours(mol, atom)
if len(neis) != 4:
continue
if is_pyramid(mol.coords[atom],
mol.coords[neis[0]],
mol.coords[neis[1]],
mol.coords[neis[2]],
mol.coords[neis[3]]):
res.append(atom)
return res
def pyramid_check():
from optparse import OptionParser
usage = '%prog [options] <xyzfname>...'
parser = OptionParser(usage=usage)
parser.add_option('-a', "--atoms", dest="atoms",
help="only compare selected atoms, 1-based",
metavar="STRING")
parser.add_option('-A', "--atomsfile", dest="atomsfile",
help="read the selected atoms from file",
metavar="FILE")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error("incorrect number of arguments")
if options.atoms and options.atomsfile:
parser.error("options conflict")
if options.atomsfile:
options.atoms = file(options.atomsfile).read()
atoms = None
if options.atoms:
atoms = [int(x)-1 for x in options.atoms.split()]
for fname in args:
res = sub_pyramid_check(fname, atoms)
if res:
print fname, ' '.join(str(x+1) for x in res)
``` |
[
{
"content": "Repeat the code precisely:\n```python\n# -*- coding: utf-8 -*-\n#\n# Polycircles documentation build configuration file, created by\n# sphinx-quickstart on Mon Apr 21 13:22:59 2014.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all poss... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# Polycircles documentation build configuration file, created by\n# sphinx-quickstart on Mon Apr 21 13:22:59 2014.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note t... | ```python
# -*- coding: utf-8 -*-
#
# Polycircles documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 21 13:22:59 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Polycircles'
copyright = u'2014, Adam Matan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Polycirclesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Polycircles.tex', u'Polycircles Documentation',
u'Adam Matan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'polycircles', u'Polycircles Documentation',
[u'Adam Matan'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Polycircles', u'Polycircles Documentation',
u'Adam Matan', 'Polycircles', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n#!/usr/bin/env python\n\n\"\"\"\n================================================\nABElectronics IO Pi Tests | __init__\n\nRequires python smbus to be installed\nFor Python 2 install with: sudo apt-get install python-smbus\nFor Python 3 install... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n#!/usr/bin/env python\n\n\"\"\"\n================================================\nABElectronics IO Pi Tests | __init__\n\nRequires python smbus to be installed\nFor Python 2 install with: sudo apt-get install python-smbus\nFor ... | ```python
#!/usr/bin/env python
"""
================================================
ABElectronics IO Pi Tests | __init__
Requires python smbus to be installed
For Python 2 install with: sudo apt-get install python-smbus
For Python 3 install with: sudo apt-get install python3-smbus
run with: python3 IOPi_init.py
================================================
This test validates the __init__ function in the IOPi class.
Hardware Required: Logic Analyser on I2C Pins
=== Expected Result ============================
> Console Output:
I2C address low boundary check: PASSED
I2C address high boundary check: PASSED
I2C initialise boundary check: PASSED
Object created without initialisation
Object created with initialisation
> Logic Analyser Output:
W 0x20 0xA0 0x02
10ms delay
W 0x20 0xA0 0x02
W 0x20 0x00 0xFF 0xFF
W 0x20 0x0C 0x00 0x00
W 0x20 0x02 0x00 0x00
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import time
try:
import sys
sys.path.append("..")
from IOPi import IOPi
except ImportError:
raise ImportError("Failed to import IOPi library")
def main():
"""
Main program function
"""
passed = True
# Check i2c address for low out of bounds
try:
a = IOPi(0x19)
del a
pass
except ValueError:
print("I2C address low boundary check: PASSED")
pass
except IOError:
passed = False
print("I2C address low boundary check: FAILED")
else:
passed = False
print("I2C address low boundary check: FAILED")
pass
# Check i2c address for high out of bounds
try:
b = IOPi(0x28)
del b
pass
except ValueError:
print("I2C address high boundary check: PASSED")
pass
else:
passed = False
print("I2C address high boundary check: FAILED")
pass
# Check initialise parameter for out of bounds
try:
b = IOPi(0x20, 42)
del b
pass
except ValueError:
print("I2C initialise boundary check: PASSED")
pass
else:
passed = False
print("I2C initialise boundary check: FAILED")
pass
# Logic analyser test
print("Object created without initialisation")
c = IOPi(0x20, False)
del c
time.sleep(0.01) # sleep 10ms
print("Object created with initialisation")
d = IOPi(0x20, True)
del d
if passed is False:
print("Test Failed")
if __name__ == "__main__":
main()
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n#encoding: utf-8\nimport json\n\nimport gconf\nimport MySQLdb\nfrom dbutils import execute_fetch_sql\nfrom dbutils import execute_commit_sql\n\n'''\n获取用户信息\n'''\ndef get_users():\n _columns = ('id','username','password','age'... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n#encoding: utf-8\nimport json\n\nimport gconf\nimport MySQLdb\nfrom dbutils import execute_fetch_sql\nfrom dbutils import execute_commit_sql\n\n'''\n获取用户信息\n'''\ndef get_users():\n _columns = ('id','username',... | ```python
#encoding: utf-8
import json
import gconf
import MySQLdb
from dbutils import execute_fetch_sql
from dbutils import execute_commit_sql
'''
获取用户信息
'''
def get_users():
_columns = ('id','username','password','age')
_sql = 'select * from user'
_count,_rt_list = execute_fetch_sql(_sql)
_rt = []
for _line in _rt_list:
_rt.append(dict(zip(_columns, _line)))
print _rt
return _rt
'''
保存用户信息
'''
def save_users(users):
fhandler = open(gconf.USER_FILE, 'wb')
fhandler.write(json.dumps(users))
fhandler.close()
'''
进行用户登录验证
True/False: 用户名和密码验证成功/用户名或密码错误
如果有一个用户的username&password 与输入相同则登录成功
如果所有用户的username&password 与输入不相同则登录失败
'''
def validate_login(username, password):
#_sql = 'select * from user where username="{username}" and password=md5("{password}")'.format(username=username,password=password)
_sql = 'select * from user where username=%s and password=md5(%s)'
_count,_rt_list = execute_fetch_sql(_sql,(username,password))
return _count != 0
'''
验证添加用户的信息
True/False, 描述信息
'''
def validate_add_user(username, password, age):
users = get_users()
for user in users:
if user.get('username') == username:
return False, u'用户名已经存在'
if len(password) < 6:
return False, u'密码长度至少为6位'
if not str(age).isdigit() or int(age) < 0 or int(age) > 100:
return False, u'年龄不正确'
return True, ''
'''
添加用户信息
'''
def add_user(username, password, age):
_sql = 'insert into user(username,password,age) values (%s,md5(%s),%s) '
_args = (username,password,age)
_count = execute_commit_sql(_sql,(username,password,age))
'''
获取用户信息
'''
def get_user(username):
users = get_users()
for user in users:
if user.get('username') == username:
return user
return None
def get_user_id(id,fetch=True):
_columns = ('id','username','password','age')
_sql = 'select * from user where id=%s'
_args = (id)
_count, _rt_list = execute_fetch_sql(_sql,_args)
_rt = []
for _line in _rt_list:
_rt.append(dict(zip(_columns, _line)))
return _rt
#get_user_id(19)
'''
验证用户更新
'''
def validate_update_user(username, password, age,*args):
if get_user(username) is None:
return False, u'用户信息不存在'
if len(password) < 6:
return False, u'密码长度至少为6位'
if not str(age).isdigit() or int(age) < 0 or int(age) > 100:
return False, u'年龄不正确'
return True, ''
'''
验证用户更新
'''
def validate_update_user_age(uid, user_age,*args):
if get_user_id(uid) is None:
return False, u'用户信息不存在'
if not str(user_age).isdigit() or int(user_age) <= 0 or int(user_age) > 100:
return False, u'年龄输入错误'
return True, ''
'''
更新用户信息
'''
def update_user(user_age,uid):
_sql = 'update user set age=%s where id=%s'
_args = (user_age,uid)
_count = execute_commit_sql(_sql,_args)
'''
验证用户
'''
def validate_delete_user(uid):
if get_user_id(uid) is None:
return False, u'用户信息不存在'
return True, ''
'''
删除用户信息
'''
def delete_user(uid):
_sql = 'delete from user where id=%s '
_args = (uid)
_count = execute_commit_sql(_sql,_args)
'''
验证用户信息
'''
def validate_charge_user_password(uid,user_password,username,manager_password):
if not validate_login(username,manager_password):
return False,u'管理员密码错误'
if get_user(username) is None:
return False, u'用户信息不存在'
if len(user_password) < 6:
return False, u'密码长度至少为6位'
return True,''
'''
修改用户密码
'''
def charge_user_password(uid,user_password):
_sql = 'update user set password=md5(%s) where id=%s'
_args = (user_password,uid)
_count = execute_commit_sql(_sql, _args)
'''
日志信息显示
'''
def accesslog(topn):
_columns = ('count','url','ip','code')
_sql = 'select * from accesslog limit %s'
_args = (topn)
_count, _rt_list = execute_fetch_sql(_sql,_args)
_rt = []
for _line in _rt_list:
_rt.append(dict(zip(_columns, _line)))
return _rt
if __name__ == '__main__':
print accesslog(1)
# update_user('aa','123456',88,18)
#get_userid("aa")
#print get_userid()
#print validate_login('kk', '123456')
#print validate_login('kk', '1234567')
#print validate_login('woniu', '123456')
#username = 'woniu1'
#password = '123456'
#age = '28'
#_is_ok, _error = validate_add_user(username, password, age)
#if _is_ok:
# add_user(username, password, age)
#else:
# print _error
#
#print delete_user('woniu2')
#print validate_update_user('woniu2', password, age)[1]
#print validate_update_user('kk', password, 'ac')[1]
#_is_ok, _error = validate_update_user('kk', password, 30)
#if _is_ok:
# update_user('kk', 'abcdef', 31)
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\nfrom rest_framework import serializers\n\nfrom .core import MoneyField\n\n\nclass StateListField(serializers.ListField):\n id = serializers.PrimaryKeyRelatedField(source='pk', read_only=True)\n state = serializers.CharField(sou... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\nfrom rest_framework import serializers\n\nfrom .core import MoneyField\n\n\nclass StateListField(serializers.ListField):\n id = serializers.PrimaryKeyRelatedField(source='pk', read_only=True)\n state = serialize... | ```python
from rest_framework import serializers
from .core import MoneyField
class StateListField(serializers.ListField):
    # Read-only list entry for a state: exposes pk as "id", name as "state".
    # NOTE(review): fields declared on a ListField subclass -- presumably
    # consumed by a custom renderer; confirm DRF actually serialises these.
    id = serializers.PrimaryKeyRelatedField(source='pk', read_only=True)
    state = serializers.CharField(source='name', read_only=True)
    description = serializers.CharField(read_only=True)
class CategoryListField(serializers.ListField):
    # Read-only list entry for an event category and its capability flags.
    id = serializers.PrimaryKeyRelatedField(source='pk', read_only=True)
    code = serializers.CharField(read_only=True)
    name = serializers.CharField(read_only=True)
    tour = serializers.BooleanField(read_only=True)
    talk = serializers.BooleanField(read_only=True)
    instruction = serializers.BooleanField(read_only=True)
    collective = serializers.BooleanField(read_only=True)
    winter = serializers.BooleanField(read_only=True)
    summer = serializers.BooleanField(read_only=True)
    indoor = serializers.BooleanField(read_only=True)
class CollectiveListField(serializers.ListField):
    # Read-only list entry for a collective; "code" comes from its category.
    id = serializers.PrimaryKeyRelatedField(source='pk', read_only=True)
    code = serializers.CharField(source='category.code', read_only=True)
    title = serializers.CharField(read_only=True)
    name = serializers.CharField(read_only=True)
    description = serializers.CharField(read_only=True)
class TopicListField(serializers.ListField):
    # Read-only list entry for a topic, including the pk lists of its
    # required qualifications and equipment (camelCase for the client).
    id = serializers.PrimaryKeyRelatedField(source='pk', read_only=True)
    code = serializers.CharField(source='category.code', read_only=True)
    title = serializers.CharField(read_only=True)
    name = serializers.CharField(read_only=True)
    description = serializers.CharField(read_only=True)
    qualificationIds = serializers.PrimaryKeyRelatedField(source='qualifications', many=True, read_only=True)
    preconditions = serializers.CharField(read_only=True)
    equipmentIds = serializers.PrimaryKeyRelatedField(source='equipments', many=True, read_only=True)
    miscEquipment = serializers.CharField(source='misc_equipment', read_only=True)
class ApproximateListField(serializers.ListField):
    # Read-only list entry for an approximate departure-time preset.
    id = serializers.PrimaryKeyRelatedField(source='pk', read_only=True)
    name = serializers.CharField(read_only=True)
    description = serializers.CharField(read_only=True)
    startTime = serializers.TimeField(source='start_time', read_only=True)
class EquipmentListField(serializers.ListField):
    # Read-only list entry for a piece of equipment.
    id = serializers.PrimaryKeyRelatedField(source='pk', read_only=True)
    code = serializers.CharField(read_only=True)
    name = serializers.CharField(read_only=True)
    description = serializers.CharField(read_only=True)
class SkillListField(serializers.ListField):
    # Read-only list entry for a technical-skill level.
    id = serializers.PrimaryKeyRelatedField(source='pk', read_only=True)
    level = serializers.CharField(read_only=True)
    code = serializers.CharField(read_only=True)
    description = serializers.CharField(read_only=True)
class FitnessListField(serializers.ListField):
    # Read-only list entry for a fitness level (mirrors SkillListField).
    id = serializers.PrimaryKeyRelatedField(source='pk', read_only=True)
    level = serializers.CharField(read_only=True)
    code = serializers.CharField(read_only=True)
    description = serializers.CharField(read_only=True)
class InstructionCostListField(serializers.ListField):
    # Read-only compensation row for instruction calculation
    # (plain IntegerFields here, not model-backed pk fields).
    id = serializers.IntegerField(read_only=True)
    level = serializers.IntegerField(read_only=True)
    duration = serializers.IntegerField(read_only=True)
    compensation = MoneyField(read_only=True)
class TourCostSerializer(serializers.Serializer):
    # Tour calculation values; camelCase output mapped to snake_case sources.
    halfDay = MoneyField(source='half_day', read_only=True)
    wholeDay = MoneyField(source='whole_day', read_only=True)
    admissionMinimum = MoneyField(source='min_admission', read_only=True)
class OpeningHourListField(serializers.ListField):
    # Read-only (days, hours) pair describing one opening-hours entry.
    days = serializers.CharField(read_only=True)
    hours = serializers.CharField(read_only=True)
class OpeningModeSerializer(serializers.Serializer):
    # Default vs. special (e.g. holiday) opening hours.
    default = OpeningHourListField()
    special = OpeningHourListField()
class OpeningHourSerializer(serializers.Serializer):
    # Opening hours per location: office and service desk.
    office = OpeningModeSerializer()
    desk = OpeningModeSerializer()
class ValueSerializer(serializers.Serializer):
    # Aggregate of all reference/lookup values the client needs in one
    # payload; camelCase names map onto snake_case model sources.
    states = StateListField()
    categories = CategoryListField()
    approximates = ApproximateListField()
    equipments = EquipmentListField()
    skills = SkillListField()
    fitness = FitnessListField()
    topics = TopicListField()
    collectives = CollectiveListField()
    travelCostFactor = MoneyField(source='travel_cost_factor')
    accommodationCostMaximum = MoneyField(source='max_accommodation')
    accommodationCostDefault = MoneyField(source='accommodation')
    tourCalculationValues = TourCostSerializer(source='tour_calculation')
    instructionCalculationValues = InstructionCostListField(source='instruction_calculation')
    openingHours = OpeningHourSerializer(source='opening_hours')
``` |
[
{
"content": "Here is the code block:\n```python\n#-------------------------------- Variables ---------------------------#\n\n# input to build the training set\npath_data = '/Users/viherm/Desktop/CARS'\npath_training = '/Users/viherm/Desktop/trainingset'\n\n# input to train the U-Net\npath_model = '/Users/viher... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\n#-------------------------------- Variables ---------------------------#\n\n# input to build the training set\npath_data = '/Users/viherm/Desktop/CARS'\npath_training = '/Users/viherm/Desktop/trainingset'\n\n# input to train the U-Net\npath_model... | ```python
# NOTE(review): executable walkthrough of the AxonDeepSeg pipeline --
# every step below runs at import time; comment out the steps you
# don't need before running the file.
#-------------------------------- Variables ---------------------------#
# input to build the training set
path_data = '/Users/viherm/Desktop/CARS'
path_training = '/Users/viherm/Desktop/trainingset'
# input to train the U-Net
path_model = '/Users/viherm/Desktop/data/models/model_init'
path_model_new = '/Users/viherm/Desktop/data/models/model_new'
# input to train the mrf
paths_training = ['/Users/viherm/Desktop/CARS/data1','/Users/viherm/Desktop/CARS/data2', '/Users/viherm/Desktop/CARS/data3']
path_mrf = '/Users/viherm/Desktop/data/models/mrf'
# input to segment an image
path_my_data = '/Users/viherm/Desktop/data2segment/mydata'
#----------------------Building the training set (80/20 train split)----------------------------#
from AxonDeepSeg.learning.data_construction import build_data
build_data(path_data, path_training, trainRatio=0.80)
#----------------------Training the U-Net from a path_training----------------------------------#
from AxonDeepSeg.learn_model import learn_model
learn_model(path_training, path_model, learning_rate=0.005)
#-----------------------Initialize the training-------------------------------------------------#
learn_model(path_training, path_model_new, path_model, learning_rate=0.002)
#----------------------Visualization of the training---------------------#
from AxonDeepSeg.evaluation.visualization import visualize_learning
visualize_learning(path_model)
#--------------------Training on (GPU Bireli)---------------------------------------------------#
# ---- In a terminal window ------
# $ scp path_training neuropoly@bireli.neuro.polymtl.ca:my_project
# $ scp path_model neuropoly@bireli.neuro.polymtl.ca:my_project
# $ cd AxonSegmentation/AxonDeepSeg
# $ python learn_model.py -p path_bireli_training -m path_bireli_model_new -lr 0.0005
# or
# $ python learn_model.py -p path_bireli_training -m path_bireli_model_new -m_init path_bireli_model_init -lr 0.0005
#- In a new window to visualize the training performances
# $ scp -r path_bireli_model_new path_model_new
#----------------------Training the MRF from the paths_training---------------------#
from AxonDeepSeg.mrf import learn_mrf
learn_mrf(paths_training, path_mrf)
#----------------------Axon segmentation with a trained model and trained mrf---------------------#
from AxonDeepSeg.apply_model import axon_segmentation
axon_segmentation(path_my_data, path_model, path_mrf)
#----------------------Myelin segmentation from Axon segmentation--------------------#
from AxonDeepSeg.apply_model import myelin
myelin(path_my_data)
#----------------------Axon and Myelin segmentation--------------------#
from AxonDeepSeg.apply_model import pipeline
pipeline(path_my_data,path_model,path_mrf)
#----------------------Visualization of the results--------------------#
from AxonDeepSeg.evaluation.visualization import visualize_results
visualize_results(path_my_data)
``` |
[
{
"content": "```python\nfrom flask import request, g, jsonify\nfrom flask_cors import cross_origin\n\nfrom alerta.app.auth.utils import permission\nfrom alerta.app.models.alert import Alert\nfrom alerta.app.utils.api import process_alert, add_remote_ip\nfrom alerta.app.exceptions import ApiError, RejectExcepti... | [
{
"content": "<|memory_start|>```python\nfrom flask import request, g, jsonify\nfrom flask_cors import cross_origin\n\nfrom alerta.app.auth.utils import permission\nfrom alerta.app.models.alert import Alert\nfrom alerta.app.utils.api import process_alert, add_remote_ip\nfrom alerta.app.exceptions import ApiErro... | ```python
from flask import request, g, jsonify
from flask_cors import cross_origin
from alerta.app.auth.utils import permission
from alerta.app.models.alert import Alert
from alerta.app.utils.api import process_alert, add_remote_ip
from alerta.app.exceptions import ApiError, RejectException
from . import webhooks
# {
# "second_probe": {},
# "check_type": "HTTP",
# "first_probe": {},
# "tags": [],
# "check_id": 803318,
# "current_state": "DOWN",
# "check_params": {
# "url": "/",
# "encryption": false,
# "hostname": "api.alerta.io",
# "basic_auth": false,
# "port": 80,
# "header": "User-Agent:Pingdom.com_bot_version_1.4_(http://www.pingdom.com/)",
# "ipv6": false,
# "full_url": "http://api.alerta.io/"
# },
# "previous_state": "UP",
# "check_name": "Alerta API on OpenShift",
# "version": 1,
# "state_changed_timestamp": 1498859836,
# "importance_level": "HIGH",
# "state_changed_utc_time": "2017-06-30T21:57:16",
# "long_description": "This is a test message triggered by a user in My Pingdom",
# "description": "test"
# }
def parse_pingdom(check):
    """Translate a Pingdom state-change webhook payload into an Alert."""
    # An UP state always clears; otherwise severity follows importance.
    if check['current_state'] == 'UP':
        severity = 'normal'
    elif check['importance_level'] == 'HIGH':
        severity = 'critical'
    else:
        severity = 'warning'
    return Alert(
        resource=check['check_name'],
        event=check['current_state'],
        correlate=['UP', 'DOWN'],
        environment='Production',
        severity=severity,
        service=[check['check_type']],
        group='Network',
        value=check['description'],
        text='%s: %s' % (check['importance_level'], check['long_description']),
        tags=check['tags'],
        attributes={'checkId': check['check_id']},
        origin='Pingdom',
        event_type='availabilityAlert',
        raw_data=check
    )
@webhooks.route('/webhooks/pingdom', methods=['OPTIONS', 'POST'])
@cross_origin()
@permission('write:webhooks')
def pingdom():
    # Flask endpoint: turn a Pingdom state-change webhook into an alert.
    try:
        incomingAlert = parse_pingdom(request.json)
    except ValueError as e:
        return jsonify(status="error", message=str(e)), 400
    # Scope the alert to the authenticated customer, if any.
    if g.get('customer', None):
        incomingAlert.customer = g.get('customer')
    add_remote_ip(request, incomingAlert)
    try:
        alert = process_alert(incomingAlert)
    except RejectException as e:
        # Alert rejected by policy (plug-in/blackout rules).
        return jsonify(status="error", message=str(e)), 403
    except Exception as e:
        return jsonify(status="error", message=str(e)), 500
    if alert:
        return jsonify(status="ok", id=alert.id, alert=alert.serialize), 201
    else:
        raise ApiError("insert or update of pingdom check failed", 500)
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n###############################################################################\n#\n# Tests for XlsxWriter.\n#\n# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org\n#\n\nfrom ..excel_comparsion_test import ExcelComparisonTest\nfrom ..... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n###############################################################################\n#\n# Tests for XlsxWriter.\n#\n# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org\n#\n\nfrom ..excel_comparsion_test import ExcelCompari... | ```python
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        filename = 'autofilter07.xlsx'

        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename
        self.txt_filename = test_dir + 'xlsx_files/' + 'autofilter_data.txt'

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """
        Test the creation of a simple XlsxWriter file with an autofilter.
        Test autofilters where column filter ids are relative to autofilter
        range.
        """
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # Set the autofilter.
        worksheet.autofilter('D3:G53')

        # Add filter criteria.
        worksheet.filter_column('D', 'region == East')

        # Open the text file with autofilter example data; the context
        # manager guarantees the handle is closed even on test failure.
        with open(self.txt_filename) as textfile:
            # Read the headers from the first line of the input file.
            headers = textfile.readline().strip("\n").split()

            # Write out the headers.
            worksheet.write_row('D3', headers)

            # Start writing data after the headers.
            row = 3

            # Read the rest of the text file and write it to the worksheet.
            for line in textfile:
                # Split the input data based on whitespace.
                data = line.strip("\n").split()

                # Convert the number data from the text file.
                for i, item in enumerate(data):
                    try:
                        data[i] = float(item)
                    except ValueError:
                        pass

                # Get some of the field data.
                region = data[0]

                # Hide rows that don't match the filter.
                if region != 'East':
                    worksheet.set_row(row, options={'hidden': True})

                # Write out the row data.
                worksheet.write_row(row, 3, data)

                # Move on to the next worksheet row.
                row += 1

        workbook.close()

        self.assertExcelEqual()
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom ansicolor import red\nfrom ansicolor import cyan\nfrom ansicolor import green\nfrom ansicolor import blue\n\nfrom django.db.models import Q\n\nfrom urllib import urlencode\n\nfrom parlament.settings import BASE_HOST\nfrom parlament.spiders.... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom ansicolor import red\nfrom ansicolor import cyan\nfrom ansicolor import green\nfrom ansicolor import blue\n\nfrom django.db.models import Q\n\nfrom urllib import urlencode\n\nfrom parlament.settings import BASE_HOST\nfrom pa... | ```python
# -*- coding: utf-8 -*-
import scrapy
from ansicolor import red
from ansicolor import cyan
from ansicolor import green
from ansicolor import blue
from django.db.models import Q
from urllib import urlencode
from parlament.settings import BASE_HOST
from parlament.spiders.persons import PersonsSpider
from parlament.resources.extractors.law import *
from parlament.resources.extractors.prelaw import *
from parlament.resources.extractors.person import *
from parlament.resources.extractors.opinion import *
from parlament.resources.extractors.administration import *
from op_scraper.models import Person
from op_scraper.models import Function
from op_scraper.models import Mandate
from op_scraper.models import Administration
from op_scraper.models import LegislativePeriod
class AdministrationsSpider(PersonsSpider):
    """Scrape members of Austrian federal governments (Regierungen) and
    persist them as Person / Function / Mandate / Administration records."""
    BASE_URL = "{}/{}".format(BASE_HOST, "WWER/BREG/REG/filter.psp")
    # Query-string defaults for the administrations list page.
    URLOPTIONS_ADMIN = {
        'jsMode': '',
        'xdocumentUri': '/WWER/BREG/REG/index.shtml',
        'REG': '0',
        'anwenden': 'Anwenden',
        'FUNK': 'ALLE',
        'RESS': 'ALLE',
        'SUCH': '',
        'listeId': '16',
        'FBEZ': 'FW_016',
        'pageNumber': '',
    }
    LLP = []
    name = "administrations"
    title = "Administrations (Regierungen) Spider"
    # Parl-ids whose detail page has already been queued.
    # NOTE(review): class-level mutable, shared across instances -- confirm
    # that only one spider instance runs at a time.
    persons_scraped = []
    def __init__(self, **kw):
        super(AdministrationsSpider, self).__init__(**kw)
        self.start_urls = self.get_urls()
        self.cookies_seen = set()
        self.idlist = {}
        #self.print_debug()
    def get_urls(self):
        """
        Overwritten from BaseSpider for non-LLP-based retrieval
        """
        urls = []
        url_options = urlencode(self.URLOPTIONS_ADMIN)
        url = "{}?{}".format(self.BASE_URL, url_options)
        urls.append(url)
        return urls
    def parse(self, response):
        # Upsert each person on the list page together with their function
        # and mandate; queue a detail-page request the first time a person
        # is encountered.  Returns the list of follow-up requests.
        persons = ADMINISTRATION.LIST.xt(response)
        callback_requests = []
        self.logger.info(
            "Scraping {} persons".format(len(persons)))
        # Iterate all persons
        for p in persons:
            # Extract basic data
            parl_id = p['source_link'].split('/')[-2]
            p['source_link'] = "{}{}".format(BASE_HOST, p['source_link'])
            # Create or update simple person's item
            person_data = {
                'reversed_name': p['reversed_name']
            }
            person_item, created_person = Person.objects.update_or_create(
                source_link=p['source_link'],
                parl_id=parl_id,
                defaults=person_data
            )
            if created_person:
                self.logger.debug(u"Created Person {}".format(
                    green(u'[{}]'.format(p['reversed_name']))))
            else:
                self.logger.debug(u"Updated Person {}".format(
                    green(u"[{}]".format(p['reversed_name']))
                ))
            mandate = p['mandate']
            administration_item = self.get_administration_item(mandate)
            function_item, f_created = Function.objects.get_or_create(
                short=mandate['short'],
                title=mandate['title'])
            if f_created:
                self.logger.debug(u"Created function {}".format(
                    green(u'[{}]'.format(function_item.short))))
            # Create and append mandate
            try:
                mandate_item, m_created = Mandate.objects.update_or_create(
                    person=person_item,
                    function=function_item,
                    administration=administration_item)
                # Let's try to find a matching LLP for this administration so we can
                # add it to this mandate
                # NOTE(review): `datetime` is not imported explicitly here --
                # presumably provided via the wildcard extractor imports; confirm.
                try:
                    llps = LegislativePeriod.objects\
                        .filter(
                            start_date__lte=mandate[
                                'administration']['end_date']
                            or datetime.date.today())\
                        .filter(
                            Q(end_date__isnull=True) | Q(
                                end_date__gte=mandate[
                                    'administration']['start_date']
                            ))\
                        .all()
                    if llps:
                        # always pick the latest, in case the adminstration
                        # overlapped
                        mandate_item.legislative_period = llps[
                            llps.count() - 1]
                        mandate_item.save()
                except Exception as e:
                    # # nope, that didn't work, but nevermind #passiveaggressivecomment
                    # print e.message
                    # import ipdb
                    # ipdb.set_trace()
                    pass
            # NOTE(review): bare except plus an interactive ipdb breakpoint --
            # debugging leftovers that will hang an unattended crawl; remove.
            except:
                self.logger.warning(
                    red("Error saving Mandate {} ({})".format(function_item, administration_item)))
                import ipdb
                ipdb.set_trace()
            person_item.save()
            # First time we encounter a person, we scan her detail page too
            if not parl_id in self.persons_scraped:
                # Create Detail Page request
                req = scrapy.Request(p['source_link'],
                                     callback=self.parse_person_detail)
                req.meta['person'] = {
                    'reversed_name': p['reversed_name'],
                    'source_link': p['source_link'],
                    'parl_id': parl_id
                }
                callback_requests.append(req)
                self.persons_scraped.append(parl_id)
        return callback_requests
    def get_administration_item(self, mandate):
        # Upsert the Administration referenced by this mandate and return it.
        # Do we have this administration already?
        admin_data = {
            'start_date': mandate['administration']['start_date'],
            'end_date': mandate['administration']['end_date']
        }
        admin_item, created = Administration.objects.update_or_create(
            title=mandate['administration']['title'][0],
            defaults=admin_data)
        if created:
            admin_item.save()
            self.logger.debug(u"Created administration {}".format(
                green(u'[{}]'.format(admin_item.title))))
        return admin_item
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n#!/usr/bin/env python\n\nclass Graph:\n \"Plot various graphs into burndown chart\"\n\n def __init__ (self, graph_data):\n self.getGraphData(graph_data)\n\n def getGraphData(self, graph_data):\n self.x =... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n#!/usr/bin/env python\n\nclass Graph:\n \"Plot various graphs into burndown chart\"\n\n def __init__ (self, graph_data):\n self.getGraphData(graph_data)\n\n def getGraphData(self, graph_data... | ```python
#!/usr/bin/env python
class Graph:
    "Plot various graphs into burndown chart"
    def __init__ (self, graph_data):
        self.getGraphData(graph_data)
    def getGraphData(self, graph_data):
        # Unpack the plotting inputs into attributes.  'x_extra'/'y_extra'
        # (the bonus-task series) are optional; the draw_* flags control
        # which annotations are rendered later.
        self.x = graph_data['x']
        self.y = graph_data['y']
        self.xy_extra = 0
        self.ymin = graph_data['ymin']
        self.ymax = graph_data['ymax']
        self.total = graph_data['total']
        self.plot_count = graph_data['plot_count']
        self.draw_tasks_diff = graph_data['draw_tasks_diff']
        self.draw_bonus_tasks_diff = graph_data['draw_bonus_tasks_diff']
        if 'x_extra' in graph_data:
            self.x_extra = graph_data['x_extra']
            self.y_extra = graph_data['y_extra']
            self.xy_extra = 1
        if self.draw_tasks_diff:
            self.x_arrow_start_end = graph_data['x_arrow_start_end']
            self.y_arrow_start = graph_data['y_arrow_start']
            self.y_arrow_end = graph_data['y_arrow_end']
            self.y_text = graph_data['y_text']
        if self.draw_bonus_tasks_diff:
            self.y_arrow_start_bonus = graph_data['y_arrow_start_bonus']
            self.y_arrow_end_bonus = graph_data['y_arrow_end_bonus']
            self.y_text_bonus = graph_data['y_text_bonus']
            self.bonus_tasks_day_one = graph_data['bonus_tasks_day_one']
        self.subplot = graph_data['subplot']
        return
    def draw(self, y_label, color, marker, linestyle, linewidth, plot):
        # Render the main line plus bonus line, scope-change bars and the
        # configured annotations onto self.subplot.  From the third y-axis
        # on, the right spine is pushed outwards so axes don't overlap.
        self.plot = plot
        self.subplot.set_ylabel(y_label, color=color)
        self.subplot.set_ylim([self.ymin, self.ymax])
        if self.plot_count == 1:
            self.subplot.tick_params(axis='y', colors=color)
        if self.plot_count >= 2:
            self.subplot.tick_params(axis='y', colors=color)
            self.subplot.spines['right'].set_position(('axes', 1.15))
            self.plot.fig.subplots_adjust(right=0.8)
        self.subplot.plot(self.x, self.y, color=color, marker=marker, linestyle=linestyle, linewidth=linewidth)
        self.drawBonus(color, marker, linestyle, linewidth)
        self.drawBars(color)
        if self.draw_tasks_diff:
            self.drawTasksDiff(color)
        if self.draw_bonus_tasks_diff:
            self.drawBonusTasksDiff(color)
        return
    def drawBonus(self, color, marker, linestyle, linewidth):
        # Plot the optional bonus-task series with the same styling.
        if self.xy_extra and len(self.x_extra) > 0:
            self.subplot.plot(self.x_extra, self.y_extra, color=color, marker=marker, linestyle=linestyle, linewidth=linewidth)
        return
    def drawBars(self, color):
        # Draw a bar for every day on which the total changed (scope added
        # or removed mid-sprint); the first plot's bars are shifted left so
        # bars from two plots sit side by side.
        if len(self.total) > 1:
            width = 0.2
            offset = 0
            if self.plot_count == 1:
                offset = -width
            new = [0, 0]
            for i in range(1, len(self.total)):
                new.append(self.total[i] - self.total[i - 1])
            additional_days = []
            additional = []
            for i in range(len(new)):
                if new[i] != 0:
                    additional_days.append(i + offset)
                    additional.append(new[i])
            if len(additional) > 0:
                self.subplot.bar(additional_days, additional, width, color=color)
        return
    def drawTasksDiff(self, color):
        # Annotate day one with an arrow spanning the tasks already done
        # before the sprint started.
        tasks_done = self.total[0] - self.y[0]
        if tasks_done > 0:
            self.subplot.annotate("",
                xy=(self.x_arrow_start_end, self.y_arrow_start), xycoords='data',
                xytext=(self.x_arrow_start_end, self.y_arrow_end), textcoords='data',
                arrowprops=dict(arrowstyle="<|-|>", connectionstyle="arc3", color=color)
            )
            self.subplot.text(0.7, self.y_text, str(int(tasks_done)) + " tasks done",
                rotation='vertical', verticalalignment='top', color=color
            )
        return
    def drawBonusTasksDiff(self, color):
        # Same day-one annotation for completed bonus tasks.
        if self.bonus_tasks_day_one:
            self.subplot.annotate("",
                xy=(self.x_arrow_start_end, self.y_arrow_start_bonus), xycoords='data',
                xytext=(self.x_arrow_start_end, self.y_arrow_end_bonus), textcoords='data',
                arrowprops=dict(arrowstyle="<|-|>", connectionstyle="arc3", color=color)
            )
            self.subplot.text(0.4, self.y_text_bonus, str(int(-self.y_extra[0])) + " extra",
                rotation='vertical', verticalalignment='center', color=color
            )
            self.subplot.text(0.7, self.y_text_bonus, "tasks done",
                rotation='vertical', verticalalignment='center', color=color
            )
        return
``` |
[
{
"content": "```python\n\"\"\"\nRecurrent layers.\n\n\nTODO: write more documentation\n\"\"\"\n__docformat__ = 'restructedtext en'\n__authors__ = (\"Razvan Pascanu \"\n \"KyungHyun Cho \"\n \"Caglar Gulcehre \")\n__contact__ = \"Razvan Pascanu <r.pascanu@gmail>\"\n\nimport numpy\nim... | [
{
"content": "<|memory_start|>```python\n\"\"\"\nRecurrent layers.\n\n\nTODO: write more documentation\n\"\"\"\n__docformat__ = 'restructedtext en'\n__authors__ = (\"Razvan Pascanu \"\n \"KyungHyun Cho \"\n \"Caglar Gulcehre \")\n__contact__ = \"Razvan Pascanu <r.pascanu@gmail>\"\n\n... | ```python
"""
Recurrent layers.
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import copy
import theano
import theano.tensor as TT
# Nicer interface of scan
from theano.sandbox.scan import scan
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog import utils
from groundhog.utils import sample_weights, \
sample_weights_classic,\
init_bias, \
constant_shape
from basic import Layer
class RecurrentMultiLayer(Layer):
"""
Constructs a recurrent layer whose transition from h_tm1 to h_t is given
by an MLP or logistic regression. In our ICLR submission this is a
DT-RNN model.
"""
def __init__(self,
rng,
n_hids=[500,500],
activation = [TT.tanh, TT.tanh],
scale=.01,
sparsity = -1,
activ_noise=0.,
weight_noise=False,
dropout = 1.,
init_fn='sample_weights',
bias_fn='init_bias',
bias_scale = 0.,
grad_scale = 1.,
profile = 0,
name=None):
"""
:type rng: numpy random generator
:param rng: numpy random generator
:type n_in: int
:param n_in: number of inputs units
:type n_hids: list of ints
:param n_hids: Number of hidden units on each layer of the MLP
:type activation: string/function or list of
:param activation: Activation function for the embedding layers. If
a list it needs to have a value for each layer. If not, the same
activation will be applied to all layers
:type scale: float or list of
:param scale: depending on the initialization function, it can be
the standard deviation of the Gaussian from which the weights
are sampled or the largest singular value. If a single value it
will be used for each layer, otherwise it has to have one value
for each layer
:type sparsity: int or list of
:param sparsity: if a single value, it will be used for each layer,
otherwise it has to be a list with as many values as layers. If
negative, it means the weight matrix is dense. Otherwise it
means this many randomly selected input units are connected to
an output unit
:type weight_noise: bool
:param weight_noise: If true, the model is used with weight noise
(and the right shared variable are constructed, to keep track of the
noise)
:type dropout: float
:param dropout: the probability with which hidden units are dropped
from the hidden layer. If set to 1, dropout is not used
:type init_fn: string or function
:param init_fn: function used to initialize the weights of the
layer. We recommend using either `sample_weights_classic` or
`sample_weights` defined in the utils
:type bias_fn: string or function
:param bias_fn: function used to initialize the biases. We recommend
using `init_bias` defined in the utils
:type bias_scale: float
:param bias_scale: argument passed to `bias_fn`, depicting the scale
of the initial bias
:type grad_scale: float or theano scalar
:param grad_scale: factor with which the gradients with respect to
the parameters of this layer are scaled. It is used for
differentiating between the different parameters of a model.
:type name: string
:param name: name of the layer (used to name parameters). NB: in
this library names are very important because certain parts of the
code relies on name to disambiguate between variables, therefore
each layer should have a unique name.
"""
self.grad_scale = grad_scale
if type(n_hids) not in (list, tuple):
n_hids = [n_hids]
n_layers = len(n_hids)
if type(scale) not in (list, tuple):
scale = [scale] * n_layers
if type(sparsity) not in (list, tuple):
sparsity = [sparsity] * n_layers
for idx, sp in enumerate(sparsity):
if sp < 0: sparsity[idx] = n_hids[idx]
if type(activation) not in (list, tuple):
activation = [activation] * n_layers
if type(bias_scale) not in (list, tuple):
bias_scale = [bias_scale] * (n_layers-1)
if type(bias_fn) not in (list, tuple):
bias_fn = [bias_fn] * (n_layers-1)
if type(init_fn) not in (list, tuple):
init_fn = [init_fn] * n_layers
for dx in xrange(n_layers):
if dx < n_layers-1:
if type(bias_fn[dx]) is str or type(bias_fn[dx]) is unicode:
bias_fn[dx] = eval(bias_fn[dx])
if type(init_fn[dx]) is str or type(init_fn[dx]) is unicode:
init_fn[dx] = eval(init_fn[dx])
if type(activation[dx]) is str or type(activation[dx]) is unicode:
activation[dx] = eval(activation[dx])
self.scale = scale
self.n_layers = n_layers
self.sparsity = sparsity
self.activation = activation
self.n_hids = n_hids
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self.weight_noise = weight_noise
self.activ_noise = activ_noise
self.profile = profile
self.dropout = dropout
assert rng is not None, "random number generator should not be empty!"
super(RecurrentMultiLayer, self).__init__(n_hids[0],
n_hids[-1],
rng,
name)
self.trng = RandomStreams(self.rng.randint(int(1e6)))
self.params = []
self._init_params()
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
self.b_hhs.append(theano.shared(
self.bias_fn[dx-1](self.n_hids[dx],
self.bias_scale[dx-1],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
self.params_grad_scale = [self.grad_scale for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
use_noise=True,
no_noise_bias=False):
"""
Constructs the computational graph of a single step of the recurrent
layer.
:type state_below: theano variable
:param state_below: the input to the layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type state_before: theano variable
:param state_before: the previous value of the hidden state of the
layer
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hss)]
else:
b_hhs = self.b_hhs
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
preactiv = TT.dot(state_before, W_hhs[0]) +state_below
h = self.activation[0](preactiv)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval +=[h]
for dx in xrange(1, self.n_layers):
preactiv = TT.dot(h, W_hhs[dx]) + b_hhs[dx-1]
h = self.activation[dx](preactiv)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
def fprop(self,
          state_below,
          mask=None,
          init_state=None,
          n_steps=None,
          batch_size=None,
          use_noise=True,
          truncate_gradient=-1,
          no_noise_bias=False):
    """
    Evaluates the forward pass through the recurrent layer.

    :type state_below: theano variable
    :param state_below: the input of the recurrent layer (already projected
        through the input weights)
    :type mask: None or theano variable
    :param mask: mask describing the length of each sequence in a
        minibatch
    :type init_state: theano variable or None
    :param init_state: initial state for the hidden layer; zeros when None
    :type n_steps: None or int or theano scalar
    :param n_steps: Number of steps the recurrent network does
    :type batch_size: int
    :param batch_size: the size of the minibatch over which scan runs
    :type use_noise: bool
    :param use_noise: flag saying if weight noise should be used in
        computing the output of this layer
    :type truncate_gradient: int
    :param truncate_gradient: If negative, no truncation is used,
        otherwise truncated BPTT is used, where you go backwards only this
        amount of steps
    :type no_noise_bias: bool
    :param no_noise_bias: flag saying if weight noise should be added to
        the bias as well
    """
    if theano.config.floatX == 'float32':
        floatX = numpy.float32
    else:
        floatX = numpy.float64
    if n_steps is None:
        n_steps = state_below.shape[0]
        if batch_size and batch_size != 1:
            # state_below arrived flattened over (time, batch).
            n_steps = n_steps / batch_size
    if batch_size is None and state_below.ndim == 3:
        batch_size = state_below.shape[1]
    if state_below.ndim == 2 and \
       (not isinstance(batch_size, int) or batch_size > 1):
        state_below = state_below.reshape((n_steps, batch_size, self.nin))
    # Robustness fix: compare against None explicitly rather than relying
    # on the truthiness of a symbolic theano variable.
    if init_state is None:
        if not isinstance(batch_size, int) or batch_size != 1:
            init_state = TT.alloc(floatX(0), batch_size, self.nhid)
        else:
            init_state = TT.alloc(floatX(0), self.nhid)
    if mask is not None:
        inps = [state_below, mask]
        fn = lambda x, y, z: self.step_fprop(x, y, None, z,
                                             use_noise=use_noise,
                                             no_noise_bias=no_noise_bias)
    else:
        inps = [state_below]
        fn = lambda tx, ty: self.step_fprop(tx, None, None, ty,
                                            use_noise=use_noise,
                                            no_noise_bias=no_noise_bias)
    if self.dropout < 1. and use_noise:
        # Build the dropout mask for every layer and every step outside of
        # scan, then slice it inside step_fprop.
        allhid = numpy.sum(self.n_hids)
        shape = state_below.shape
        if state_below.ndim == 3:
            alldpmask = self.trng.binomial(
                (n_steps, batch_size, allhid),
                n=1, p=self.dropout, dtype=state_below.dtype)
        else:
            alldpmask = self.trng.binomial(
                (n_steps, allhid),
                n=1, p=self.dropout, dtype=state_below.dtype)
        inps.append(alldpmask)
        if mask is not None:
            fn = lambda x, y, z, u: self.step_fprop(x, y, z, u,
                                                    use_noise=use_noise)
        else:
            fn = lambda tx, ty, tu: self.step_fprop(tx, None, ty, tu,
                                                    use_noise=use_noise)
    rval, updates = theano.scan(fn,
                                sequences=inps,
                                outputs_info=[None] * (self.n_layers - 1) +
                                    [init_state],
                                name='layer_%s' % self.name,
                                profile=self.profile,
                                truncate_gradient=truncate_gradient,
                                n_steps=n_steps)
    if not isinstance(rval, (list, tuple)):
        rval = [rval]
    # The last output of the step function is the new hidden state.
    self.out = rval[-1]
    self.rval = rval
    self.updates = updates
    return self.out
class RecurrentMultiLayerInp(RecurrentMultiLayer):
    """
    Similar to the RecurrentMultiLayer, with the exception that the input is
    fed into the top layer of the MLP (rather than being an input to the
    MLP).
    """
    def _init_params(self):
        """Allocate the transition-MLP weights and biases as shared vars."""
        self.W_hhs = []
        self.b_hhs = []
        for dx in xrange(self.n_layers):
            # Layer dx reads layer dx-1; layer 0 wraps around and reads the
            # previous top-layer state ((dx-1) % n_layers).
            W_hh = self.init_fn[dx](self.n_hids[(dx - 1) % self.n_layers],
                                    self.n_hids[dx],
                                    self.sparsity[dx],
                                    self.scale[dx],
                                    rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                            (dx, self.name)))
            if dx < self.n_layers - 1:
                # The top layer receives the (pre-projected) input instead
                # of a bias, hence only n_layers-1 bias vectors.
                self.b_hhs.append(theano.shared(
                    self.bias_fn[dx](self.n_hids[dx],
                                     self.bias_scale[dx],
                                     self.rng),
                    name='b%d_%s' % (dx, self.name)))
        self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            # Zero-initialised shared variables holding additive weight noise.
            self.nW_hhs = [theano.shared(x.get_value() * 0, name='noise_' + x.name)
                           for x in self.W_hhs]
            self.nb_hhs = [theano.shared(x.get_value() * 0, name='noise_' + x.name)
                           for x in self.b_hhs]
            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
                                          for x in self.noise_params]

    def step_fprop(self,
                   state_below,
                   mask=None,
                   dpmask=None,
                   state_before=None,
                   no_noise_bias=False,
                   use_noise=True):
        """
        See parent class
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            # BUGFIX: the noisy weights live in self.nW_hhs; the old code
            # referenced the non-existent attribute self.nW_hss, which raised
            # an AttributeError whenever weight noise was enabled.
            W_hhs = [(x + y) for x, y in zip(self.W_hhs, self.nW_hhs)]
            if not no_noise_bias:
                b_hhs = [(x + y) for x, y in zip(self.b_hhs, self.nb_hhs)]
            else:
                b_hhs = self.b_hhs
        else:
            W_hhs = self.W_hhs
            b_hhs = self.b_hhs
        # First layer: recurrent projection plus bias (input enters at the
        # top layer in this variant).
        h = self.activation[0](TT.dot(state_before,
                                      W_hhs[0]) + b_hhs[0])
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # dpidx tracks consumption of the concatenated dropout mask.
                if h.ndim == 2:
                    h = h * dpmask[:, :h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        for dx in xrange(1, self.n_layers - 1):
            # Intermediate layers of the transition MLP.
            h = self.activation[dx](TT.dot(h,
                                           W_hhs[dx]) + b_hhs[dx])
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if self.dropout < 1.:
                if use_noise:
                    if h.ndim == 2:
                        h = h * dpmask[:, dpidx:dpidx + h.shape[1]]
                        dpidx = dpidx + h.shape[1]
                    else:
                        h = h * dpmask[dpidx:dpidx + h.shape[0]]
                        dpidx = dpidx + h.shape[0]
                else:
                    h = h * self.dropout
            rval += [h]
        # Top layer: add the (pre-projected) input instead of a bias.
        h = self.activation[-1](TT.dot(h, W_hhs[-1]) + state_below)
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                if h.ndim == 2:
                    h = h * dpmask[:, dpidx:dpidx + h.shape[1]]
                    dpidx = dpidx + h.shape[1]
                else:
                    h = h * dpmask[dpidx:dpidx + h.shape[0]]
                    dpidx = dpidx + h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        if mask is not None:
            if h.ndim == 2 and mask.ndim == 1:
                mask = mask.dimshuffle(0, 'x')
            # Keep the previous state at padded positions.
            h = mask * h + (1 - mask) * state_before
            rval[-1] = h
        return rval
class RecurrentMultiLayerShortPath(RecurrentMultiLayer):
    """
    A similar layer to RecurrentMultiLayer (the DT-RNN), with the difference
    that we have shortcut connections in the MLP representing the transition
    from previous hidden state to the next
    """
    def _init_params(self):
        # W_hhs[dx]: weight from layer dx-1 into layer dx of the transition
        # MLP (layer 0 wraps around to the previous top-layer state).
        self.W_hhs = []
        self.b_hhs = []
        # W_shortp[dx-1]: shortcut from the previous hidden state straight
        # into layer dx (only for dx > 0).
        self.W_shortp = []
        for dx in xrange(self.n_layers):
            W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
                                    self.n_hids[dx],
                                    self.sparsity[dx],
                                    self.scale[dx],
                                    rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                            (dx,self.name)))
            if dx > 0:
                W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
                                         self.n_hids[dx],
                                         self.sparsity[dx],
                                         self.scale[dx],
                                         rng=self.rng)
                self.W_shortp.append(theano.shared(value=W_shp,
                                                   name='W_s%d_%s'%(dx,self.name)))
                # Only layers dx > 0 carry a bias (the first layer gets the
                # pre-projected input added instead).
                self.b_hhs.append(theano.shared(
                    self.bias_fn[dx-1](self.n_hids[dx],
                                       self.bias_scale[dx-1],
                                       self.rng),
                    name='b%d_%s' %(dx, self.name)))
        self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
                [x for x in self.W_shortp]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            # Zero-initialised shared variables holding additive weight noise.
            self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
            self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
            self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]

    def step_fprop(self,
                   state_below,
                   mask=None,
                   dpmask=None,
                   state_before=None,
                   no_noise_bias=False,
                   use_noise=True):
        """
        See parent class
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            # Perturb weights (and optionally biases) with additive noise.
            W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
            if not no_noise_bias:
                b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]
            else:
                b_hhs = self.b_hhs
            W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
        else:
            W_hhs = self.W_hhs
            b_hhs = self.b_hhs
            W_shp = self.W_shortp
        # First layer: recurrent projection plus the (pre-projected) input;
        # state_below is assumed to already carry input weights and bias.
        h = self.activation[0](TT.dot(state_before,
                                      W_hhs[0])+state_below)
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # dpmask packs one dropout mask per layer along the last
                # axis; dpidx tracks how much has been consumed so far.
                if h.ndim == 2:
                    h = h * dpmask[:,:h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                # Inference: rescale activations instead of masking.
                h = h * self.dropout
        rval += [h]
        for dx in xrange(1, self.n_layers):
            # Deeper layers additionally receive the shortcut projection of
            # the previous hidden state.
            h = self.activation[dx](TT.dot(h,
                                           W_hhs[dx])+
                                    TT.dot(state_before,
                                           W_shp[dx-1])+b_hhs[dx-1])
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if self.dropout < 1.:
                if use_noise:
                    if h.ndim == 2:
                        h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
                        dpidx = dpidx + h.shape[1]
                    else:
                        h = h * dpmask[dpidx:dpidx+h.shape[0]]
                        dpidx = dpidx + h.shape[0]
                else:
                    h = h * self.dropout
            rval += [h]
        if mask is not None:
            if h.ndim ==2 and mask.ndim==1:
                mask = mask.dimshuffle(0,'x')
            # Keep the previous state at padded positions.
            h = mask * h + (1-mask) * state_before
            rval[-1] = h
        return rval
class RecurrentMultiLayerShortPathInp(RecurrentMultiLayer):
    """
    Similar to the RecurrentMultiLayerShortPath class, just that the input
    is fed into the last layer of the MLP (similar to
    RecurrentMultiLayerInp).
    """
    def _init_params(self):
        """Allocate weights, shortcut weights and biases as shared vars."""
        self.W_hhs = []
        self.b_hhs = []
        self.W_shortp = []
        for dx in xrange(self.n_layers):
            W_hh = self.init_fn[dx](self.n_hids[(dx - 1) % self.n_layers],
                                    self.n_hids[dx],
                                    self.sparsity[dx],
                                    self.scale[dx],
                                    rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                            (dx, self.name)))
            if dx > 0:
                # Shortcut from the previous hidden state into layer dx.
                W_shp = self.init_fn[dx](self.n_hids[self.n_layers - 1],
                                         self.n_hids[dx],
                                         self.sparsity[dx],
                                         self.scale[dx],
                                         rng=self.rng)
                self.W_shortp.append(theano.shared(value=W_shp,
                                                   name='W_s%d_%s' % (dx, self.name)))
            if dx < self.n_layers - 1:
                # The top layer gets the input added instead of a bias.
                self.b_hhs.append(theano.shared(
                    self.bias_fn[dx](self.n_hids[dx],
                                     self.bias_scale[dx],
                                     self.rng),
                    name='b%d_%s' % (dx, self.name)))
        self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
                [x for x in self.W_shortp]
        self.restricted_params = [x for x in self.params]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        if self.weight_noise:
            # Zero-initialised shared variables holding additive weight noise.
            self.nW_hhs = [theano.shared(x.get_value() * 0, name='noise_' + x.name)
                           for x in self.W_hhs]
            self.nb_hhs = [theano.shared(x.get_value() * 0, name='noise_' + x.name)
                           for x in self.b_hhs]
            self.nW_shortp = [theano.shared(x.get_value() * 0, name='noise_' + x.name)
                              for x in self.W_shortp]
            self.noise_params = [x for x in self.nW_hhs] + \
                [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
                                          for x in self.noise_params]

    def step_fprop(self,
                   state_below,
                   mask=None,
                   dpmask=None,
                   state_before=None,
                   no_noise_bias=False,
                   use_noise=True):
        """
        See parent class
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            W_hhs = [(x + y) for x, y in zip(self.W_hhs, self.nW_hhs)]
            if not no_noise_bias:
                b_hhs = [(x + y) for x, y in zip(self.b_hhs, self.nb_hhs)]
            else:
                b_hhs = self.b_hhs
            W_shp = [(x + y) for x, y in zip(self.W_shortp, self.nW_shortp)]
        else:
            W_hhs = self.W_hhs
            b_hhs = self.b_hhs
            W_shp = self.W_shortp
        # First layer: recurrent projection plus bias (the input enters at
        # the top layer in this variant).
        h = self.activation[0](TT.dot(state_before,
                                      W_hhs[0]) + b_hhs[0])
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # dpidx tracks consumption of the concatenated dropout mask.
                if h.ndim == 2:
                    h = h * dpmask[:, :h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        for dx in xrange(1, self.n_layers - 1):
            h = self.activation[dx](TT.dot(h,
                                           W_hhs[dx]) +
                                    TT.dot(state_before,
                                           W_shp[dx - 1]) + b_hhs[dx])
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if self.dropout < 1.:
                if use_noise:
                    if h.ndim == 2:
                        h = h * dpmask[:, dpidx:dpidx + h.shape[1]]
                        dpidx = dpidx + h.shape[1]
                    else:
                        h = h * dpmask[dpidx:dpidx + h.shape[0]]
                        dpidx = dpidx + h.shape[0]
                else:
                    h = h * self.dropout
            rval += [h]
        # Top layer: shortcut plus the (pre-projected) input, no bias.
        h = self.activation[-1](TT.dot(h, W_hhs[-1]) +
                                TT.dot(state_before, W_shp[-1]) + state_below)
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # BUGFIX: the old code restarted the dropout-mask slice at 0
                # for the top layer (dpmask[:, :h.shape[1]]), re-using the
                # first layer's mask region and leaving the region allocated
                # for the top layer unused. Advance dpidx like the sibling
                # classes do.
                if h.ndim == 2:
                    h = h * dpmask[:, dpidx:dpidx + h.shape[1]]
                    dpidx = dpidx + h.shape[1]
                else:
                    h = h * dpmask[dpidx:dpidx + h.shape[0]]
                    dpidx = dpidx + h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        if mask is not None:
            if h.ndim == 2 and mask.ndim == 1:
                mask = mask.dimshuffle(0, 'x')
            h = mask * h + (1 - mask) * state_before
            # BUGFIX: the old code appended h (rval += [h]) which made the
            # step function return n_layers+1 outputs whenever a mask was
            # given, misaligning theano.scan's outputs_info
            # ([None]*(n_layers-1) + [init_state], i.e. exactly n_layers
            # outputs). Replace the last entry instead, as the sibling
            # classes do.
            rval[-1] = h
        return rval
class RecurrentMultiLayerShortPathInpAll(RecurrentMultiLayer):
    """
    Similar to RecurrentMultiLayerShortPathInp class, just that the input is
    fed to all layers of the MLP depicting the deep transition between h_tm1
    to h_t.
    """
    def _init_params(self):
        # No biases in this variant: every layer receives its own slice of
        # the (pre-projected) input instead.
        self.W_hhs = []
        self.W_shortp = []
        for dx in xrange(self.n_layers):
            W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
                                    self.n_hids[dx],
                                    self.sparsity[dx],
                                    self.scale[dx],
                                    rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                            (dx,self.name)))
            if dx > 0:
                # Shortcut from the previous hidden state into layer dx.
                W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
                                         self.n_hids[dx],
                                         self.sparsity[dx],
                                         self.scale[dx],
                                         rng=self.rng)
                self.W_shortp.append(theano.shared(value=W_shp,
                                                   name='W_s%d_%s'%(dx,self.name)))
        self.params = [x for x in self.W_hhs] +\
                [x for x in self.W_shortp]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            # Zero-initialised shared variables holding additive weight noise.
            self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
            self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nW_shortp]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]

    def step_fprop(self,
                   state_below,
                   mask=None,
                   dpmask=None,
                   state_before=None,
                   no_noise_bias=False,
                   use_noise=True):
        """
        See parent class
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            # Perturb weights with the sampled additive noise.
            W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
            W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
        else:
            W_hhs = self.W_hhs
            W_shp = self.W_shortp
        def slice_state_below(dx, sb = state_below):
            # state_below concatenates one input projection per layer along
            # the last axis; return the slab belonging to layer dx.
            st = 0
            for p in xrange(dx):
                st += self.n_hids[p]
            ed = st + self.n_hids[dx]
            if sb.ndim == 1:
                return sb[st:ed]
            else:
                return sb[:,st:ed]
        # First layer: recurrent projection plus this layer's input slice.
        h = self.activation[0](TT.dot(state_before, W_hhs[0]) + slice_state_below(0))
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # dpidx tracks consumption of the concatenated dropout mask.
                if h.ndim == 2:
                    h = h * dpmask[:,:h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                # Inference: rescale activations instead of masking.
                h = h * self.dropout
        rval += [h]
        for dx in xrange(1, self.n_layers):
            # Deeper layers: previous layer, shortcut from the previous
            # hidden state, and this layer's own input slice.
            h = self.activation[dx](TT.dot(h, W_hhs[dx]) +
                                    TT.dot(state_before, W_shp[dx-1]) +
                                    slice_state_below(dx))
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if self.dropout < 1.:
                if use_noise:
                    if h.ndim == 2:
                        h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
                        dpidx = dpidx + h.shape[1]
                    else:
                        h = h * dpmask[dpidx:dpidx+h.shape[0]]
                        dpidx = dpidx + h.shape[0]
                else:
                    h = h * self.dropout
            rval += [h]
        if mask is not None:
            if h.ndim ==2 and mask.ndim==1:
                mask = mask.dimshuffle(0,'x')
            # Keep the previous state at padded positions.
            h = mask * h + (1-mask) * state_before
            rval[-1] = h
        return rval
``` |
[
{
"content": "Here is the code content:\n```python\n#!/usr/bin/python3\n\n#--------------------------------------------------------------------\n# Function: enumer_d_and_c\n# Description: Computes the maximum sub-array and the associated sum using a div# ide and conquer algorithm\n# Receives: values - list o... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\n#!/usr/bin/python3\n\n#--------------------------------------------------------------------\n# Function: enumer_d_and_c\n# Description: Computes the maximum sub-array and the associated sum using a div# ide and conquer algorithm\n# Receives:... | ```python
#!/usr/bin/python3
#--------------------------------------------------------------------
# Function: enumer_d_and_c
# Description: Computes the maximum sub-array and the associated sum using a div# ide and conquer algorithm
# Receives: values - list of integers
# Returns: maximum sub-array sum, and maximum sub-array
# Preconditions: "values constains at least one positive integer
#--------------------------------------------------------------------
# Importing argv to allow the method to be used as a CL utility
from sys import argv
def enumer_d_and_c(values):
    """Return (max_sum, max_subarray) for *values* via divide and conquer.

    The best sub-array is either entirely in the left half, entirely in the
    right half, or crosses the midpoint; the crossing candidate is found by
    scanning outwards from the middle, the other two by recursion.
    Precondition (per module header): at least one positive integer.
    """
    # Base cases: empty input and single element.
    if not values:
        return 0, values
    if len(values) == 1:
        return values[0], values

    mid = len(values) // 2

    # Best suffix of the left half, scanning from the midpoint outwards.
    best_left, running, cross_lo = 0, 0, mid
    for idx in range(mid - 1, -1, -1):
        running += values[idx]
        if running > best_left:
            best_left, cross_lo = running, idx

    # Best prefix of the right half, scanning from the midpoint outwards.
    best_right, running, cross_hi = 0, 0, mid
    for idx in range(mid, len(values)):
        running += values[idx]
        if running > best_right:
            best_right, cross_hi = running, idx + 1

    cross_sum = best_left + best_right

    # Recurse on the two halves.
    left_sum, left_arr = enumer_d_and_c(values[:mid])
    right_sum, right_arr = enumer_d_and_c(values[mid:])

    # Pick the largest candidate; ties are resolved in the same order as
    # the original cascade (crossing first, then left, then right).
    if cross_sum >= left_sum and cross_sum >= right_sum:
        return cross_sum, values[cross_lo:cross_hi]
    if left_sum >= right_sum:
        return left_sum, left_arr
    return right_sum, right_arr
# If block that allows this file to be run as a CL utility
if __name__ == "__main__":
    # Parse every CLI argument as an integer and print the resulting
    # (max_sum, max_subarray) tuple.
    print(enumer_d_and_c([int(x) for x in argv[1:]]))
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this fil... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF l... | ```python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Type coders registration.
This module contains functionality to define and use coders for custom classes.
Let's say we have a class Xyz and we are processing a PCollection with elements
of type Xyz. If we do not register a coder for Xyz, a default pickle-based
fallback coder will be used. This can be undesirable for two reasons. First, we
may want a faster coder or a more space efficient one. Second, the pickle-based
coder is not deterministic in the sense that objects like dictionaries or sets
are not guaranteed to be encoded in the same way every time (elements are not
really ordered).
Two (sometimes three) steps are needed to define and use a custom coder:
- define the coder class
- associate the code with the class (a.k.a. coder registration)
- typehint DoFns or transforms with the new class or composite types using
the class.
A coder class is defined by subclassing from CoderBase and defining the
encode_to_bytes and decode_from_bytes methods. The framework uses duck-typing
for coders so it is not strictly required to subclass from CoderBase as long as
the encode/decode methods are defined.
Registering a coder class is made with a register_coder() call::
from apache_beam import coders
...
coders.registry.register_coder(Xyz, XyzCoder)
Additionally, DoFns and PTransforms may need type hints. This is not always
necessary since there is functionality to infer the return types of DoFns by
analyzing the code. For instance, for the function below the return type of
'Xyz' will be inferred::
def MakeXyzs(v):
return Xyz(v)
If Xyz is inferred then its coder will be used whenever the framework needs to
serialize data (e.g., writing to the shuffler subsystem responsible for group by
key operations). If a typehint is needed it can be specified by decorating the
DoFns or using with_input_types/with_output_types methods on PTransforms. For
example, the above function can be decorated::
@with_output_types(Xyz)
def MakeXyzs(v):
return complex_operation_returning_Xyz(v)
See apache_beam.typehints.decorators module for more details.
"""
from __future__ import absolute_import
from builtins import object
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Type
from past.builtins import unicode
from apache_beam.coders import coders
from apache_beam.typehints import typehints
__all__ = ['registry']
class CoderRegistry(object):
    """A coder registry for typehint/coder associations."""

    def __init__(self, fallback_coder=None):
        # Mapping from typehint (or TypeConstraint class) to coder class.
        self._coders = {}  # type: Dict[Any, Type[coders.Coder]]
        # User-registered (non-standard) types, in registration order.
        self.custom_types = []  # type: List[Any]
        self.register_standard_coders(fallback_coder)

    def register_standard_coders(self, fallback_coder):
        """Register coders for all basic and composite types."""
        self._register_coder_internal(int, coders.VarIntCoder)
        self._register_coder_internal(float, coders.FloatCoder)
        self._register_coder_internal(bytes, coders.BytesCoder)
        self._register_coder_internal(bool, coders.BooleanCoder)
        self._register_coder_internal(unicode, coders.StrUtf8Coder)
        self._register_coder_internal(typehints.TupleConstraint, coders.TupleCoder)
        # Default fallback coders applied in that order until the first matching
        # coder found.
        default_fallback_coders = [coders.ProtoCoder, coders.FastPrimitivesCoder]
        self._fallback_coder = fallback_coder or FirstOf(default_fallback_coders)

    def _register_coder_internal(self, typehint_type, typehint_coder_class):
        # type: (Any, Type[coders.Coder]) -> None
        self._coders[typehint_type] = typehint_coder_class

    def register_coder(self, typehint_type, typehint_coder_class):
        # type: (Any, Type[coders.Coder]) -> None
        # Public registration entry point: validates the argument and tracks
        # the type as a custom one.
        if not isinstance(typehint_coder_class, type):
            raise TypeError('Coder registration requires a coder class object. '
                            'Received %r instead.' % typehint_coder_class)
        if typehint_type not in self.custom_types:
            self.custom_types.append(typehint_type)
        self._register_coder_internal(typehint_type, typehint_coder_class)

    def get_coder(self, typehint):
        # type: (Any) -> coders.Coder
        # TypeConstraint instances are looked up by their class; everything
        # else by the typehint itself.
        coder = self._coders.get(
            typehint.__class__ if isinstance(typehint, typehints.TypeConstraint)
            else typehint, None)
        if isinstance(typehint, typehints.TypeConstraint) and coder is not None:
            return coder.from_type_hint(typehint, self)
        if coder is None:
            # We use the fallback coder when there is no coder registered for a
            # typehint. For example a user defined class with no coder specified.
            if not hasattr(self, '_fallback_coder'):
                raise RuntimeError(
                    'Coder registry has no fallback coder. This can happen if the '
                    'fast_coders module could not be imported.')
            if isinstance(typehint, (typehints.IterableTypeConstraint,
                                     typehints.ListConstraint)):
                return coders.IterableCoder.from_type_hint(typehint, self)
            elif typehint is None:
                # In some old code, None is used for Any.
                # TODO(robertwb): Clean this up.
                pass
            elif typehint is object or typehint == typehints.Any:
                # We explicitly want the fallback coder.
                pass
            elif isinstance(typehint, typehints.TypeVariable):
                # TODO(robertwb): Clean this up when type inference is fully enabled.
                pass
            else:
                # TODO(robertwb): Re-enable this warning when it's actionable.
                # warnings.warn('Using fallback coder for typehint: %r.' % typehint)
                pass
            coder = self._fallback_coder
        return coder.from_type_hint(typehint, self)

    def get_custom_type_coder_tuples(self, types):
        """Returns type/coder tuples for all custom types passed in."""
        return [(t, self._coders[t]) for t in types if t in self.custom_types]

    def verify_deterministic(self, key_coder, op_name, silent=True):
        # NOTE(review): the `silent` parameter is accepted but never read in
        # this method body.
        if not key_coder.is_deterministic():
            error_msg = ('The key coder "%s" for %s '
                         'is not deterministic. This may result in incorrect '
                         'pipeline output. This can be fixed by adding a type '
                         'hint to the operation preceding the GroupByKey step, '
                         'and for custom key classes, by writing a '
                         'deterministic custom Coder. Please see the '
                         'documentation for more details.' % (key_coder, op_name))
            return key_coder.as_deterministic_coder(op_name, error_msg)
        else:
            return key_coder
class FirstOf(object):
    """For internal use only; no backwards-compatibility guarantees.

    A class used to get the first matching coder from a list of coders."""
    def __init__(self, coders):
        # type: (Iterable[Type[coders.Coder]]) -> None
        self._coders = coders

    def from_type_hint(self, typehint, registry):
        """Return the first candidate coder that accepts *typehint*.

        Raises ValueError (with every candidate's failure message) when no
        candidate matches.
        """
        messages = []
        for coder in self._coders:
            try:
                # BUGFIX: forward the registry that was passed in, not `self`.
                # A FirstOf instance is not a CoderRegistry, so any coder whose
                # from_type_hint resolves component coders via the registry
                # would fail; the `registry` parameter was previously unused.
                return coder.from_type_hint(typehint, registry)
            except Exception as e:
                msg = ('%s could not provide a Coder for type %s: %s' %
                       (coder, typehint, e))
                messages.append(msg)
        raise ValueError('Cannot provide coder for %s: %s' %
                         (typehint, ';'.join(messages)))
# Module-level singleton registry, created at import time and shared by all
# users of this module.
registry = CoderRegistry()
``` |
[
{
"content": "Here is the code content:\n```python\n# coding:utf-8\nimport json\nfrom cryptacular.bcrypt import BCRYPTPasswordManager\nfrom webob import exc\nfrom webob.response import Response\n\n\ndef convert_datetime(model_object):\n model_object['pubDate'] = model_object['pubDate'].strftime(\"%Y-%m-%d %H... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\n# coding:utf-8\nimport json\nfrom cryptacular.bcrypt import BCRYPTPasswordManager\nfrom webob import exc\nfrom webob.response import Response\n\n\ndef convert_datetime(model_object):\n model_object['pubDate'] = model_object['pubDate'].strfti... | ```python
# coding:utf-8
import json
from cryptacular.bcrypt import BCRYPTPasswordManager
from webob import exc
from webob.response import Response
def convert_datetime(model_object):
    """Format the pubDate/modifiedDate datetime fields in place.

    Mutates and returns the given mapping; both fields must be present and
    support strftime.
    """
    for field in ('pubDate', 'modifiedDate'):
        model_object[field] = model_object[field].strftime("%Y-%m-%d %H:%M:%S")
    return model_object
def convert_price(price_int):
    """Render an integer price (stored in hundredths) as a 2-decimal string."""
    amount = price_int / 100.0
    return "%.2f" % amount
def get_id_from_ref(source):
    """Return the ``id`` attribute of *source* coerced to a string."""
    ref_id = source.id
    return str(ref_id)
# Collection-name constants — presumably the datastore collection
# identifiers used by the persistence layer; confirm against callers.
PRODUCT_MASTER_COLLECTION = 'product_master'
PRODUCT_COLLECTION = 'product'
PRODUCT_IMAGE_COLLECTION = 'product_image'
PRODUCT_TAG_COLLECTION = 'product_tag'
class UnauthorizedView(exc.HTTPError):
    """HTTP 401 error whose body is a JSON {'status', 'message'} payload."""
    def __init__(self, msg=u'Unauthorized'):
        body = {'status': 401, 'message': msg}
        # NOTE(review): calls Response.__init__ directly (not the
        # exc.HTTPError initializer) so the JSON string becomes the raw
        # response body.
        Response.__init__(self, json.dumps(body))
        self.status = 401
        self.content_type = 'application/json'
class BadRequestView(exc.HTTPError):
    """HTTP 400 error whose body is a JSON {'status', 'message'} payload."""
    def __init__(self, msg=u'Bad request, missing data.'):
        body = {'status': 400, 'message': msg}
        # NOTE(review): calls Response.__init__ directly (not the
        # exc.HTTPError initializer) so the JSON string becomes the raw
        # response body.
        Response.__init__(self, json.dumps(body))
        self.status = 400
        self.content_type = 'application/json'
class NotFoundView(exc.HTTPError):
    """HTTP 404 error whose body is a JSON {'status', 'message'} payload."""
    def __init__(self, msg=u'Not Found.'):
        body = {'status': 404, 'message': msg}
        # NOTE(review): calls Response.__init__ directly (not the
        # exc.HTTPError initializer) so the JSON string becomes the raw
        # response body.
        Response.__init__(self, json.dumps(body))
        self.status = 404
        self.content_type = 'application/json'
# Shared bcrypt password hashing/verification helper instance.
password_manager = BCRYPTPasswordManager()
``` |
[
{
"content": "Here is the snippet:\n```python\n#pylint: disable=invalid-name, too-few-public-methods, too-many-public-methods\n#pylint: disable=protected-access, missing-docstring, too-many-locals\n#pylint: disable=too-many-arguments\n#pylint: disable=deprecated-method\n\nfrom __future__ import print_function, ... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n#pylint: disable=invalid-name, too-few-public-methods, too-many-public-methods\n#pylint: disable=protected-access, missing-docstring, too-many-locals\n#pylint: disable=too-many-arguments\n#pylint: disable=deprecated-method\n\nfrom __future__ import ... | ```python
#pylint: disable=invalid-name, too-few-public-methods, too-many-public-methods
#pylint: disable=protected-access, missing-docstring, too-many-locals
#pylint: disable=too-many-arguments
#pylint: disable=deprecated-method
from __future__ import print_function, absolute_import, division
from argparse import Namespace
import os
from test.utils_test import BaseConnorTestCase
from testfixtures.tempdirectory import TempDirectory
import connor.command_validator as validator
import connor.utils as utils
from connor.utils import UsageError
class MockTask(object):
    """Test double recording how ``execute`` was invoked.

    When constructed with an *error_message*, ``execute`` raises instead of
    recording its arguments (``execute_called`` is still set).
    """
    def __init__(self, error_message=None):
        self.error_message = error_message
        self.args = None
        self.execute_called = False
        self.log = None

    def execute(self, args, log):
        self.execute_called = True
        # Fail fast when an error message was configured; otherwise capture
        # the call arguments for later assertions.
        if self.error_message:
            raise Exception(self.error_message)
        self.args = args
        self.log = log
class CommandValidatorTest(BaseConnorTestCase):
def test_Validations(self):
function_names = [f.__name__ for f in validator._VALIDATIONS]
self.assertEqual(['_check_input_bam_exists',
'_check_input_bam_valid',
'_check_input_bam_indexed',
'_check_input_bam_not_deduped',
'_check_input_bam_not_empty',
'_check_input_bam_no_secondary',
'_check_input_bam_paired',
'_check_input_bam_properly_paired',
'_check_input_bam_consistent_length',
'_check_overwrite_output'],
function_names)
def test_preflight_runsAllValidations(self):
task1 = MockTask()
task2 = MockTask()
validator._VALIDATIONS = [task1.execute,
task2.execute]
args = Namespace()
log = self.mock_logger
validator.preflight(args, log)
self.assertTrue(task1.execute_called)
self.assertEqual(task1.args, args)
self.assertEqual(task1.log, log)
self.assertTrue(task2.execute_called)
self.assertEqual(task2.args, args)
self.assertEqual(task2.log, log)
def test_check_input_bam_exists_ok(self):
with TempDirectory() as tmp_dir:
tmp_dir.write('input.bam', b'foo')
input_bam_path = os.path.join(tmp_dir.path, 'input.bam')
args = Namespace(input_bam=input_bam_path)
validator._check_input_bam_exists(args)
self.ok()
def test_check_input_bam_exists_raisesUsageError(self):
with TempDirectory() as tmp_dir:
input_bam_path = os.path.join(tmp_dir.path, 'input.bam')
args = Namespace(input_bam=input_bam_path)
self.assertRaisesRegexp(utils.UsageError,
r'\[.*input.bam\] does not exist',
validator._check_input_bam_exists,
args)
def test_check_input_bam_valid_ok(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=False)
args = Namespace(input_bam=input_bam_path)
validator._check_input_bam_valid(args)
self.ok()
def test_check_input_bam_valid_raisesUsageError(self):
with TempDirectory() as tmp_dir:
tmp_dir.write('input.bam', b'foo')
input_bam_path = os.path.join(tmp_dir.path, 'input.bam')
args = Namespace(input_bam=input_bam_path)
self.assertRaisesRegexp(utils.UsageError,
r'\[.*input.bam\] not a valid BAM',
validator._check_input_bam_valid,
args)
def test_check_input_bam_indexed_ok(self):
    # A BAM created with its index passes the index check.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path)
        validator._check_input_bam_indexed(args)
        self.ok()
def test_check_input_bam_indexed_raisesUsageError(self):
    # The same BAM built without an index must raise UsageError.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=False)
        args = Namespace(input_bam=input_bam_path)
        self.assertRaisesRegexp(utils.UsageError,
                                r'\[.*input.bam\] is not indexed',
                                validator._check_input_bam_indexed,
                                args)
def test_check_input_bam_not_deduped_ok(self):
    # A @PG record from a different program (bwa) is fine: no error, no log.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
@PG|ID:foo|PN:bwa
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=True)
        validator._check_input_bam_not_deduped(args, self.mock_logger)
        self.ok()
        self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_input_bam_not_deduped_raisesUsageError(self):
    # A @PG record with PN:connor marks prior processing; without --force
    # the check must refuse to reprocess.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
@PG|ID:foo|PN:connor
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=False)
        regex = (r'\[.*input.bam\] has already been processed with Connor'
                 r'.*Are you sure.*force')
        self.assertRaisesRegexp(utils.UsageError,
                                regex,
                                validator._check_input_bam_not_deduped,
                                args)
def test_check_input_bam_not_deduped_noPgHeader(self):
    # No @PG header at all means the BAM was never processed: check passes.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=False)
        validator._check_input_bam_not_deduped(args)
        self.ok()
def test_check_input_bam_not_deduped_noPnHeader(self):
    # A @PG record lacking the PN tag cannot identify Connor: check passes.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
@PG|ID:bwa|VN:1.3
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=False)
        validator._check_input_bam_not_deduped(args)
        self.ok()
def test_check_input_bam_not_deduped_warnIfForced(self):
    # With --force, a previously Connor-processed BAM passes but logs one
    # WARNING explaining that processing was forced.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
@PG|ID:foo|PN:connor
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=True)
        validator._check_input_bam_not_deduped(args,
                                               log=self.mock_logger)
        warnings = self.mock_logger._log_calls['WARNING']
        self.assertEqual(1, len(warnings))
        regex = (r'\[.*input.bam\] has already been processed with Connor'
                 r'.*forcing')
        self.assertRegexpMatches(warnings[0], regex)
def test_check_input_bam_not_empty_raiseUsageError(self):
    # A header-only BAM (no alignments) must be rejected as empty.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
'''.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=False)
        self.assertRaisesRegexp(utils.UsageError,
                                r'\[.*input.bam\] is empty',
                                validator._check_input_bam_not_empty,
                                args)
def test_check_input_bam_not_empty_ok(self):
    # A single alignment is enough for the not-empty check to pass.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=False)
        validator._check_input_bam_not_empty(args)
        self.ok()
def test_check_input_bam_no_secondary_raisesUsageError(self):
    # SAM flag 256 marks a secondary alignment; without --force the check
    # must raise.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
    sam_contents = sam_contents.format(flag='256').replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=False)
        regex = r'\[.*input.bam\] contains secondary alignments\..*'
        self.assertRaisesRegexp(utils.UsageError,
                                regex,
                                validator._check_input_bam_no_secondary,
                                args)
def test_check_input_bam_no_secondary_ok(self):
    # Flag 1 (paired) is not secondary, so the check passes silently.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
    sam_contents = sam_contents.format(flag='1')
    sam_contents = sam_contents.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=True)
        validator._check_input_bam_no_secondary(args, self.mock_logger)
        self.ok()
        self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_input_bam_no_secondary_warnIfForced(self):
    # With --force, secondary alignments downgrade to a single WARNING.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
    sam_contents = sam_contents.format(flag='256').replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=True)
        validator._check_input_bam_no_secondary(args, self.mock_logger)
        warnings = self.mock_logger._log_calls['WARNING']
        self.assertEqual(1, len(warnings))
        regex = r'\[.*input.bam\] contains secondary alignments\..*forcing'
        self.assertRegexpMatches(warnings[0], regex)
def test_check_input_bam_paired_raisesUsageError(self):
    # Flag 16 (reverse strand, unpaired): no paired reads -> UsageError.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
    sam_contents = sam_contents.format(flag='16').replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=False)
        regex = r'\[.*input.bam\] does not appear to contain paired reads'
        self.assertRaisesRegexp(utils.UsageError,
                                regex,
                                validator._check_input_bam_paired,
                                args)
def test_check_input_bam_paired_ok(self):
    # One paired read (flag 99) among unpaired reads satisfies the check.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{unpaired_flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA1|{paired_flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
    sam_contents = sam_contents.format(unpaired_flag='16', paired_flag='99')
    sam_contents = sam_contents.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=True)
        validator._check_input_bam_paired(args, self.mock_logger)
        self.ok()
        self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_input_bam_paired_warnIfForced(self):
    # With --force, an unpaired-only BAM passes but logs one WARNING.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
    sam_contents = sam_contents.format(flag='16').replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=True)
        validator._check_input_bam_paired(args, self.mock_logger)
        warnings = self.mock_logger._log_calls['WARNING']
        self.assertEqual(1, len(warnings))
        regex = (r'\[.*input.bam\] does not appear to contain paired '
                 r'reads.*forcing')
        self.assertRegexpMatches(warnings[0], regex)
def test_check_input_bam_properly_paired_raisesUsageError(self):
    # Flag 1 is paired but not "properly paired" (bit 0x2 unset) -> error.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
    sam_contents = sam_contents.format(flag='1').replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=False)
        regex = r'\[.*input.bam\] does not appear to contain any properly paired alignments'
        self.assertRaisesRegexp(utils.UsageError,
                                regex,
                                validator._check_input_bam_properly_paired,
                                args)
def test_check_input_bam_properly_paired_ok(self):
    # One properly-paired read (flag 99, bit 0x2 set) satisfies the check.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{unpaired_flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA1|{paired_flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
    sam_contents = sam_contents.format(unpaired_flag='2', paired_flag='99')
    sam_contents = sam_contents.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=True)
        validator._check_input_bam_properly_paired(args, self.mock_logger)
        self.ok()
        self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_input_bam_properly_paired_warnIfForced(self):
    # With --force, a no-properly-paired BAM passes with one WARNING.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
    sam_contents = sam_contents.format(flag='1').replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=True)
        validator._check_input_bam_properly_paired(args, self.mock_logger)
        warnings = self.mock_logger._log_calls['WARNING']
        self.assertEqual(1, len(warnings))
        regex = (r'\[.*input.bam\] does not appear to contain any properly paired '
                 r'alignments.*forcing')
        self.assertRegexpMatches(warnings[0], regex)
def test_check_input_bam_barcoded_ok(self):
    # All five pairs carry softclipped barcodes (3S5M left / 5M3S right),
    # so the barcode heuristic passes without logging anything.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA1|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA2|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA2|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA3|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA3|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA4|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA4|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA5|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA5|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
'''
    sam_contents = sam_contents.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=True)
        validator._check_input_bam_barcoded(args, self.mock_logger)
        self.ok()
        # assertEqual replaces the deprecated assertEquals alias for
        # consistency with the rest of this suite.
        self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_input_bam_barcoded_okAtThreshold(self):
    # One of five pairs (A1, plain 8M) lacks a barcode: exactly at the
    # tolerated threshold, so the check still passes silently.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|8M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA1|147|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>>>>
readNameA2|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA2|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA3|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA3|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA4|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA4|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA5|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA5|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
'''
    sam_contents = sam_contents.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=False)
        validator._check_input_bam_barcoded(args, self.mock_logger)
        self.ok()
        # assertEqual replaces the deprecated assertEquals alias for
        # consistency with the rest of this suite.
        self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_input_bam_barcoded_leftUnbarcodedRaisesUsageError(self):
    # Too many left-side reads without a softclip (A1 and A2 are 8M) push
    # the left barcode rate below threshold -> UsageError.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|8M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA1|147|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>>>>
readNameA2|99|chr10|100|20|8M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA2|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA3|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA3|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA4|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA4|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA5|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA5|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
'''
    sam_contents = sam_contents.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=False)
        regex = r'\[.*input.bam\] reads do not appear to have barcodes'
        self.assertRaisesRegexp(utils.UsageError,
                                regex,
                                validator._check_input_bam_barcoded,
                                args)
def test_check_input_bam_barcoded_rightUnbarcodedRaisesUsageError(self):
    # Mirror case: too many right-side reads without a softclip (A1 and A2
    # are 8M on the 147-flag mate) -> UsageError.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|8M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA1|147|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>>>>
readNameA2|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA2|147|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>>>>
readNameA3|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA3|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA4|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA4|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA5|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA5|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
'''
    sam_contents = sam_contents.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=False)
        regex = r'\[.*input.bam\] reads do not appear to have barcodes'
        self.assertRaisesRegexp(utils.UsageError,
                                regex,
                                validator._check_input_bam_barcoded,
                                args)
def test_check_input_bam_barcoded_warnIfForced(self):
    # With --force, an unbarcoded-looking BAM passes with one WARNING.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|8M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA1|147|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>>>>
readNameA2|99|chr10|100|20|8M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA2|147|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>>>>
readNameA3|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA3|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA4|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA4|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA5|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA5|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
'''
    sam_contents = sam_contents.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=True)
        validator._check_input_bam_barcoded(args, self.mock_logger)
        warnings = self.mock_logger._log_calls['WARNING']
        self.assertEqual(1, len(warnings))
        regex = r'\[.*input.bam\] reads do not appear to have barcodes.*forcing'
        self.assertRegexpMatches(warnings[0], regex)
def test_check_input_bam_consistent_length_okAtThreshold(self):
    # Exactly one of ten reads per side (A1 at 10M) deviates from the
    # dominant length: at the tolerated threshold, so no error, no log.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA3|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA4|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA5|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA6|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA7|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA8|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA9|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA0|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA1|147|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA3|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA4|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA5|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA6|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA7|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA8|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA9|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA0|147|chr10|100|20|3M|=|300|200|AAA|>>>
'''
    sam_contents = sam_contents.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=True)
        validator._check_input_bam_consistent_length(args, self.mock_logger)
        self.ok()
        # assertEqual replaces the deprecated assertEquals alias for
        # consistency with the rest of this suite.
        self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_input_bam_consistent_length_posRaisesUsageError(self):
    # Two positive-strand reads (A1 10M, A2 8M) deviate from the dominant
    # 5M length: above threshold, so the check must raise without --force.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|99|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>!!!
readNameA3|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA4|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA5|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA6|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA7|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA8|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA9|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA0|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA1|147|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA3|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA4|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA5|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA6|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA7|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA8|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA9|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA0|147|chr10|100|20|3M|=|300|200|AAA|>>>
'''
    sam_contents = sam_contents.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=False)
        regex = (r'\[.*input.bam\] reads appear to have inconsistent '
                 r'sequence lengths\..*force')
        # utils.UsageError: qualified for consistency with the rest of the
        # suite (the bare UsageError name referred to the same class).
        self.assertRaisesRegexp(utils.UsageError,
                                regex,
                                validator._check_input_bam_consistent_length,
                                args)
def test_check_input_bam_consistent_length_negRaisesUsageError(self):
    # Two negative-strand reads (A1 10M, A2 5M) deviate from the dominant
    # 3M length on that side: above threshold, so the check must raise.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA3|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA4|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA5|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA6|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA7|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA8|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA9|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA0|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA1|147|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|147|chr10|100|20|5M|=|300|200|AAANN|>>>!!
readNameA3|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA4|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA5|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA6|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA7|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA8|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA9|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA0|147|chr10|100|20|3M|=|300|200|AAA|>>>
'''
    sam_contents = sam_contents.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=False)
        regex = (r'\[.*input.bam\] reads appear to have inconsistent '
                 r'sequence lengths\..*force')
        # utils.UsageError: qualified for consistency with the rest of the
        # suite (the bare UsageError name referred to the same class).
        self.assertRaisesRegexp(utils.UsageError,
                                regex,
                                validator._check_input_bam_consistent_length,
                                args)
def test_check_input_bam_consistent_length_warnIfForced(self):
    # With --force, inconsistent lengths pass with a single WARNING.
    sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|99|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>!!!
readNameA3|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA4|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA5|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA6|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA7|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA8|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA9|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA0|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA1|147|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|147|chr10|100|20|5M|=|300|200|AAANN|>>>!!
readNameA3|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA4|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA5|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA6|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA7|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA8|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA9|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA0|147|chr10|100|20|3M|=|300|200|AAA|>>>
'''
    sam_contents = sam_contents.replace("|", "\t")
    with TempDirectory() as tmp_dir:
        input_bam_path = self.create_bam(tmp_dir.path,
                                         "input.sam",
                                         sam_contents,
                                         index=True)
        args = Namespace(input_bam=input_bam_path, force=True)
        validator._check_input_bam_consistent_length(args, self.mock_logger)
        self.ok()
        warnings = self.mock_logger._log_calls['WARNING']
        self.assertEqual(1, len(warnings))
        regex = (r'\[.*input.bam\] reads appear to have inconsistent '
                 r'sequence lengths\..*forcing')
        self.assertRegexpMatches(warnings[0], regex)
def test_check_overwrite_output_ok(self):
    # Neither output file exists yet, so the overwrite check passes
    # silently without --force.
    with TempDirectory() as tmp_dir:
        deduped_bam_path = os.path.join(tmp_dir.path, 'deduped.bam')
        annotated_bam_path = os.path.join(tmp_dir.path, 'annotated.bam')
        args = Namespace(output_bam=deduped_bam_path,
                         annotated_output_bam=annotated_bam_path,
                         force=False)
        validator._check_overwrite_output(args, self.mock_logger)
        self.ok()
        self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_overwrite_output_raisesUsageErrorIfDedupedPresent(self):
    # An existing deduped output must raise (and not merely log) without
    # --force.
    with TempDirectory() as tmp_dir:
        tmp_dir.write('deduped.bam', b'foo')
        deduped_bam_path = os.path.join(tmp_dir.path, 'deduped.bam')
        annotated_bam_path = os.path.join(tmp_dir.path, 'annotated.bam')
        args = Namespace(output_bam=deduped_bam_path,
                         annotated_output_bam=annotated_bam_path,
                         force=False)
        self.assertRaisesRegexp(utils.UsageError,
                                r'\[.*deduped.bam\] exist.*force',
                                validator._check_overwrite_output,
                                args,
                                self.mock_logger)
        self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_overwrite_output_raisesUsageErrorIfAnnotatedPresent(self):
    # An existing annotated output alone is also enough to raise.
    with TempDirectory() as tmp_dir:
        tmp_dir.write('annotated.bam', b'foo')
        deduped_bam_path = os.path.join(tmp_dir.path, 'deduped.bam')
        annotated_bam_path = os.path.join(tmp_dir.path, 'annotated.bam')
        args = Namespace(output_bam=deduped_bam_path,
                         annotated_output_bam=annotated_bam_path,
                         force=False)
        self.assertRaisesRegexp(utils.UsageError,
                                r'\[.*annotated.bam\] exist.*force',
                                validator._check_overwrite_output,
                                args,
                                self.mock_logger)
        self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_overwrite_output_raisesUsageErrorIfBothPresent(self):
    # Both outputs present: the error message must list both files.
    with TempDirectory() as tmp_dir:
        tmp_dir.write('deduped.bam', b'foo')
        tmp_dir.write('annotated.bam', b'bar')
        deduped_bam_path = os.path.join(tmp_dir.path, 'deduped.bam')
        annotated_bam_path = os.path.join(tmp_dir.path, 'annotated.bam')
        args = Namespace(output_bam=deduped_bam_path,
                         annotated_output_bam=annotated_bam_path,
                         force=False)
        regex = r'\[.*deduped.bam, .*annotated.bam\] exist.*force'
        self.assertRaisesRegexp(utils.UsageError,
                                regex,
                                validator._check_overwrite_output,
                                args,
                                self.mock_logger)
        self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_overwrite_output_warnIfForced(self):
    # With --force, existing outputs are tolerated but logged as a single
    # WARNING that lists both files.
    with TempDirectory() as tmp_dir:
        tmp_dir.write('deduped.bam', b'foo')
        tmp_dir.write('annotated.bam', b'bar')
        deduped_bam_path = os.path.join(tmp_dir.path, 'deduped.bam')
        annotated_bam_path = os.path.join(tmp_dir.path, 'annotated.bam')
        args = Namespace(output_bam=deduped_bam_path,
                         annotated_output_bam=annotated_bam_path,
                         force=True)
        validator._check_overwrite_output(args, self.mock_logger)
        warnings = self.mock_logger._log_calls['WARNING']
        regex = r'\[.*deduped.bam, .*annotated.bam\] exist.*forcing'
        self.assertEqual(1, len(warnings))
        self.assertRegexpMatches(warnings[0], regex)
``` |
[
{
"content": "```python\n# type: ignore\nimport asyncio\nimport io\nimport json\nimport pathlib\nimport sys\nimport zlib\nfrom typing import Any, Optional\nfrom unittest import mock\n\nimport pytest\n\nimport aiohttp\nfrom aiohttp import payload\nfrom aiohttp.hdrs import (\n CONTENT_DISPOSITION,\n CONTENT... | [
{
"content": "<|memory_start|>```python\n# type: ignore\nimport asyncio\nimport io\nimport json\nimport pathlib\nimport sys\nimport zlib\nfrom typing import Any, Optional\nfrom unittest import mock\n\nimport pytest\n\nimport aiohttp\nfrom aiohttp import payload\nfrom aiohttp.hdrs import (\n CONTENT_DISPOSITI... | ```python
# type: ignore
import asyncio
import io
import json
import pathlib
import sys
import zlib
from typing import Any, Optional
from unittest import mock
import pytest
import aiohttp
from aiohttp import payload
from aiohttp.hdrs import (
CONTENT_DISPOSITION,
CONTENT_ENCODING,
CONTENT_TRANSFER_ENCODING,
CONTENT_TYPE,
)
from aiohttp.helpers import parse_mimetype
from aiohttp.multipart import MultipartResponseWrapper
from aiohttp.streams import StreamReader
from aiohttp.test_utils import make_mocked_coro
BOUNDARY: bytes = b"--:"
def pytest_generate_tests(metafunc: Any) -> None:  # pragma: no cover
    # Run every test that takes a ``newline`` fixture under both CRLF and
    # LF line endings.
    if "newline" in metafunc.fixturenames:
        metafunc.parametrize("newline", [b"\r\n", b"\n"], ids=str)
@pytest.fixture
def buf():
    """Empty bytearray that accumulates everything the mock writer emits."""
    return bytearray()
@pytest.fixture
def stream(buf: Any):
    """Mock writer whose ``write`` coroutine appends each chunk to ``buf``."""
    writer = mock.Mock()

    async def write(chunk):
        buf.extend(chunk)

    writer.write.side_effect = write
    return writer
@pytest.fixture
def writer():
    """MultipartWriter with the fixed boundary ':' for predictable output."""
    return aiohttp.MultipartWriter(boundary=":")
class Response:
    """Minimal response stand-in: just a headers mapping and a body stream."""

    headers: Any
    content: Any

    def __init__(self, headers: Any, content: Any) -> None:
        # Store both collaborators verbatim; tests inspect them directly.
        self.headers, self.content = headers, content
class Stream:
content: Any
def __init__(self, content: Any) -> None:
self.content = io.BytesIO(content)
async def read(self, size: Optional[Any] = None):
return self.content.read(size)
def at_eof(self):
return self.content.tell() == len(self.content.getbuffer())
async def readline(self):
return self.content.readline()
def unread_data(self, data: Any) -> None:
self.content = io.BytesIO(data + self.content.read())
class StreamWithShortenRead(Stream):
    """Stream whose very first sized read returns only half the requested
    bytes, to exercise short-read handling in the reader under test."""

    def __init__(self, content: Any) -> None:
        self._first = True
        super().__init__(content)

    async def read(self, size: Optional[Any] = None):
        # Halve only the first sized request; later reads behave normally.
        if self._first and size is not None:
            self._first = False
            size //= 2
        return await super().read(size)
class TestMultipartResponseWrapper:
    """Checks that MultipartResponseWrapper delegates to its response and
    stream collaborators and releases the response once exhausted."""

    def test_at_eof(self) -> None:
        # at_eof() must consult the wrapped response's content stream.
        wrapper = MultipartResponseWrapper(mock.Mock(), mock.Mock())
        wrapper.at_eof()
        assert wrapper.resp.content.at_eof.called

    async def test_next(self) -> None:
        # next() delegates to the underlying multipart stream.
        wrapper = MultipartResponseWrapper(mock.Mock(), mock.Mock())
        wrapper.stream.next = make_mocked_coro(b"")
        wrapper.stream.at_eof.return_value = False
        await wrapper.next()
        assert wrapper.stream.next.called

    async def test_release(self) -> None:
        # release() delegates to the wrapped response.
        wrapper = MultipartResponseWrapper(mock.Mock(), mock.Mock())
        wrapper.resp.release = make_mocked_coro(None)
        await wrapper.release()
        assert wrapper.resp.release.called

    async def test_release_when_stream_at_eof(self) -> None:
        # Exhausting the stream during next() auto-releases the response.
        wrapper = MultipartResponseWrapper(mock.Mock(), mock.Mock())
        wrapper.resp.release = make_mocked_coro(None)
        wrapper.stream.next = make_mocked_coro(b"")
        wrapper.stream.at_eof.return_value = True
        await wrapper.next()
        assert wrapper.stream.next.called
        assert wrapper.resp.release.called
class TestPartReader:
async def test_next(self, newline: Any) -> None:
    # next() yields the body up to the boundary and leaves the part at EOF.
    data = b"Hello, world!%s--:" % newline
    obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
    result = await obj.next()
    assert b"Hello, world!" == result
    assert obj.at_eof()
async def test_next_next(self, newline: Any) -> None:
    # Calling next() after EOF returns None rather than raising.
    data = b"Hello, world!%s--:" % newline
    obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
    result = await obj.next()
    assert b"Hello, world!" == result
    assert obj.at_eof()
    result = await obj.next()
    assert result is None
async def test_read(self, newline: Any) -> None:
    # read() returns the whole body without the trailing boundary.
    data = b"Hello, world!%s--:" % newline
    obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
    result = await obj.read()
    assert b"Hello, world!" == result
    assert obj.at_eof()
async def test_read_chunk_at_eof(self) -> None:
    # A reader already flagged at EOF returns an empty chunk immediately.
    obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(b"--:"))
    obj._at_eof = True
    result = await obj.read_chunk()
    assert b"" == result
async def test_read_chunk_without_content_length(self, newline: Any) -> None:
    # Without Content-Length, chunked reads still stop at the boundary.
    data = b"Hello, world!%s--:" % newline
    obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
    c1 = await obj.read_chunk(8)
    c2 = await obj.read_chunk(8)
    c3 = await obj.read_chunk(8)
    assert c1 + c2 == b"Hello, world!"
    assert c3 == b""
async def test_read_incomplete_chunk(self, newline: Any) -> None:
    # The body arrives in several short reads; chunks must be reassembled
    # without including the trailing boundary.
    stream = Stream(b"")
    if sys.version_info >= (3, 8, 1):
        # Workaround for a weird behavior of patch.object
        def prepare(data):
            return data
    else:
        async def prepare(data):
            return data
    with mock.patch.object(
        stream,
        "read",
        side_effect=[
            prepare(b"Hello, "),
            prepare(b"World"),
            prepare(b"!%s--:" % newline),
            prepare(b""),
        ],
    ):
        obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream, _newline=newline)
        c1 = await obj.read_chunk(8)
        assert c1 == b"Hello, "
        c2 = await obj.read_chunk(8)
        assert c2 == b"World"
        c3 = await obj.read_chunk(8)
        assert c3 == b"!"
async def test_read_all_at_once(self, newline: Any) -> None:
    # An unsized read_chunk() returns the whole body; the next returns b"".
    data = b"Hello, World!%s--:--%s" % (newline, newline)
    obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
    result = await obj.read_chunk()
    assert b"Hello, World!" == result
    result = await obj.read_chunk()
    assert b"" == result
    assert obj.at_eof()
async def test_read_incomplete_body_chunked(self, newline: Any) -> None:
    # A body truncated mid-boundary must eventually raise rather than
    # loop forever.
    data = b"Hello, World!%s--" % newline
    obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
    result = b""
    with pytest.raises(AssertionError):
        for _ in range(4):
            result += await obj.read_chunk(7)
    assert data == result
async def test_read_boundary_with_incomplete_chunk(self, newline: Any) -> None:
    # The boundary itself arrives split across reads; the reader must not
    # leak any boundary bytes into the body.
    stream = Stream(b"")
    if sys.version_info >= (3, 8, 1):
        # Workaround for weird 3.8.1 patch.object() behavior
        def prepare(data):
            return data
    else:
        async def prepare(data):
            return data
    with mock.patch.object(
        stream,
        "read",
        side_effect=[
            prepare(b"Hello, World"),
            prepare(b"!%s" % newline),
            prepare(b"--:"),
            prepare(b""),
        ],
    ):
        obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream, _newline=newline)
        c1 = await obj.read_chunk(12)
        assert c1 == b"Hello, World"
        c2 = await obj.read_chunk(8)
        assert c2 == b"!"
        c3 = await obj.read_chunk(8)
        assert c3 == b""
async def test_multi_read_chunk(self, newline: Any) -> None:
data = b"Hello,%s--:%s%sworld!%s--:--" % ((newline,) * 4)
obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
result = await obj.read_chunk(8)
assert b"Hello," == result
result = await obj.read_chunk(8)
assert b"" == result
assert obj.at_eof()
async def test_read_chunk_properly_counts_read_bytes(self, newline: Any) -> None:
expected = b"." * 10
tail = b"%s--:--" % newline
size = len(expected)
obj = aiohttp.BodyPartReader(
BOUNDARY,
{"CONTENT-LENGTH": size},
StreamWithShortenRead(expected + tail),
_newline=newline,
)
result = bytearray()
while True:
chunk = await obj.read_chunk()
if not chunk:
break
result.extend(chunk)
assert size == len(result)
assert b"." * size == result
assert obj.at_eof()
async def test_read_does_not_read_boundary(self, newline: Any) -> None:
data = b"Hello, world!%s--:" % newline
stream = Stream(data)
obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream, _newline=newline)
result = await obj.read()
assert b"Hello, world!" == result
assert b"--:" == (await stream.read())
async def test_multiread(self, newline: Any) -> None:
data = b"Hello,%s--:%s%sworld!%s--:--" % ((newline,) * 4)
obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
result = await obj.read()
assert b"Hello," == result
result = await obj.read()
assert b"" == result
assert obj.at_eof()
async def test_read_multiline(self, newline: Any) -> None:
data = b"Hello\n,\r\nworld!%s--:--" % newline
obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
result = await obj.read()
assert b"Hello\n,\r\nworld!" == result
result = await obj.read()
assert b"" == result
assert obj.at_eof()
async def test_read_respects_content_length(self, newline: Any) -> None:
data = b"." * 100500
tail = b"%s--:--" % newline
obj = aiohttp.BodyPartReader(
BOUNDARY,
{"CONTENT-LENGTH": 100500},
Stream(data + tail),
_newline=newline,
)
result = await obj.read()
assert data == result
assert obj.at_eof()
async def test_read_with_content_encoding_gzip(self, newline: Any) -> None:
    """decode=True inflates a gzip-compressed part body."""
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_ENCODING: "gzip"},
        Stream(
            # gzip-compressed b"Time to Relax!" followed by the boundary.
            b"\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\x0b\xc9\xccMU"
            b"(\xc9W\x08J\xcdI\xacP\x04\x00$\xfb\x9eV\x0e\x00\x00\x00"
            b"%s--:--" % newline
        ),
        _newline=newline,
    )
    result = await obj.read(decode=True)
    assert b"Time to Relax!" == result

async def test_read_with_content_encoding_deflate(self, newline: Any) -> None:
    """decode=True inflates a raw-deflate part body."""
    data = b"\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00"
    tail = b"%s--:--" % newline
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_ENCODING: "deflate"},
        Stream(data + tail),
        _newline=newline,
    )
    result = await obj.read(decode=True)
    assert b"Time to Relax!" == result

async def test_read_with_content_encoding_identity(self, newline: Any) -> None:
    """identity encoding leaves the payload untouched (still compressed here)."""
    thing = (
        b"\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\x0b\xc9\xccMU"
        b"(\xc9W\x08J\xcdI\xacP\x04\x00$\xfb\x9eV\x0e\x00\x00\x00"
    )
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_ENCODING: "identity"},
        Stream(thing + b"%s--:--" % newline),
        _newline=newline,
    )
    result = await obj.read(decode=True)
    assert thing == result

async def test_read_with_content_encoding_unknown(self, newline: Any) -> None:
    """An unsupported Content-Encoding raises RuntimeError on decode."""
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_ENCODING: "snappy"},
        Stream(b"\x0e4Time to Relax!%s--:--" % newline),
        _newline=newline,
    )
    with pytest.raises(RuntimeError):
        await obj.read(decode=True)

async def test_read_with_content_transfer_encoding_base64(
    self, newline: Any
) -> None:
    """decode=True base64-decodes when Content-Transfer-Encoding is base64."""
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_TRANSFER_ENCODING: "base64"},
        Stream(b"VGltZSB0byBSZWxheCE=%s--:--" % newline),
        _newline=newline,
    )
    result = await obj.read(decode=True)
    assert b"Time to Relax!" == result

async def test_decode_with_content_transfer_encoding_base64(
    self, newline: Any
) -> None:
    """Chunked decode() handles base64 data interleaved with line breaks."""
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_TRANSFER_ENCODING: "base64"},
        Stream(b"VG\r\r\nltZSB0byBSZ\r\nWxheCE=%s--:--" % newline),
        _newline=newline,
    )
    result = b""
    while not obj.at_eof():
        # Chunk size deliberately not a multiple of 4 to split base64 quanta.
        chunk = await obj.read_chunk(size=6)
        result += obj.decode(chunk)
    assert b"Time to Relax!" == result

async def test_read_with_content_transfer_encoding_quoted_printable(
    self, newline: Any
) -> None:
    """decode=True decodes quoted-printable transfer encoding."""
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_TRANSFER_ENCODING: "quoted-printable"},
        Stream(
            b"=D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82,"
            b" =D0=BC=D0=B8=D1=80!%s--:--" % newline
        ),
        _newline=newline,
    )
    result = await obj.read(decode=True)
    expected = (
        b"\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82,"
        b" \xd0\xbc\xd0\xb8\xd1\x80!"
    )
    assert result == expected

@pytest.mark.parametrize("encoding", ("binary", "8bit", "7bit"))
async def test_read_with_content_transfer_encoding_binary(
    self, encoding: Any, newline: Any
) -> None:
    """binary/8bit/7bit transfer encodings are pass-through."""
    data = (
        b"\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82,"
        b" \xd0\xbc\xd0\xb8\xd1\x80!"
    )
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_TRANSFER_ENCODING: encoding},
        Stream(data + b"%s--:--" % newline),
        _newline=newline,
    )
    result = await obj.read(decode=True)
    assert data == result

async def test_read_with_content_transfer_encoding_unknown(
    self, newline: Any
) -> None:
    """An unsupported Content-Transfer-Encoding raises RuntimeError."""
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_TRANSFER_ENCODING: "unknown"},
        Stream(b"\x0e4Time to Relax!%s--:--" % newline),
        _newline=newline,
    )
    with pytest.raises(RuntimeError):
        await obj.read(decode=True)
async def test_read_text(self, newline: Any) -> None:
    """text() returns the part body as str."""
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {},
        Stream(b"Hello, world!%s--:--" % newline),
        _newline=newline,
    )
    result = await obj.text()
    assert "Hello, world!" == result

async def test_read_text_default_encoding(self, newline: Any) -> None:
    """text() defaults to UTF-8 when no charset is given."""
    data = "Привет, Мир!"
    tail = b"%s--:--" % newline
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {},
        Stream(data.encode("utf-8") + tail),
        _newline=newline,
    )
    result = await obj.text()
    assert data == result

async def test_read_text_encoding(self, newline: Any) -> None:
    """An explicit encoding= argument overrides the default."""
    data = "Привет, Мир!"
    tail = b"%s--:--" % newline
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {},
        Stream(data.encode("cp1251") + tail),
        _newline=newline,
    )
    result = await obj.text(encoding="cp1251")
    assert data == result

async def test_read_text_guess_encoding(self, newline: Any) -> None:
    """text() picks up the charset from the Content-Type header."""
    data = "Привет, Мир!"
    tail = b"%s--:--" % newline
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_TYPE: "text/plain;charset=cp1251"},
        Stream(data.encode("cp1251") + tail),
        _newline=newline,
    )
    result = await obj.text()
    assert data == result

async def test_read_text_compressed(self, newline: Any) -> None:
    """text() transparently decompresses a deflate-encoded body."""
    data = b"\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00" b"%s--:--" % newline
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_ENCODING: "deflate", CONTENT_TYPE: "text/plain"},
        Stream(data),
        _newline=newline,
    )
    result = await obj.text()
    assert "Time to Relax!" == result

async def test_read_text_while_closed(self) -> None:
    """text() on an already-exhausted part returns an empty string."""
    obj = aiohttp.BodyPartReader(
        BOUNDARY, {CONTENT_TYPE: "text/plain"}, Stream(b"")
    )
    obj._at_eof = True
    result = await obj.text()
    assert "" == result

async def test_read_json(self, newline: Any) -> None:
    """json() parses an application/json part body."""
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_TYPE: "application/json"},
        Stream(b'{"test": "passed"}%s--:--' % newline),
        _newline=newline,
    )
    result = await obj.json()
    assert {"test": "passed"} == result

async def test_read_json_encoding(self, newline: Any) -> None:
    """An explicit encoding= argument applies to JSON decoding too."""
    data = '{"тест": "пассед"}'.encode("cp1251")
    tail = b"%s--:--" % newline
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_TYPE: "application/json"},
        Stream(data + tail),
        _newline=newline,
    )
    result = await obj.json(encoding="cp1251")
    assert {"тест": "пассед"} == result

async def test_read_json_guess_encoding(self, newline: Any) -> None:
    """json() picks up the charset from the Content-Type header."""
    data = '{"тест": "пассед"}'.encode("cp1251")
    tail = b"%s--:--" % newline
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_TYPE: "application/json; charset=cp1251"},
        Stream(data + tail),
        _newline=newline,
    )
    result = await obj.json()
    assert {"тест": "пассед"} == result

async def test_read_json_compressed(self, newline: Any) -> None:
    """json() transparently decompresses a deflate-encoded body."""
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_ENCODING: "deflate", CONTENT_TYPE: "application/json"},
        Stream(b"\xabV*I-.Q\xb2RP*H,.NMQ\xaa\x05\x00" b"%s--:--" % newline),
        _newline=newline,
    )
    result = await obj.json()
    assert {"test": "passed"} == result

async def test_read_json_while_closed(self) -> None:
    """json() on an already-exhausted part returns None."""
    stream = Stream(b"")
    obj = aiohttp.BodyPartReader(
        BOUNDARY, {CONTENT_TYPE: "application/json"}, stream
    )
    obj._at_eof = True
    result = await obj.json()
    assert result is None
async def test_read_form(self, newline: Any) -> None:
    """form() parses urlencoded data into (key, value) pairs, keeping dups."""
    data = b"foo=bar&foo=baz&boo=%s--:--" % newline
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_TYPE: "application/x-www-form-urlencoded"},
        Stream(data),
        _newline=newline,
    )
    result = await obj.form()
    assert [("foo", "bar"), ("foo", "baz"), ("boo", "")] == result

async def test_read_form_encoding(self, newline: Any) -> None:
    """An explicit encoding= argument is accepted for ASCII-safe form data."""
    data = b"foo=bar&foo=baz&boo=%s--:--" % newline
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_TYPE: "application/x-www-form-urlencoded"},
        Stream(data),
        _newline=newline,
    )
    result = await obj.form(encoding="cp1251")
    assert [("foo", "bar"), ("foo", "baz"), ("boo", "")] == result

async def test_read_form_guess_encoding(self, newline: Any) -> None:
    """form() picks up the charset from the Content-Type header."""
    data = b"foo=bar&foo=baz&boo=%s--:--" % newline
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_TYPE: "application/x-www-form-urlencoded; charset=utf-8"},
        Stream(data),
        _newline=newline,
    )
    result = await obj.form()
    assert [("foo", "bar"), ("foo", "baz"), ("boo", "")] == result

async def test_read_form_while_closed(self) -> None:
    """form() on an already-exhausted part returns an empty result."""
    stream = Stream(b"")
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {CONTENT_TYPE: "application/x-www-form-urlencoded"},
        stream,
    )
    obj._at_eof = True
    result = await obj.form()
    assert not result

async def test_readline(self, newline: Any) -> None:
    """readline() yields body lines, keeping their own line terminators."""
    data = b"Hello\n,\r\nworld!%s--:--" % newline
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {},
        Stream(data),
        _newline=newline,
    )
    result = await obj.readline()
    assert b"Hello\n" == result
    result = await obj.readline()
    assert b",\r\n" == result
    result = await obj.readline()
    assert b"world!" == result
    result = await obj.readline()
    assert b"" == result
    assert obj.at_eof()

async def test_release(self, newline: Any) -> None:
    """release() drains only the current part, leaving the rest in the stream."""
    data = b"Hello,%s--:\r\n\r\nworld!%s--:--" % (newline, newline)
    stream = Stream(data)
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {},
        stream,
        _newline=newline,
    )
    remained = b"--:\r\n\r\nworld!%s--:--" % newline
    await obj.release()
    assert obj.at_eof()
    assert remained == stream.content.read()

async def test_release_respects_content_length(self, newline: Any) -> None:
    """release() consumes exactly CONTENT-LENGTH bytes and returns None."""
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {"CONTENT-LENGTH": 100500},
        Stream(b"." * 100500 + b"%s--:--" % newline),
        _newline=newline,
    )
    result = await obj.release()
    assert result is None
    assert obj.at_eof()

async def test_release_release(self, newline: Any) -> None:
    """A second release() is a no-op and does not eat further stream data."""
    data = b"Hello,%s--:\r\n\r\nworld!%s--:--" % (newline, newline)
    remained = b"--:\r\n\r\nworld!%s--:--" % newline
    stream = Stream(data)
    obj = aiohttp.BodyPartReader(
        BOUNDARY,
        {},
        stream,
        _newline=newline,
    )
    await obj.release()
    await obj.release()
    assert remained == stream.content.read()
async def test_filename(self) -> None:
    """The filename property is extracted from Content-Disposition."""
    headers = {CONTENT_DISPOSITION: "attachment; filename=foo.html"}
    part = aiohttp.BodyPartReader(BOUNDARY, headers, None)
    assert part.filename == "foo.html"
async def test_reading_long_part(self, newline: Any) -> None:
    """A part larger than the StreamReader buffer limit is read completely."""
    size = 2 * 2 ** 16  # twice the reader's buffer limit below
    protocol = mock.Mock(_reading_paused=False)
    stream = StreamReader(protocol, 2 ** 16, loop=asyncio.get_event_loop())
    stream.feed_data(b"0" * size + b"%s--:--" % newline)
    stream.feed_eof()
    obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream, _newline=newline)
    data = await obj.read()
    assert len(data) == size
class TestMultipartReader:
    """Tests for aiohttp.MultipartReader construction and dispatch."""

    def test_from_response(self, newline: Any) -> None:
        """from_response wraps the reader in MultipartResponseWrapper."""
        resp = Response(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b"--:%s\r\nhello%s--:--" % (newline, newline)),
        )
        res = aiohttp.MultipartReader.from_response(resp)
        assert isinstance(res, MultipartResponseWrapper)
        assert isinstance(res.stream, aiohttp.MultipartReader)

    def test_bad_boundary(self) -> None:
        """Boundaries longer than the RFC 2046 limit (70 chars) are rejected."""
        resp = Response(
            {CONTENT_TYPE: "multipart/related;boundary=" + "a" * 80}, Stream(b"")
        )
        with pytest.raises(ValueError):
            aiohttp.MultipartReader.from_response(resp)

    def test_dispatch(self, newline: Any) -> None:
        """Plain parts dispatch to part_reader_cls."""
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b"--:%s\r\necho%s--:--" % (newline, newline)),
        )
        res = reader._get_part_reader({CONTENT_TYPE: "text/plain"})
        assert isinstance(res, reader.part_reader_cls)

    def test_dispatch_bodypart(self, newline: Any) -> None:
        """text/plain headers produce a body-part reader."""
        reader = aiohttp.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b"--:%s\r\necho%s--:--" % (newline, newline)),
        )
        res = reader._get_part_reader({CONTENT_TYPE: "text/plain"})
        assert isinstance(res, reader.part_reader_cls)
def test_dispatch_multipart(self, newline: Any) -> None:
    """Nested multipart headers dispatch to a nested MultipartReader.

    Fix: the last two payload lines were a single implicitly-concatenated
    bytes literal (``b"----:----" b"--:--"``) — almost certainly a missing
    comma, since the sibling test_dispatch_custom_multipart_reader builds
    the identical payload with the two lines as separate list elements
    joined by the newline.
    """
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/related;boundary=":"'},
        Stream(
            newline.join(
                [
                    b"----:--",
                    b"",
                    b"test",
                    b"----:--",
                    b"",
                    b"passed",
                    b"----:----",
                    b"--:--",
                ]
            )
        ),
    )
    res = reader._get_part_reader(
        {CONTENT_TYPE: "multipart/related;boundary=--:--"}
    )
    assert isinstance(res, reader.__class__)
def test_dispatch_custom_multipart_reader(self, newline: Any) -> None:
    """multipart_reader_cls can be overridden with a custom subclass."""
    class CustomReader(aiohttp.MultipartReader):
        pass
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/related;boundary=":"'},
        Stream(
            newline.join(
                [
                    b"----:--",
                    b"",
                    b"test",
                    b"----:--",
                    b"",
                    b"passed",
                    b"----:----",
                    b"--:--",
                ]
            )
        ),
    )
    reader.multipart_reader_cls = CustomReader
    res = reader._get_part_reader(
        {CONTENT_TYPE: "multipart/related;boundary=--:--"}
    )
    assert isinstance(res, CustomReader)

async def test_emit_next(self, newline: Any) -> None:
    """next() yields a part reader for the first part."""
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/related;boundary=":"'},
        Stream(b"--:%s\r\necho%s--:--" % (newline, newline)),
    )
    res = await reader.next()
    assert isinstance(res, reader.part_reader_cls)

async def test_invalid_boundary(self, newline: Any) -> None:
    """A stream whose boundary does not match the header raises ValueError."""
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/related;boundary=":"'},
        Stream(b"---:%s\r\necho%s---:--" % (newline, newline)),
    )
    with pytest.raises(ValueError):
        await reader.next()

async def test_release(self, newline: Any) -> None:
    """release() drains the whole message, including a nested multipart."""
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/mixed;boundary=":"'},
        Stream(
            newline.join(
                [
                    b"--:",
                    b"Content-Type: multipart/related;boundary=--:--",
                    b"",
                    b"----:--",
                    b"",
                    b"test",
                    b"----:--",
                    b"",
                    b"passed",
                    b"----:----",
                    b"",
                    b"--:--",
                ]
            )
        ),
    )
    await reader.release()
    assert reader.at_eof()

async def test_release_release(self, newline: Any) -> None:
    """A second release() is a harmless no-op."""
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/related;boundary=":"'},
        Stream(b"--:%s\r\necho%s--:--" % (newline, newline)),
    )
    await reader.release()
    assert reader.at_eof()
    await reader.release()
    assert reader.at_eof()

async def test_release_next(self, newline: Any) -> None:
    """next() after release() returns None."""
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/related;boundary=":"'},
        Stream(b"--:%s\r\necho%s--:--" % (newline, newline)),
    )
    await reader.release()
    assert reader.at_eof()
    res = await reader.next()
    assert res is None

async def test_second_next_releases_previous_object(self, newline: Any) -> None:
    """Advancing to the next part implicitly drains the previous one."""
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/related;boundary=":"'},
        Stream(
            newline.join(
                [
                    b"--:",
                    b"",
                    b"test",
                    b"--:",
                    b"",
                    b"passed",
                    b"--:--",
                ]
            )
        ),
    )
    first = await reader.next()
    assert isinstance(first, aiohttp.BodyPartReader)
    second = await reader.next()
    assert first.at_eof()
    assert not second.at_eof()
async def test_release_without_read_the_last_object(self, newline: Any) -> None:
    """Iterating past the final part drains every part and then yields None.

    Fix: the original asserted ``second.at_eof()`` twice (copy-paste
    duplication); the redundant assertion is removed.
    """
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/related;boundary=":"'},
        Stream(
            newline.join(
                [
                    b"--:",
                    b"",
                    b"test",
                    b"--:",
                    b"",
                    b"passed",
                    b"--:--",
                ]
            )
        ),
    )
    first = await reader.next()
    second = await reader.next()
    third = await reader.next()
    assert first.at_eof()
    assert second.at_eof()
    assert third is None
async def test_read_chunk_by_length_doesnt_breaks_reader(
    self, newline: Any
) -> None:
    """Chunk sizes that don't divide Content-Length still read parts cleanly."""
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/related;boundary=":"'},
        Stream(
            newline.join(
                [
                    b"--:",
                    b"Content-Length: 4",
                    b"",
                    b"test",
                    b"--:",
                    b"Content-Length: 6",
                    b"",
                    b"passed",
                    b"--:--",
                ]
            )
        ),
    )
    body_parts = []
    while True:
        read_part = b""
        part = await reader.next()
        if part is None:
            break
        while not part.at_eof():
            read_part += await part.read_chunk(3)
        body_parts.append(read_part)
    assert body_parts == [b"test", b"passed"]

async def test_read_chunk_from_stream_doesnt_breaks_reader(
    self, newline: Any
) -> None:
    """Without Content-Length, chunked reads still never return empty chunks."""
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/related;boundary=":"'},
        Stream(
            newline.join(
                [
                    b"--:",
                    b"",
                    b"chunk",
                    b"--:",
                    b"",
                    b"two_chunks",
                    b"--:--",
                ]
            )
        ),
    )
    body_parts = []
    while True:
        read_part = b""
        part = await reader.next()
        if part is None:
            break
        while not part.at_eof():
            chunk = await part.read_chunk(5)
            assert chunk
            read_part += chunk
        body_parts.append(read_part)
    assert body_parts == [b"chunk", b"two_chunks"]

async def test_reading_skips_prelude(self, newline: Any) -> None:
    """Preamble text before the first boundary is ignored (RFC 2046)."""
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/related;boundary=":"'},
        Stream(
            newline.join(
                [
                    b"Multi-part data is not supported.",
                    b"",
                    b"--:",
                    b"",
                    b"test",
                    b"--:",
                    b"",
                    b"passed",
                    b"--:--",
                ]
            )
        ),
    )
    first = await reader.next()
    assert isinstance(first, aiohttp.BodyPartReader)
    second = await reader.next()
    assert first.at_eof()
    assert not second.at_eof()

async def test_read_mixed_newlines(self) -> None:
    """Outer LF and inner CRLF newlines within one message both parse."""
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/mixed;boundary=":"'},
        Stream(
            b"".join(
                [
                    b"--:\n",
                    b"Content-Type: multipart/related;boundary=--:--\n",
                    b"\n",
                    b"----:--\r\n",
                    b"\r\n",
                    b"test\r\n",
                    b"----:--\r\n",
                    b"\r\n",
                    b"passed\r\n",
                    b"----:----\r\n",
                    b"\n",
                    b"--:--",
                ]
            )
        ),
    )
    # Walk every part and sub-part to make sure nothing raises.
    while True:
        part = await reader.next()
        if part is None:
            break
        while True:
            subpart = await part.next()
            if subpart is None:
                break
async def test_writer(writer: Any) -> None:
    """The writer fixture starts empty with boundary ':'."""
    assert writer.size == 7
    assert writer.boundary == ":"

async def test_writer_serialize_io_chunk(buf: Any, stream: Any, writer: Any) -> None:
    """A file-like object serializes as application/octet-stream with length."""
    flo = io.BytesIO(b"foobarbaz")
    writer.append(flo)
    await writer.write(stream)
    assert (
        buf == b"--:\r\nContent-Type: application/octet-stream"
        b"\r\nContent-Length: 9\r\n\r\nfoobarbaz\r\n--:--\r\n"
    )

async def test_writer_serialize_json(buf: Any, stream: Any, writer: Any) -> None:
    """append_json serializes non-ASCII via \\u escapes."""
    writer.append_json({"привет": "мир"})
    await writer.write(stream)
    assert (
        b'{"\\u043f\\u0440\\u0438\\u0432\\u0435\\u0442":'
        b' "\\u043c\\u0438\\u0440"}' in buf
    )

async def test_writer_serialize_form(buf: Any, stream: Any, writer: Any) -> None:
    """append_form with a pair list preserves duplicate keys."""
    data = [("foo", "bar"), ("foo", "baz"), ("boo", "zoo")]
    writer.append_form(data)
    await writer.write(stream)
    assert b"foo=bar&foo=baz&boo=zoo" in buf

async def test_writer_serialize_form_dict(buf: Any, stream: Any, writer: Any) -> None:
    """append_form with a dict percent-encodes non-ASCII values."""
    data = {"hello": "мир"}
    writer.append_form(data)
    await writer.write(stream)
    assert b"hello=%D0%BC%D0%B8%D1%80" in buf

async def test_writer_write(buf: Any, stream: Any, writer: Any) -> None:
    """Full round-trip: text, json, two forms and a nested multipart."""
    writer.append("foo-bar-baz")
    writer.append_json({"test": "passed"})
    writer.append_form({"test": "passed"})
    writer.append_form([("one", 1), ("two", 2)])
    sub_multipart = aiohttp.MultipartWriter(boundary="::")
    sub_multipart.append("nested content")
    sub_multipart.headers["X-CUSTOM"] = "test"
    writer.append(sub_multipart)
    await writer.write(stream)
    assert (
        b"--:\r\n"
        b"Content-Type: text/plain; charset=utf-8\r\n"
        b"Content-Length: 11\r\n\r\n"
        b"foo-bar-baz"
        b"\r\n"
        b"--:\r\n"
        b"Content-Type: application/json\r\n"
        b"Content-Length: 18\r\n\r\n"
        b'{"test": "passed"}'
        b"\r\n"
        b"--:\r\n"
        b"Content-Type: application/x-www-form-urlencoded\r\n"
        b"Content-Length: 11\r\n\r\n"
        b"test=passed"
        b"\r\n"
        b"--:\r\n"
        b"Content-Type: application/x-www-form-urlencoded\r\n"
        b"Content-Length: 11\r\n\r\n"
        b"one=1&two=2"
        b"\r\n"
        b"--:\r\n"
        b'Content-Type: multipart/mixed; boundary="::"\r\n'
        b"X-CUSTOM: test\r\nContent-Length: 93\r\n\r\n"
        b"--::\r\n"
        b"Content-Type: text/plain; charset=utf-8\r\n"
        b"Content-Length: 14\r\n\r\n"
        b"nested content\r\n"
        b"--::--\r\n"
        b"\r\n"
        b"--:--\r\n"
    ) == bytes(buf)

async def test_writer_write_no_close_boundary(buf: Any, stream: Any) -> None:
    """close_boundary=False omits the trailing '--:--' terminator."""
    writer = aiohttp.MultipartWriter(boundary=":")
    writer.append("foo-bar-baz")
    writer.append_json({"test": "passed"})
    writer.append_form({"test": "passed"})
    writer.append_form([("one", 1), ("two", 2)])
    await writer.write(stream, close_boundary=False)
    assert (
        b"--:\r\n"
        b"Content-Type: text/plain; charset=utf-8\r\n"
        b"Content-Length: 11\r\n\r\n"
        b"foo-bar-baz"
        b"\r\n"
        b"--:\r\n"
        b"Content-Type: application/json\r\n"
        b"Content-Length: 18\r\n\r\n"
        b'{"test": "passed"}'
        b"\r\n"
        b"--:\r\n"
        b"Content-Type: application/x-www-form-urlencoded\r\n"
        b"Content-Length: 11\r\n\r\n"
        b"test=passed"
        b"\r\n"
        b"--:\r\n"
        b"Content-Type: application/x-www-form-urlencoded\r\n"
        b"Content-Length: 11\r\n\r\n"
        b"one=1&two=2"
        b"\r\n"
    ) == bytes(buf)

async def test_writer_write_no_parts(buf: Any, stream: Any, writer: Any) -> None:
    """An empty writer emits only the closing boundary."""
    await writer.write(stream)
    assert b"--:--\r\n" == bytes(buf)
async def test_writer_serialize_with_content_encoding_gzip(
    buf: Any, stream: Any, writer: Any
) -> None:
    """Appending with Content-Encoding gzip compresses the payload on write."""
    writer.append("Time to Relax!", {CONTENT_ENCODING: "gzip"})
    await writer.write(stream)
    headers, message = bytes(buf).split(b"\r\n\r\n", 1)
    assert (
        b"--:\r\nContent-Type: text/plain; charset=utf-8\r\n"
        b"Content-Encoding: gzip" == headers
    )
    # wbits=16+MAX_WBITS selects gzip-wrapped deflate decoding.
    decompressor = zlib.decompressobj(wbits=16 + zlib.MAX_WBITS)
    data = decompressor.decompress(message.split(b"\r\n")[0])
    data += decompressor.flush()
    assert b"Time to Relax!" == data

async def test_writer_serialize_with_content_encoding_deflate(
    buf: Any, stream: Any, writer: Any
) -> None:
    """Content-Encoding deflate produces a raw-deflate body."""
    writer.append("Time to Relax!", {CONTENT_ENCODING: "deflate"})
    await writer.write(stream)
    headers, message = bytes(buf).split(b"\r\n\r\n", 1)
    assert (
        b"--:\r\nContent-Type: text/plain; charset=utf-8\r\n"
        b"Content-Encoding: deflate" == headers
    )
    thing = b"\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--\r\n"
    assert thing == message

async def test_writer_serialize_with_content_encoding_identity(
    buf: Any, stream: Any, writer: Any
) -> None:
    """Content-Encoding identity leaves the payload untouched."""
    thing = b"\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00"
    writer.append(thing, {CONTENT_ENCODING: "identity"})
    await writer.write(stream)
    headers, message = bytes(buf).split(b"\r\n\r\n", 1)
    assert (
        b"--:\r\nContent-Type: application/octet-stream\r\n"
        b"Content-Encoding: identity\r\n"
        b"Content-Length: 16" == headers
    )
    assert thing == message.split(b"\r\n")[0]

def test_writer_serialize_with_content_encoding_unknown(
    buf: Any, stream: Any, writer: Any
) -> None:
    """Unsupported Content-Encoding is rejected at append time."""
    with pytest.raises(RuntimeError):
        writer.append("Time to Relax!", {CONTENT_ENCODING: "snappy"})

async def test_writer_with_content_transfer_encoding_base64(
    buf: Any, stream: Any, writer: Any
) -> None:
    """Content-Transfer-Encoding base64 encodes the body on write."""
    writer.append("Time to Relax!", {CONTENT_TRANSFER_ENCODING: "base64"})
    await writer.write(stream)
    headers, message = bytes(buf).split(b"\r\n\r\n", 1)
    assert (
        b"--:\r\nContent-Type: text/plain; charset=utf-8\r\n"
        b"Content-Transfer-Encoding: base64" == headers
    )
    assert b"VGltZSB0byBSZWxheCE=" == message.split(b"\r\n")[0]

async def test_writer_content_transfer_encoding_quote_printable(
    buf: Any, stream: Any, writer: Any
) -> None:
    """Content-Transfer-Encoding quoted-printable encodes non-ASCII text."""
    writer.append("Привет, мир!", {CONTENT_TRANSFER_ENCODING: "quoted-printable"})
    await writer.write(stream)
    headers, message = bytes(buf).split(b"\r\n\r\n", 1)
    assert (
        b"--:\r\nContent-Type: text/plain; charset=utf-8\r\n"
        b"Content-Transfer-Encoding: quoted-printable" == headers
    )
    assert (
        b"=D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82,"
        b" =D0=BC=D0=B8=D1=80!" == message.split(b"\r\n")[0]
    )

def test_writer_content_transfer_encoding_unknown(
    buf: Any, stream: Any, writer: Any
) -> None:
    """Unsupported Content-Transfer-Encoding is rejected at append time."""
    with pytest.raises(RuntimeError):
        writer.append("Time to Relax!", {CONTENT_TRANSFER_ENCODING: "unknown"})
class TestMultipartWriter:
    """Tests for aiohttp.MultipartWriter construction and append helpers."""

    def test_default_subtype(self, writer: Any) -> None:
        """The default mimetype is multipart/mixed."""
        mimetype = parse_mimetype(writer.headers.get(CONTENT_TYPE))
        assert "multipart" == mimetype.type
        assert "mixed" == mimetype.subtype

    def test_unquoted_boundary(self) -> None:
        """Token-safe boundaries are emitted without quotes."""
        writer = aiohttp.MultipartWriter(boundary="abc123")
        expected = {CONTENT_TYPE: "multipart/mixed; boundary=abc123"}
        assert expected == writer.headers

    def test_quoted_boundary(self) -> None:
        """Boundaries with special characters are quoted and escaped."""
        writer = aiohttp.MultipartWriter(boundary=R"\"")
        expected = {CONTENT_TYPE: R'multipart/mixed; boundary="\\\""'}
        assert expected == writer.headers

    def test_bad_boundary(self) -> None:
        """Non-ASCII or control characters in the boundary are rejected."""
        with pytest.raises(ValueError):
            aiohttp.MultipartWriter(boundary="тест")
        with pytest.raises(ValueError):
            aiohttp.MultipartWriter(boundary="test\n")

    def test_default_headers(self, writer: Any) -> None:
        """The writer fixture advertises its ':' boundary in Content-Type."""
        expected = {CONTENT_TYPE: 'multipart/mixed; boundary=":"'}
        assert expected == writer.headers

    def test_iter_parts(self, writer: Any) -> None:
        """Iterating the writer yields each appended part."""
        writer.append("foo")
        writer.append("bar")
        writer.append("baz")
        assert 3 == len(list(writer))

    def test_append(self, writer: Any) -> None:
        """append wraps plain data in a payload.Payload."""
        assert 0 == len(writer)
        writer.append("hello, world!")
        assert 1 == len(writer)
        assert isinstance(writer._parts[0][0], payload.Payload)

    def test_append_with_headers(self, writer: Any) -> None:
        """Extra headers passed to append end up on the part."""
        writer.append("hello, world!", {"x-foo": "bar"})
        assert 1 == len(writer)
        assert "x-foo" in writer._parts[0][0].headers
        assert writer._parts[0][0].headers["x-foo"] == "bar"

    def test_append_json(self, writer: Any) -> None:
        """append_json sets Content-Type to application/json."""
        writer.append_json({"foo": "bar"})
        assert 1 == len(writer)
        part = writer._parts[0][0]
        assert part.headers[CONTENT_TYPE] == "application/json"

    def test_append_part(self, writer: Any) -> None:
        """Appending an existing payload lets new headers override its own."""
        part = payload.get_payload("test", headers={CONTENT_TYPE: "text/plain"})
        writer.append(part, {CONTENT_TYPE: "test/passed"})
        assert 1 == len(writer)
        part = writer._parts[0][0]
        assert part.headers[CONTENT_TYPE] == "test/passed"

    def test_append_json_overrides_content_type(self, writer: Any) -> None:
        """Explicit Content-Type wins over append_json's default."""
        writer.append_json({"foo": "bar"}, {CONTENT_TYPE: "test/passed"})
        assert 1 == len(writer)
        part = writer._parts[0][0]
        assert part.headers[CONTENT_TYPE] == "test/passed"

    def test_append_form(self, writer: Any) -> None:
        """Explicit Content-Type wins over append_form's default."""
        writer.append_form({"foo": "bar"}, {CONTENT_TYPE: "test/passed"})
        assert 1 == len(writer)
        part = writer._parts[0][0]
        assert part.headers[CONTENT_TYPE] == "test/passed"

    def test_append_multipart(self, writer: Any) -> None:
        """A nested MultipartWriter can be appended as a part."""
        subwriter = aiohttp.MultipartWriter(boundary=":")
        subwriter.append_json({"foo": "bar"})
        writer.append(subwriter, {CONTENT_TYPE: "test/passed"})
        assert 1 == len(writer)
        part = writer._parts[0][0]
        assert part.headers[CONTENT_TYPE] == "test/passed"

    def test_with(self) -> None:
        """The writer works as a context manager."""
        with aiohttp.MultipartWriter(boundary=":") as writer:
            writer.append("foo")
            writer.append(b"bar")
            writer.append_json({"baz": True})
        assert 3 == len(writer)

    def test_append_int_not_allowed(self) -> None:
        """Bare ints have no payload mapping and raise TypeError."""
        with pytest.raises(TypeError):
            with aiohttp.MultipartWriter(boundary=":") as writer:
                writer.append(1)

    def test_append_float_not_allowed(self) -> None:
        """Bare floats have no payload mapping and raise TypeError."""
        with pytest.raises(TypeError):
            with aiohttp.MultipartWriter(boundary=":") as writer:
                writer.append(1.1)

    def test_append_none_not_allowed(self) -> None:
        """None has no payload mapping and raises TypeError."""
        with pytest.raises(TypeError):
            with aiohttp.MultipartWriter(boundary=":") as writer:
                writer.append(None)
async def test_write_preserves_content_disposition(
    self, buf: Any, stream: Any
) -> None:
    """set_content_disposition survives serialization."""
    with aiohttp.MultipartWriter(boundary=":") as writer:
        part = writer.append(b"foo", headers={CONTENT_TYPE: "test/passed"})
        part.set_content_disposition("form-data", filename="bug")
    await writer.write(stream)
    headers, message = bytes(buf).split(b"\r\n\r\n", 1)
    assert headers == (
        b"--:\r\n"
        b"Content-Type: test/passed\r\n"
        b"Content-Length: 3\r\n"
        b"Content-Disposition:"
        b' form-data; filename="bug"'
    )
    assert message == b"foo\r\n--:--\r\n"

async def test_preserve_content_disposition_header(
    self, buf: Any, stream: Any
) -> None:
    """A caller-supplied Content-Disposition header is written verbatim."""
    # https://github.com/aio-libs/aiohttp/pull/3475#issuecomment-451072381
    with pathlib.Path(__file__).open("rb") as fobj:
        with aiohttp.MultipartWriter("form-data", boundary=":") as writer:
            part = writer.append(
                fobj,
                headers={
                    CONTENT_DISPOSITION: 'attachments; filename="bug.py"',
                    CONTENT_TYPE: "text/python",
                },
            )
        content_length = part.size
        await writer.write(stream)
    assert part.headers[CONTENT_TYPE] == "text/python"
    assert part.headers[CONTENT_DISPOSITION] == ('attachments; filename="bug.py"')
    headers, _ = bytes(buf).split(b"\r\n\r\n", 1)
    assert headers == (
        b"--:\r\n"
        b"Content-Type: text/python\r\n"
        b'Content-Disposition: attachments; filename="bug.py"\r\n'
        b"Content-Length: %s"
        b"" % (str(content_length).encode(),)
    )

async def test_set_content_disposition_override(
    self, buf: Any, stream: Any
) -> None:
    """Ostensibly tests overriding Content-Disposition.

    NOTE(review): this body is byte-identical to
    test_preserve_content_disposition_header above and never calls
    set_content_disposition, so it does not exercise an override —
    looks like an incomplete copy-paste; confirm intent upstream.
    """
    # https://github.com/aio-libs/aiohttp/pull/3475#issuecomment-451072381
    with pathlib.Path(__file__).open("rb") as fobj:
        with aiohttp.MultipartWriter("form-data", boundary=":") as writer:
            part = writer.append(
                fobj,
                headers={
                    CONTENT_DISPOSITION: 'attachments; filename="bug.py"',
                    CONTENT_TYPE: "text/python",
                },
            )
        content_length = part.size
        await writer.write(stream)
    assert part.headers[CONTENT_TYPE] == "text/python"
    assert part.headers[CONTENT_DISPOSITION] == ('attachments; filename="bug.py"')
    headers, _ = bytes(buf).split(b"\r\n\r\n", 1)
    assert headers == (
        b"--:\r\n"
        b"Content-Type: text/python\r\n"
        b'Content-Disposition: attachments; filename="bug.py"\r\n'
        b"Content-Length: %s"
        b"" % (str(content_length).encode(),)
    )

async def test_reset_content_disposition_header(
    self, buf: Any, stream: Any
) -> None:
    """set_content_disposition replaces the auto-generated header."""
    # https://github.com/aio-libs/aiohttp/pull/3475#issuecomment-451072381
    with pathlib.Path(__file__).open("rb") as fobj:
        with aiohttp.MultipartWriter("form-data", boundary=":") as writer:
            part = writer.append(
                fobj,
                headers={CONTENT_TYPE: "text/plain"},
            )
        content_length = part.size
        assert CONTENT_DISPOSITION in part.headers
        part.set_content_disposition("attachments", filename="bug.py")
        await writer.write(stream)
    headers, _ = bytes(buf).split(b"\r\n\r\n", 1)
    assert headers == (
        b"--:\r\n"
        b"Content-Type: text/plain\r\n"
        b"Content-Disposition:"
        b' attachments; filename="bug.py"\r\n'
        b"Content-Length: %s"
        b"" % (str(content_length).encode(),)
    )
async def test_async_for_reader() -> None:
    """async-for iteration recurses through nested multipart levels."""
    data = [{"test": "passed"}, 42, b"plain text", b"aiohttp\n", b"no epilogue"]
    reader = aiohttp.MultipartReader(
        headers={CONTENT_TYPE: 'multipart/mixed; boundary=":"'},
        content=Stream(
            b"\r\n".join(
                [
                    b"--:",
                    b"Content-Type: application/json",
                    b"",
                    json.dumps(data[0]).encode(),
                    b"--:",
                    b"Content-Type: application/json",
                    b"",
                    json.dumps(data[1]).encode(),
                    b"--:",
                    b'Content-Type: multipart/related; boundary="::"',
                    b"",
                    b"--::",
                    b"Content-Type: text/plain",
                    b"",
                    data[2],
                    b"--::",
                    b'Content-Disposition: attachment; filename="aiohttp"',
                    b"Content-Type: text/plain",
                    b"Content-Length: 28",
                    b"Content-Encoding: gzip",
                    b"",
                    # gzip-compressed data[3] (b"aiohttp\n").
                    b"\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03K\xcc\xcc\xcf())"
                    b"\xe0\x02\x00\xd6\x90\xe2O\x08\x00\x00\x00",
                    b"--::",
                    b'Content-Type: multipart/related; boundary=":::"',
                    b"",
                    b"--:::",
                    b"Content-Type: text/plain",
                    b"",
                    data[4],
                    b"--:::--",
                    b"--::--",
                    b"",
                    b"--:--",
                    b"",
                ]
            )
        ),
    )
    idata = iter(data)

    async def check(reader):
        # Recursively compare each leaf part against the flat `data` list.
        async for part in reader:
            if isinstance(part, aiohttp.BodyPartReader):
                if part.headers[CONTENT_TYPE] == "application/json":
                    assert next(idata) == (await part.json())
                else:
                    assert next(idata) == await part.read(decode=True)
            else:
                await check(part)

    await check(reader)

async def test_async_for_bodypart() -> None:
    """A BodyPartReader itself supports async-for over its chunks."""
    part = aiohttp.BodyPartReader(
        boundary=b"--:", headers={}, content=Stream(b"foobarbaz\r\n--:--")
    )
    async for data in part:
        assert data == b"foobarbaz"
``` |
[
{
"content": "Here is the script:\n```python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom optparse import make_option\n\nfrom fabric.colors import green\n\nfrom django.core.management.base import BaseCommand\n\nfrom magicbackup.helpers import MagicBackup\n\n\nclass Command(BaseComma... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom optparse import make_option\n\nfrom fabric.colors import green\n\nfrom django.core.management.base import BaseCommand\n\nfrom magicbackup.helpers import MagicBackup\n\n\nclass C... | ```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from optparse import make_option
from fabric.colors import green
from django.core.management.base import BaseCommand
from magicbackup.helpers import MagicBackup
class Command(BaseCommand):
help = 'Backup a Site'
option_list = BaseCommand.option_list + (
make_option('--backup-name', action='store', dest='backup_name',
type='string', help='A name for backup folder'),
make_option('--site-id', action='store', dest='site_id',
type='int', help='The site ID'),
)
def handle(self, *args, **options):
backup_name = options['backup_name']
site_id = options['site_id']
if not backup_name or not site_id:
raise Exception('backup_name or site_id is missing')
models = ["magiccontent.Widget",
"magiccontent.Area",
"magiccontent.SiteLink",
"magicgallery.Gallery",
"magicgallery.GalleryItem",
"textimagecontent.TextImageContent",
"formattedtextimagecontent.FormattedTextImageContent",
"iconcontent.IconContent",
"background.BackgroundArea",
"dividertextcontent.DividerTextContent",
"imagecontent.ImageContent",
"magiccontentnavigation.MenuItem",
"core.SitePreferences",
"magicthemes.ThemePreferences", ]
backup = MagicBackup().site(site_id).save_as(backup_name)
for model in models:
print(green('backuping {0}...'.format(model)))
backup.model(model).backup()
print(green('new backup created at {0}'.format(backup.target_dir)))
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\n\n# env\nsys.path.append('/usr/lib/python2.7/dist-packages/')\nsys.path.append('/usr/lib/python2.7/')\nsys.path.append('/usr/local/lib/python2.7/dist-packages/')\nsys.path.append('/data2/django_1.... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\n\n# env\nsys.path.append('/usr/lib/python2.7/dist-packages/')\nsys.path.append('/usr/lib/python2.7/')\nsys.path.append('/usr/local/lib/python2.7/dist-packages/')\nsys.path.append('... | ```python
# -*- coding: utf-8 -*-
import os
import sys
# env
sys.path.append('/usr/lib/python2.7/dist-packages/')
sys.path.append('/usr/lib/python2.7/')
sys.path.append('/usr/local/lib/python2.7/dist-packages/')
sys.path.append('/data2/django_1.11/')
sys.path.append('/data2/django_projects/')
sys.path.append('/data2/django_third/')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djaludir.settings')
from djzbar.utils.informix import do_sql
import argparse
"""
obtain the start_year and end_year for students
"""
# set up command-line options
desc = """
Accepts as input a user ID.
"""
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'-i', '--uid',
help = "User ID.",
dest = 'uid',
required = True
)
parser.add_argument(
'--test',
action = 'store_true',
help = "Dry run?",
dest = 'test'
)
def main():
"""
main method
"""
sql = '''
SELECT
MIN(yr) AS start_year, MAX(yr) AS end_year
FROM
stu_acad_rec WHERE id = "{}" AND yr > 0
'''.format(uid)
objs = do_sql(sql)
for obj in objs:
print obj
######################
# shell command line
######################
if __name__ == '__main__':
args = parser.parse_args()
test = args.test
if test:
print args
# vars
uid = args.uid
sys.exit(main())
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n#!/usr/bin/env python3\n\n# RAAED Server software: v1.0\n# A GUI RAAED Server\n# Detects a reverse SSH connection bound to port 22 from an RAAED Client.\n#\n# DESCRIPTION\n# The server is designed to continually ... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n#!/usr/bin/env python3\n\n# RAAED Server software: v1.0\n# A GUI RAAED Server\n# Detects a reverse SSH connection bound to port 22 from an RAAED Client.\n#\n# DESCRIPTION\n# The server is designed... | ```python
#!/usr/bin/env python3
# RAAED Server software: v1.0
# A GUI RAAED Server
# Detects a reverse SSH connection bound to port 22 from an RAAED Client.
#
# DESCRIPTION
# The server is designed to continually check for the prescence of a reverse SSH session on port 22.
# The GUI will then reflect the presence of the reverse SSH session.
# A Shell in the context of the reverse SSH session can be launched through clicking a button.
#
# SSH REQUIREMENTS
# This script requires an SSH service to be active and running locally.
# /etc/ssh/sshd_config should be configured to allow public key authentication, and operate on port 443.
# a valid private RSA key for the RAAED Client should be placed in ~/.ssh (id_rsa)
# a valid public key with an associated private key on the RAAED Client should be located in ~/.ssh (id_rsa.pub)
#
# THIRD PARTY DEPENDENCIES
# pip3 install psutil
# pip3 install appjar
#
# AUTHOR: forScience (james@forscience.xyz)
#
# INDENT: TABS
import sys
import os
import threading
import subprocess
import psutil
import time
from appJar import gui
# Checks if port 22 is listening on localhost.
# Called in a thread at launch. Runs in the background.
# If the the port is open then update GUI to reflect change
def connection_check():
# loop infinately (in background)
while True:
time.sleep(2)
# retrieve tuples of all local IPv4 connections (in form of [IP, Port])
local = psutil.net_connections('inet4')
connect = False # set flag to false each itteration of loop
# iterrate through local IPv4 tuples
for address in local:
(ip, port) = address.laddr # assign each tuple to local variables
# check each IP for localhost and Port for 22
if ip == '127.0.0.1' and port == 22:
connect = True # set flag to indicate connection
# if flag has been set then connection exists
if connect:
# only update GUI if port 22 on localhost is found
gui_update("connected")
else:
# otherwise GUI continues to indicate disconnection
gui_update("disconnected")
# Updates GUI to show client connection state
# Called by connection_check() depending on local port activity
# Updates indicator and text to reflect state
def gui_update(update):
if update == "connected":
# update gui to reflect connection
# update indicator
app.setLabel("indicator", "Connected") # update GUI indicator text
app.setLabelBg("indicator", "green") # update GUI indicator colour
# update text
app.setLabelFg("text", "green") # update GUI text colour
text = "Connected to client" # create explanation string
app.setLabel("text", text) # update GUI with explanation string
elif update == "disconnected":
# update gui to reflect disconnection
# update indicator
app.setLabel("indicator", "Disconnected") # update GUI indicator text
app.setLabelBg("indicator", "red") # update GUI indicator colour
# update text
app.setLabelFg("text", "red") # update GUI text colour
text = "No connection from client" # create explanation string
app.setLabel("text", text) # update GUI with explanation string
elif update == "list targets":
# update gui with targets from client
# open retrieved network list file
with open('/root/Desktop/network.list', 'r') as file:
iplist = file.read() # read in file to variable and remove EOL
# display targets in gui
app.setMessage('enumeration', iplist)
# Spawns an SSH session in a new shell
# gnome-terminal only works within the GNOME DE
def spawn_shell(btn):
# terminal remains open after command issued with '-x'
subprocess.call(['gnome-terminal', '-x', 'ssh', 'localhost'])
# Connects via scp to RAAED Client and retrieves a list of
# IPs enumerated on the Clients local network.
# The list is displayed in the GUI
def get_enum(btn):
# define local and remote list locations
localdest = "/root/Desktop/network.list"
remotedest = "/root/Desktop/network.list"
# retrieve enumeration txt files from client
sshcopy = "scp root@localhost:" + remotedest + " " + localdest # build ssh copy command
copyresult = subprocess.call(sshcopy, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) # execute scp command
# if scp was successful
if copyresult == 0:
# update gui and delete localdest file
gui_update('list targets')
delfile = "rm " + localdest # build command to delete local network.list file
subprocess.call(delfile, shell=True) # delete file
# Entry
if __name__ == "__main__":
# put connection_check() in a thread and background
thread = threading.Thread(target=connection_check, args=())
thread.daemon = True # daemonised for clean closure, ok to kill with main
thread.start() # start daemon thread
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<
# GUI ELEMENTS
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>
# create the GUI & set a title
app = gui("RAAED Server")
app.setBg("white")
app.setFont(12, font="Arial")
app.setSticky("nesw")
app.setResizable(canResize=False)
# RAAED CONNECTION STATUS
app.startLabelFrame("Connection Status")
app.setLabelFramePadding("Connection Status", 4, 8)
# connection indicator
app.addLabel("indicator", "Disconnected", 0, 0)
app.setLabelBg("indicator", "red")
app.setLabelFg("indicator", "white")
app.setLabelPadding("indicator", 2, 5)
# explanation text
app.addLabel("text", "No connection from client", 0, 1)
app.setLabelFg("text", "red")
app.setLabelPadding("text", 4, 8)
# end frame
app.stopLabelFrame()
# SPAWN SHELL AND RETRIEVE ENUM BUTTONS
app.startLabelFrame("")
app.setLabelFramePadding("", 4, 8)
# spawn shell button
app.addButton("Spawn Shell", spawn_shell, 0, 0)
# retrieve enumeration button
app.addButton("Show Remote Hosts", get_enum, 0, 1)
# end bottom frame
app.stopLabelFrame()
# REMOTE TARGET LIST
app.startLabelFrame("Remote Network Hosts")
app.setLabelFramePadding("Remote Network Hosts", 4, 8)
# spawn shell button
app.addEmptyMessage("enumeration")
# end bottom frame
app.stopLabelFrame()
# start GUI
app.go()
``` |
[
{
"content": "Here is the code content:\n```python\n# -*- coding: utf-8 -*-\n# Copyright 2013-2017 Camptocamp SA\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)\n\n\"\"\"\nBinders\n=======\n\nBinders are components that know how to find the external ID for an\nOdoo ID, how to find the Odoo ... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Copyright 2013-2017 Camptocamp SA\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)\n\n\"\"\"\nBinders\n=======\n\nBinders are components that know how to find the external ID for an\nOdoo ID, how t... | ```python
# -*- coding: utf-8 -*-
# Copyright 2013-2017 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
"""
Binders
=======
Binders are components that know how to find the external ID for an
Odoo ID, how to find the Odoo ID for an external ID and how to
create the binding between them.
"""
import psycopg2
import json
from odoo import fields, models, tools
from odoo.addons.component.core import AbstractComponent
from contextlib import contextmanager
from odoo.addons.connector.exception import (RetryableJobError, )
import odoo
class BinderComposite(AbstractComponent):
""" The same as Binder but allowing composite external keys
"""
_name = 'base.binder.composite'
_inherit = 'base.binder'
_default_binding_field = 'oxigesti_bind_ids'
_external_display_field = 'external_id_display'
_odoo_extra_fields = []
@contextmanager
def _retry_unique_violation(self):
""" Context manager: catch Unique constraint error and retry the
job later.
When we execute several jobs workers concurrently, it happens
that 2 jobs are creating the same record at the same time (binding
record created by :meth:`_export_dependency`), resulting in:
IntegrityError: duplicate key value violates unique
constraint "my_backend_product_product_odoo_uniq"
DETAIL: Key (backend_id, odoo_id)=(1, 4851) already exists.
In that case, we'll retry the import just later.
.. warning:: The unique constraint must be created on the
binding record to prevent 2 bindings to be created
for the same External record.
"""
try:
yield
except psycopg2.IntegrityError as err:
if err.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise RetryableJobError(
'A database error caused the failure of the job:\n'
'%s\n\n'
'Likely due to 2 concurrent jobs wanting to create '
'the same record. The job will be retried later.' % err)
else:
raise
def _is_binding(self, binding):
try:
binding._fields[self._odoo_field]
except KeyError:
return False
return True
def _find_binding(self, relation, binding_extra_vals={}):
if self._is_binding(relation):
raise Exception("The source object %s must not be a binding" % relation.model._name)
if not set(self._odoo_extra_fields).issubset(set(binding_extra_vals.keys())):
raise Exception("If _odoo_extra_fields are defined %s, "
"you must specify the correpsonding binding_extra_vals %s" % (
self._odoo_extra_fields, binding_extra_vals))
domain = [(self._odoo_field, '=', relation.id),
(self._backend_field, '=', self.backend_record.id)]
for f in self._odoo_extra_fields:
domain.append((f, '=', binding_extra_vals[f]))
binding = self.model.with_context(
active_test=False).search(domain)
if binding:
binding.ensure_one()
return binding
def wrap_binding(self, relation, binding_field=None, binding_extra_vals={}):
if not relation:
return
if binding_field is None:
if not self._default_binding_field:
raise Exception("_default_binding_field defined on synchronizer class is mandatory")
binding_field = self._default_binding_field
# wrap is typically True if the relation is a 'product.product'
# record but the binding model is 'oxigesti.product.product'
wrap = relation._name != self.model._name
if wrap and hasattr(relation, binding_field):
binding = self._find_binding(relation, binding_extra_vals)
if not binding:
# we are working with a unwrapped record (e.g.
# product.template) and the binding does not exist yet.
# Example: I created a product.product and its binding
# oxigesti.product.product, it is exported, but we need to
# create the binding for the template.
_bind_values = {self._odoo_field: relation.id,
self._backend_field: self.backend_record.id}
_bind_values.update(binding_extra_vals)
# If 2 jobs create it at the same time, retry
# one later. A unique constraint (backend_id,
# odoo_id) should exist on the binding model
with self._retry_unique_violation():
binding = (self.model
.with_context(connector_no_export=True)
.sudo()
.create(_bind_values))
# Eager commit to avoid having 2 jobs
# exporting at the same time. The constraint
# will pop if an other job already created
# the same binding. It will be caught and
# raise a RetryableJobError.
if not odoo.tools.config['test_enable']:
self.env.cr.commit() # nowait
else:
# If oxigest_bind_ids does not exist we are typically in a
# "direct" binding (the binding record is the same record).
# If wrap is True, relation is already a binding record.
binding = relation
if not self._is_binding(binding):
raise Exception(
"Expected binding '%s' and found regular model '%s'" % (self.model._name, relation._name))
return binding
def to_internal(self, external_id, unwrap=False):
""" Give the Odoo recordset for an external ID
:param external_id: external ID for which we want
the Odoo ID
:param unwrap: if True, returns the normal record
else return the binding record
:return: a recordset, depending on the value of unwrap,
or an empty recordset if the external_id is not mapped
:rtype: recordset
"""
domain = [(self._backend_field, '=', self.backend_record.id),
(self._external_display_field, '=', json.dumps(external_id))]
bindings = self.model.with_context(active_test=False).search(
domain
)
if not bindings:
if unwrap:
return self.model.browse()[self._odoo_field]
return self.model.browse()
bindings.ensure_one()
if unwrap:
bindings = bindings[self._odoo_field]
return bindings
def to_external(self, binding, wrap=False, wrapped_model=None, binding_extra_vals={}):
""" Give the external ID for an Odoo binding ID
:param binding: Odoo binding for which we want the external id
:param wrap: if True, binding is a normal record, the
method will search the corresponding binding and return
the external id of the binding
:return: external ID of the record
"""
if isinstance(binding, models.BaseModel):
binding.ensure_one()
else:
if wrap:
if not wrapped_model:
raise Exception("The wrapped model is mandatory if binding is not an object")
binding = self.env[wrapped_model].browse(binding)
else:
binding = self.model.browse(binding)
if wrap:
binding = self._find_binding(binding, binding_extra_vals)
if not binding:
return None
return binding[self._external_field] or None
def bind(self, external_id, binding):
""" Create the link between an external ID and an Odoo ID
:param external_id: external id to bind
:param binding: Odoo record to bind
:type binding: int
"""
# Prevent False, None, or "", but not 0
assert (external_id or external_id is 0) and binding, (
"external_id or binding missing, "
"got: %s, %s" % (external_id, binding)
)
# avoid to trigger the export when we modify the `external_id`
now_fmt = fields.Datetime.now()
if isinstance(binding, models.BaseModel):
binding.ensure_one()
else:
binding = self.model.browse(binding)
binding.with_context(connector_no_export=True).write({
self._external_field: external_id,
self._sync_date_field: now_fmt,
})
def _get_external_id(self, binding):
return None
``` |
[
{
"content": "Here is the script:\n```python\nr\"\"\"\n\n**openpnm.network**\n\n----\n\nThis module contains the ``GenericNetwork`` class, whose main purpose is to\nmanage the topological representation of the Network. It also houses a\ncollection of Network generators.\n\n----\n\n**Available Network Generator... | [
{
"content": "Here is the script:\n<|memory_start|>```python\nr\"\"\"\n\n**openpnm.network**\n\n----\n\nThis module contains the ``GenericNetwork`` class, whose main purpose is to\nmanage the topological representation of the Network. It also houses a\ncollection of Network generators.\n\n----\n\n**Available N... | ```python
r"""
**openpnm.network**
----
This module contains the ``GenericNetwork`` class, whose main purpose is to
manage the topological representation of the Network. It also houses a
collection of Network generators.
----
**Available Network Generators**
OpenPNM includes a variety of Network generators. The basically include two
families of topology: periodic lattices and tessellations of random points.
+---------------------+-------------------------------------------------------+
| Generator Name | Description |
+=====================+=======================================================+
| Cubic | Simple cubic lattice with connectivity from 6 to 26 |
+---------------------+-------------------------------------------------------+
| CubicDual | Body centered cubic lattice plus face centered nodes |
| | on the surfaces |
+---------------------+-------------------------------------------------------+
| CubicTemplate | Simple cubic lattice with arbitrary domain shape |
| | specified by a template image |
+---------------------+-------------------------------------------------------+
| Bravais | Crystal lattice types including fcc, bcc, sc, and hcp |
+---------------------+-------------------------------------------------------+
| Delaunay | Random network formed by Delaunay tessellation of |
| | arbitrary base points |
+---------------------+-------------------------------------------------------+
| Voronoi | Random network formed by Voronoi tessellation of |
| | arbitrary base points |
+---------------------+-------------------------------------------------------+
| Gabriel | Random network formed by Gabriel tessellation of |
| | arbitrary base points |
+---------------------+-------------------------------------------------------+
| DelaunayVoronoiDual | Combined and interconnected Voronoi and Delaunay |
| | tessellations |
+---------------------+-------------------------------------------------------+
----
**The GenericNetwork Class**
All of the above Network classes derive from the GenericNetwork class. It is
a subclass of ``Base`` so contains methods for retrieving sets of pores based
on labels and so forth, but also contains the following additional methods
that are used soley for topological queries.
Pore networks require two essential pieces of information:
- the spatial location of pores
- the connectivity of which throats connect which pores
The ``GenericNetwork`` class and it's subclasses are responsible for storing,
managing, and utilizing this information.
Network topology is stored using `adjacency matrices
<https://en.wikipedia.org/wiki/Adjacency_matrix>`_. Moreover, this is stored
using a `sparse matrix format <https://en.wikipedia.org/wiki/Sparse_matrix>`_
known as COO. All netowrk objects store the COO matrix as ``'throat.conns'``.
The spatial location of each pore is stored in Cartesian coordinates [x, y, z],
under ``'pore.coords'``. All networks must be 3D, so even a 2D network must
have a z-component (but set to 0).
The following methods are implemented on ``GenericNetwork``, and look into
the ``'throat.conns'`` and ``'pore.coords'`` as needed.
+-------------------------+---------------------------------------------------+
| Method | Description |
+=========================+===================================================+
| num_neighbors | Counts the number of neighbors with a given label |
+-------------------------+---------------------------------------------------+
| find_neighbor_pores | Gets indices of pores neighboring a given pore |
+-------------------------+---------------------------------------------------+
| find_neighbor_throats | Gets indices of neighbor throats to a given pore |
+-------------------------+---------------------------------------------------+
| find_connected_pores | Gets indices of pores connected by a given throat |
+-------------------------+---------------------------------------------------+
| find_connecting_throat | Gets indices of the throat joining pairs of pores |
+-------------------------+---------------------------------------------------+
| find_nearby_pores | Find all pores within given distance of given pore|
+-------------------------+---------------------------------------------------+
| create_adjacency_matrix | Generates a weighted adjacency matrix |
+-------------------------+---------------------------------------------------+
| create_incidence_matrix | Creates a weighted incidence matrix |
+-------------------------+---------------------------------------------------+
| get_adjacency_matrix | Returns an adjacency matrix with default weights |
+-------------------------+---------------------------------------------------+
| get_incidence_matrix | Returns an incidence matrix with default weights |
+-------------------------+---------------------------------------------------+
| check_network_health | Check various aspects of topology for problems |
+-------------------------+---------------------------------------------------+
"""
from .GenericNetwork import GenericNetwork
from .Cubic import Cubic
from .CubicDual import CubicDual
from .Bravais import Bravais
from .CubicTemplate import CubicTemplate
from .DelaunayVoronoiDual import DelaunayVoronoiDual
from .Voronoi import Voronoi
from .Delaunay import Delaunay
from .Gabriel import Gabriel
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n# Copyright 2008-2012 Nokia Siemens Networks Oyj\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of t... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n# Copyright 2008-2012 Nokia Siemens Networks Oyj\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may ob... | ```python
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import getopt # optparse was not supported by Jython 2.2
import os
import re
import sys
import glob
import string
import codecs
import textwrap
from robot.errors import DataError, Information, FrameworkError
from robot.version import get_full_version
from .misc import plural_or_not
from .encoding import decode_output, decode_from_system, utf8open
ESCAPES = dict(
space = ' ', apos = "'", quot = '"', lt = '<', gt = '>',
pipe = '|', star = '*', comma = ',', slash = '/', semic = ';',
colon = ':', quest = '?', hash = '#', amp = '&', dollar = '$',
percent = '%', at = '@', exclam = '!', paren1 = '(', paren2 = ')',
square1 = '[', square2 = ']', curly1 = '{', curly2 = '}', bslash = '\\'
)
class ArgumentParser:
_opt_line_re = re.compile('''
^\s{1,4} # 1-4 spaces in the beginning of the line
((-\S\s)*) # all possible short options incl. spaces (group 1)
--(\S{2,}) # required long option (group 3)
(\s\S+)? # optional value (group 4)
(\s\*)? # optional '*' telling option allowed multiple times (group 5)
''', re.VERBOSE)
def __init__(self, usage, name=None, version=None, arg_limits=None,
validator=None, auto_help=True, auto_version=True,
auto_escape=True, auto_pythonpath=True, auto_argumentfile=True):
"""Available options and tool name are read from the usage.
Tool name is got from the first row of the usage. It is either the
whole row or anything before first ' -- '.
"""
if not usage:
raise FrameworkError('Usage cannot be empty')
self.name = name or usage.splitlines()[0].split(' -- ')[0].strip()
self.version = version or get_full_version()
self._usage = usage
self._arg_limit_validator = ArgLimitValidator(arg_limits)
self._validator = validator
self._auto_help = auto_help
self._auto_version = auto_version
self._auto_escape = auto_escape
self._auto_pythonpath = auto_pythonpath
self._auto_argumentfile = auto_argumentfile
self._short_opts = ''
self._long_opts = []
self._multi_opts = []
self._toggle_opts = []
self._names = []
self._short_to_long = {}
self._expected_args = ()
self._create_options(usage)
def parse_args(self, args_list):
"""Parse given arguments and return options and positional arguments.
Arguments must be given as a list and are typically sys.argv[1:].
Options are retuned as a dictionary where long options are keys. Value
is a string for those options that can be given only one time (if they
are given multiple times the last value is used) or None if the option
is not used at all. Value for options that can be given multiple times
(denoted with '*' in the usage) is a list which contains all the given
values and is empty if options are not used. Options not taken
arguments have value False when they are not set and True otherwise.
Positional arguments are returned as a list in the order they are given.
If 'check_args' is True, this method will automatically check that
correct number of arguments, as parsed from the usage line, are given.
If the last argument in the usage line ends with the character 's',
the maximum number of arguments is infinite.
Possible errors in processing arguments are reported using DataError.
Some options have a special meaning and are handled automatically
if defined in the usage and given from the command line:
--escape option can be used to automatically unescape problematic
characters given in an escaped format.
--argumentfile can be used to automatically read arguments from
a specified file. When --argumentfile is used, the parser always
allows using it multiple times. Adding '*' to denote that is thus
recommend. A special value 'stdin' can be used to read arguments from
stdin instead of a file.
--pythonpath can be used to add extra path(s) to sys.path.
--help and --version automatically generate help and version messages.
Version is generated based on the tool name and version -- see __init__
for information how to set them. Help contains the whole usage given to
__init__. Possible <VERSION> text in the usage is replaced with the
given version. Possible <--ESCAPES--> is replaced with available
escapes so that they are wrapped to multiple lines but take the same
amount of horizontal space as <---ESCAPES--->. Both help and version
are wrapped to Information exception.
"""
args_list = [decode_from_system(a) for a in args_list]
if self._auto_argumentfile:
args_list = self._process_possible_argfile(args_list)
opts, args = self._parse_args(args_list)
opts, args = self._handle_special_options(opts, args)
self._arg_limit_validator(args)
if self._validator:
opts, args = self._validator(opts, args)
return opts, args
def _handle_special_options(self, opts, args):
if self._auto_escape and opts.get('escape'):
opts, args = self._unescape_opts_and_args(opts, args)
if self._auto_help and opts.get('help'):
self._raise_help()
if self._auto_version and opts.get('version'):
self._raise_version()
if self._auto_pythonpath and opts.get('pythonpath'):
sys.path = self._get_pythonpath(opts['pythonpath']) + sys.path
for auto, opt in [(self._auto_help, 'help'),
(self._auto_version, 'version'),
(self._auto_escape, 'escape'),
(self._auto_pythonpath, 'pythonpath'),
(self._auto_argumentfile, 'argumentfile')]:
if auto and opt in opts:
opts.pop(opt)
return opts, args
def _parse_args(self, args):
args = [self._lowercase_long_option(a) for a in args]
try:
opts, args = getopt.getopt(args, self._short_opts, self._long_opts)
except getopt.GetoptError, err:
raise DataError(err.msg)
return self._process_opts(opts), self._glob_args(args)
def _lowercase_long_option(self, opt):
if not opt.startswith('--'):
return opt
if '=' not in opt:
return opt.lower()
opt, value = opt.split('=', 1)
return '%s=%s' % (opt.lower(), value)
def _unescape_opts_and_args(self, opts, args):
try:
escape_strings = opts['escape']
except KeyError:
raise FrameworkError("No 'escape' in options")
escapes = self._get_escapes(escape_strings)
for name, value in opts.items():
if name != 'escape':
opts[name] = self._unescape(value, escapes)
return opts, [self._unescape(arg, escapes) for arg in args]
def _process_possible_argfile(self, args):
argfile_opts = ['--argumentfile']
for sopt, lopt in self._short_to_long.items():
if lopt == 'argumentfile':
argfile_opts.append('-'+sopt)
while True:
try:
index = self._get_argfile_index(args, argfile_opts)
path = args[index+1]
except IndexError:
break
args[index:index+2] = self._get_args_from_file(path)
return args
def _get_argfile_index(self, args, argfile_opts):
for opt in argfile_opts:
if opt in args:
return args.index(opt)
raise IndexError
def _get_args_from_file(self, path):
if path.upper() != 'STDIN':
content = self._read_argfile(path)
else:
content = self._read_argfile_from_stdin()
return self._process_argfile(content)
def _read_argfile(self, path):
try:
with utf8open(path) as f:
content = f.read()
except (IOError, UnicodeError), err:
raise DataError("Opening argument file '%s' failed: %s"
% (path, err))
if content.startswith(codecs.BOM_UTF8.decode('UTF-8')):
content = content[1:]
return content
def _read_argfile_from_stdin(self):
content = sys.__stdin__.read()
if sys.platform != 'cli':
content = decode_output(content)
return content
def _process_argfile(self, content):
args = []
for line in content.splitlines():
line = line.strip()
if line.startswith('-'):
args.extend(line.split(' ', 1))
elif line and not line.startswith('#'):
args.append(line)
return args
def _get_escapes(self, escape_strings):
escapes = {}
for estr in escape_strings:
try:
name, value = estr.split(':', 1)
except ValueError:
raise DataError("Invalid escape string syntax '%s'. "
"Expected: what:with" % estr)
try:
escapes[value] = ESCAPES[name.lower()]
except KeyError:
raise DataError("Invalid escape '%s'. Available: %s"
% (name, self._get_available_escapes()))
return escapes
def _unescape(self, value, escapes):
if value in [None, True, False]:
return value
if isinstance(value, list):
return [self._unescape(item, escapes) for item in value]
for esc_name, esc_value in escapes.items():
if esc_name in value:
value = value.replace(esc_name, esc_value)
return value
def _process_opts(self, opt_tuple):
opts = self._init_opts()
for name, value in opt_tuple:
name = self._get_name(name)
if name in self._multi_opts:
opts[name].append(value)
elif name in self._toggle_opts:
opts[name] = not opts[name]
else:
opts[name] = value
return opts
def _glob_args(self, args):
temp = []
for path in args:
paths = sorted(glob.glob(path))
if paths:
temp.extend(paths)
else:
temp.append(path)
return temp
def _init_opts(self):
opts = {}
for name in self._names:
if name in self._multi_opts:
opts[name] = []
elif name in self._toggle_opts:
opts[name] = False
else:
opts[name] = None
return opts
def _get_name(self, name):
name = name.lstrip('-')
try:
return self._short_to_long[name]
except KeyError:
return name
def _create_options(self, usage):
for line in usage.splitlines():
res = self._opt_line_re.match(line)
if res:
self._create_option(short_opts=[o[1] for o in res.group(1).split()],
long_opt=res.group(3).lower(),
takes_arg=bool(res.group(4)),
is_multi=bool(res.group(5)))
def _create_option(self, short_opts, long_opt, takes_arg, is_multi):
if long_opt in self._names:
self._raise_option_multiple_times_in_usage('--' + long_opt)
self._names.append(long_opt)
for sopt in short_opts:
if self._short_to_long.has_key(sopt):
self._raise_option_multiple_times_in_usage('-' + sopt)
self._short_to_long[sopt] = long_opt
if is_multi:
self._multi_opts.append(long_opt)
if takes_arg:
long_opt += '='
short_opts = [sopt+':' for sopt in short_opts]
else:
self._toggle_opts.append(long_opt)
self._long_opts.append(long_opt)
self._short_opts += (''.join(short_opts))
def _get_pythonpath(self, paths):
if isinstance(paths, basestring):
paths = [paths]
temp = []
for path in self._split_pythonpath(paths):
temp.extend(glob.glob(path))
return [os.path.abspath(path) for path in temp if path]
def _split_pythonpath(self, paths):
# paths may already contain ':' as separator
tokens = ':'.join(paths).split(':')
if os.sep == '/':
return tokens
# Fix paths split like 'c:\temp' -> 'c', '\temp'
ret = []
drive = ''
for item in tokens:
item = item.replace('/', '\\')
if drive and item.startswith('\\'):
ret.append('%s:%s' % (drive, item))
drive = ''
continue
if drive:
ret.append(drive)
drive = ''
if len(item) == 1 and item in string.letters:
drive = item
else:
ret.append(item)
if drive:
ret.append(drive)
return ret
def _get_available_escapes(self):
names = sorted(ESCAPES.keys(), key=str.lower)
return ', '.join('%s (%s)' % (n, ESCAPES[n]) for n in names)
def _raise_help(self):
msg = self._usage
if self.version:
msg = msg.replace('<VERSION>', self.version)
def replace_escapes(res):
escapes = 'Available escapes: ' + self._get_available_escapes()
lines = textwrap.wrap(escapes, width=len(res.group(2)))
indent = ' ' * len(res.group(1))
return '\n'.join(indent + line for line in lines)
msg = re.sub('( *)(<-+ESCAPES-+>)', replace_escapes, msg)
raise Information(msg)
def _raise_version(self):
raise Information('%s %s' % (self.name, self.version))
def _raise_option_multiple_times_in_usage(self, opt):
raise FrameworkError("Option '%s' multiple times in usage" % opt)
class ArgLimitValidator(object):
def __init__(self, arg_limits):
self._min_args, self._max_args = self._parse_arg_limits(arg_limits)
def _parse_arg_limits(self, arg_limits):
if arg_limits is None:
return 0, sys.maxint
if isinstance(arg_limits, int):
return arg_limits, arg_limits
if len(arg_limits) == 1:
return arg_limits[0], sys.maxint
return arg_limits[0], arg_limits[1]
def __call__(self, args):
if not (self._min_args <= len(args) <= self._max_args):
self._raise_invalid_args(self._min_args, self._max_args, len(args))
def _raise_invalid_args(self, min_args, max_args, arg_count):
min_end = plural_or_not(min_args)
if min_args == max_args:
expectation = "%d argument%s" % (min_args, min_end)
elif max_args != sys.maxint:
expectation = "%d to %d arguments" % (min_args, max_args)
else:
expectation = "at least %d argument%s" % (min_args, min_end)
raise DataError("Expected %s, got %d." % (expectation, arg_count))
``` |
[
{
"content": "```python\n# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless... | [
{
"content": "<|memory_start|>```python\n# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-... | ```python
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from datetime import timedelta, datetime
from functools import wraps
import inspect
import json
import logging
import os
import pprint
import sys
import time
import yaml
from c7n.credentials import SessionFactory
from c7n.policy import Policy, load as policy_load
from c7n.reports import report as do_report
from c7n.utils import Bag, dumps
from c7n.manager import resources
from c7n.resources import load_resources
from c7n import mu, schema
log = logging.getLogger('custodian.commands')
def policy_command(f):
@wraps(f)
def _load_policies(options):
load_resources()
collection = policy_load(options, options.config)
policies = collection.filter(options.policy_filter)
return f(options, policies)
return _load_policies
def validate(options):
load_resources()
if options.config is not None:
# support the old -c option
options.configs.append(options.config)
if len(options.configs) < 1:
# no configs to test
# We don't have the parser object, so fake ArgumentParser.error
print('custodian validate: error: no config files specified',
file=sys.stderr)
sys.exit(2)
used_policy_names = set()
schm = schema.generate()
errors = []
for config_file in options.configs:
config_file = os.path.expanduser(config_file)
if not os.path.exists(config_file):
raise ValueError("Invalid path for config %r" % config_file)
options.dryrun = True
format = config_file.rsplit('.', 1)[-1]
with open(config_file) as fh:
if format in ('yml', 'yaml'):
data = yaml.safe_load(fh.read())
if format in ('json',):
data = json.load(fh)
errors = schema.validate(data, schm)
conf_policy_names = {p['name'] for p in data.get('policies', ())}
dupes = conf_policy_names.intersection(used_policy_names)
if len(dupes) >= 1:
errors.append(ValueError(
"Only one policy with a given name allowed, duplicates: %s" % (
", ".join(dupes)
)
))
used_policy_names = used_policy_names.union(conf_policy_names)
if not errors:
null_config = Bag(dryrun=True, log_group=None, cache=None, assume_role="na")
for p in data.get('policies', ()):
try:
Policy(p, null_config, Bag())
except Exception as e:
msg = "Policy: %s is invalid: %s" % (
p.get('name', 'unknown'), e)
errors.append(msg)
if not errors:
log.info("Configuration valid: {}".format(config_file))
continue
log.error("Configuration invalid: {}".format(config_file))
for e in errors:
log.error(" %s" % e)
if errors:
sys.exit(1)
@policy_command
def run(options, policies):
exit_code = 0
for policy in policies:
try:
policy()
except Exception:
exit_code = 1
if options.debug:
raise
log.exception(
"Error while executing policy %s, continuing" % (
policy.name))
sys.exit(exit_code)
@policy_command
def report(options, policies):
assert len(policies) == 1, "Only one policy report at a time"
policy = policies.pop()
d = datetime.now()
delta = timedelta(days=options.days)
begin_date = d - delta
do_report(
policy, begin_date, options, sys.stdout,
raw_output_fh=options.raw)
@policy_command
def logs(options, policies):
assert len(policies) == 1, "Only one policy log at a time"
policy = policies.pop()
if not policy.is_lambda:
log.debug('lambda only atm')
return
session_factory = SessionFactory(
options.region, options.profile, options.assume_role)
manager = mu.LambdaManager(session_factory)
for e in manager.logs(mu.PolicyLambda(policy)):
print("%s: %s" % (
time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime(e['timestamp'] / 1000)),
e['message']))
def _schema_get_docstring(starting_class):
""" Given a class, return its docstring.
If no docstring is present for the class, search base classes in MRO for a
docstring.
"""
for cls in inspect.getmro(starting_class):
if inspect.getdoc(cls):
return inspect.getdoc(cls)
def schema_cmd(options):
""" Print info about the resources, actions and filters available. """
if options.json:
schema.json_dump(options.resource)
return
load_resources()
resource_mapping = schema.resource_vocabulary()
if options.summary:
schema.summary(resource_mapping)
return
# Here are the formats for what we accept:
# - No argument
# - List all available RESOURCES
# - RESOURCE
# - List all available actions and filters for supplied RESOURCE
# - RESOURCE.actions
# - List all available actions for supplied RESOURCE
# - RESOURCE.actions.ACTION
# - Show class doc string and schema for supplied action
# - RESOURCE.filters
# - List all available filters for supplied RESOURCE
# - RESOURCE.filters.FILTER
# - Show class doc string and schema for supplied filter
if not options.resource:
resource_list = {'resources': sorted(resources.keys()) }
print(yaml.safe_dump(resource_list, default_flow_style=False))
return
# Format is RESOURCE.CATEGORY.ITEM
components = options.resource.split('.')
#
# Handle resource
#
resource = components[0].lower()
if resource not in resource_mapping:
print('{} is not a valid resource'.format(resource), file=sys.stderr)
sys.exit(2)
if len(components) == 1:
del(resource_mapping[resource]['classes'])
output = {resource: resource_mapping[resource]}
print(yaml.safe_dump(output))
return
#
# Handle category
#
category = components[1].lower()
if category not in ('actions', 'filters'):
print(("Valid choices are 'actions' and 'filters'."
" You supplied '{}'").format(category), file=sys.stderr)
sys.exit(2)
if len(components) == 2:
output = "No {} available for resource {}.".format(category, resource)
if category in resource_mapping[resource]:
output = {resource: {
category: resource_mapping[resource][category]}}
print(yaml.safe_dump(output))
return
#
# Handle item
#
item = components[2].lower()
if item not in resource_mapping[resource][category]:
print('{} is not in the {} list for resource {}'.format(
item, category, resource), file=sys.stderr)
sys.exit(2)
if len(components) == 3:
cls = resource_mapping[resource]['classes'][category][item]
# Print docstring
docstring = _schema_get_docstring(cls)
print("\nHelp\n----\n")
if docstring:
print(docstring)
else:
# Shouldn't ever hit this, so exclude from cover
print("No help is available for this item.") # pragma: no cover
# Print schema
print("\nSchema\n------\n")
pp = pprint.PrettyPrinter(indent=4)
if hasattr(cls, 'schema'):
pp.pprint(cls.schema)
else:
# Shouldn't ever hit this, so exclude from cover
print("No schema is available for this item.", file=sys.sterr) # pragma: no cover
print('')
return
# We received too much (e.g. s3.actions.foo.bar)
print("Invalid selector '{}'. Max of 3 components in the "
"format RESOURCE.CATEGORY.ITEM".format(options.resource),
file=sys.stderr)
sys.exit(2)
def _metrics_get_endpoints(options):
""" Determine the start and end dates based on user-supplied options. """
if bool(options.start) ^ bool(options.end):
print('Error: --start and --end must be specified together',
file=sys.stderr)
sys.exit(2)
if options.start and options.end:
start = options.start
end = options.end
else:
end = datetime.utcnow()
start = end - timedelta(options.days)
return start, end
@policy_command
def metrics_cmd(options, policies):
start, end = _metrics_get_endpoints(options)
data = {}
for p in policies:
log.info('Getting %s metrics', p)
data[p.name] = p.get_metrics(start, end, options.period)
print(dumps(data, indent=2))
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# awl.tests.test_utils.py\nimport sys\nfrom io import StringIO\n\nfrom django.test import TestCase\n\nfrom awl.tests.models import Link\nfrom awl.utils import (URLTree, refetch, refetch_for_update, render_page,\n re... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# awl.tests.test_utils.py\nimport sys\nfrom io import StringIO\n\nfrom django.test import TestCase\n\nfrom awl.tests.models import Link\nfrom awl.utils import (URLTree, refetch, refetch_for_update, rend... | ```python
# awl.tests.test_utils.py
import sys
from io import StringIO
from django.test import TestCase
from awl.tests.models import Link
from awl.utils import (URLTree, refetch, refetch_for_update, render_page,
render_page_to_string, get_field_names, get_obj_attr)
from awl.waelsteng import FakeRequest
# ============================================================================
class UtilsTest(TestCase):
def test_url_tree(self):
# print_tree() exercises everything, so run it and capture stdout
tree = URLTree()
saved_stdout = sys.stderr
try:
out = StringIO()
sys.stdout = out
tree.print_tree()
finally:
sys.stdout = saved_stdout
def test_refetch(self):
link = Link.objects.create(url='url', text='text')
link.text = 'foo'
link = refetch(link)
self.assertEqual('url', link.url)
self.assertEqual('text', link.text)
link.text = 'foo'
link = refetch_for_update(link)
self.assertEqual('url', link.url)
self.assertEqual('text', link.text)
def test_renders(self):
request = FakeRequest()
expected = 'Hello World\n'
result = render_page_to_string(request, 'sample.html', {'name':'World'})
self.assertEqual(expected, result)
response = render_page(request, 'sample.html', {'name':'World'})
self.assertEqual(expected, response.content.decode('ascii'))
def test_get_field_names(self):
from awl.tests.models import Person
# test defaults, ignore order
expected = ['name', 'phone']
result = get_field_names(Person)
self.assertEqual(set(result), set(expected))
# test ignore_auto, ignore_relations and exclude
expected.extend(['id', 'building', 'address', 'courses', 'best_friend',
'person'])
expected.remove('phone')
result = get_field_names(Person, ignore_auto=False,
ignore_relations=False, exclude=['phone'])
self.assertEqual(set(result), set(expected))
def test_get_obj_attr(self):
# --- data for testing
class Character(object):
pass
class Cartoon(object):
pass
barney = Character()
barney.name = 'Barney'
betty = Character()
betty.name = 'Betty'
betty.husband = barney
wilma = Character()
wilma.name = 'Wilma'
wilma.friend = betty
cartoon = Cartoon()
cartoon.name = 'Flinstones'
cartoon.character = wilma
# --- tests
self.assertEqual('Flinstones', get_obj_attr(cartoon, 'name'))
self.assertEqual(wilma, get_obj_attr(cartoon, 'character'))
self.assertEqual(betty, get_obj_attr(cartoon, 'character__friend'))
self.assertEqual(barney, get_obj_attr(cartoon,
'character__friend__husband'))
with self.assertRaises(AttributeError):
get_obj_attr(cartoon, 'foo')
with self.assertRaises(AttributeError):
get_obj_attr(cartoon, 'character__foo')
``` |
[
{
"content": "Repeat the code precisely:\n```python\n\"\"\"\nAuto-auth page (used to automatically log in during testing).\n\"\"\"\n\nimport urllib\nfrom bok_choy.page_object import PageObject\nfrom . import BASE_URL\n\n\nclass AutoAuthPage(PageObject):\n \"\"\"\n The automatic authorization page.\n Wh... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n\"\"\"\nAuto-auth page (used to automatically log in during testing).\n\"\"\"\n\nimport urllib\nfrom bok_choy.page_object import PageObject\nfrom . import BASE_URL\n\n\nclass AutoAuthPage(PageObject):\n \"\"\"\n The automatic authorizati... | ```python
"""
Auto-auth page (used to automatically log in during testing).
"""
import urllib
from bok_choy.page_object import PageObject
from . import BASE_URL
class AutoAuthPage(PageObject):
"""
The automatic authorization page.
When allowed via the django settings file, visiting
this url will create a user and log them in.
"""
name = "studio.auto_auth"
def url(self, username=None, email=None, password=None, staff=None, course_id=None): #pylint: disable=W0221
"""
Auto-auth is an end-point for HTTP GET requests.
By default, it will create accounts with random user credentials,
but you can also specify credentials using querystring parameters.
`username`, `email`, and `password` are the user's credentials (strings)
`staff` is a boolean indicating whether the user is global staff.
`course_id` is the ID of the course to enroll the student in.
Currently, this has the form "org/number/run"
Note that "global staff" is NOT the same as course staff.
"""
# The base URL, used for creating a random user
url = BASE_URL + "/auto_auth"
# Create query string parameters if provided
params = {}
if username is not None:
params['username'] = username
if email is not None:
params['email'] = email
if password is not None:
params['password'] = password
if staff is not None:
params['staff'] = "true" if staff else "false"
if course_id is not None:
params['course_id'] = course_id
query_str = urllib.urlencode(params)
# Append the query string to the base URL
if query_str:
url += "?" + query_str
return url
def is_browser_on_page(self):
return True
``` |
[
{
"content": "Here is the code block:\n```python\n# export_2d.py\n# functions to help to generate 2D dxf and svg plan.\n# created by charlyoleg on 2013/05/31\n#\n# (C) Copyright 2013 charlyoleg\n#\n# This file is part of the Cnc25D Python package.\n# \n# Cnc25D is free software: you can redistribute it and/or m... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\n# export_2d.py\n# functions to help to generate 2D dxf and svg plan.\n# created by charlyoleg on 2013/05/31\n#\n# (C) Copyright 2013 charlyoleg\n#\n# This file is part of the Cnc25D Python package.\n# \n# Cnc25D is free software: you can redistri... | ```python
# export_2d.py
# functions to help to generate 2D dxf and svg plan.
# created by charlyoleg on 2013/05/31
#
# (C) Copyright 2013 charlyoleg
#
# This file is part of the Cnc25D Python package.
#
# Cnc25D is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cnc25D is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cnc25D. If not, see <http://www.gnu.org/licenses/>.
"""
export_2d.py provides functions to create DXF file from a FreeCAD Part Oject
"""
################################################################
# header for Python / FreeCAD compatibility
################################################################
import importing_freecad
importing_freecad.importing_freecad()
#print("FreeCAD.Version:", FreeCAD.Version())
#FreeCAD.Console.PrintMessage("Hello from PrintMessage!\n") # avoid using this method because it is not printed in the FreeCAD GUI
################################################################
# import
################################################################
import Part
from FreeCAD import Base
import importDXF
import Drawing
#import FreeCADGui
################################################################
# export_2d sub-functions
################################################################
def draw_rectangle(ai_position_x, ai_position_y, ai_size_x, ai_size_y):
p1 = Base.Vector(ai_position_x+0*ai_size_x, ai_position_y+0*ai_size_y, 0)
p2 = Base.Vector(ai_position_x+1*ai_size_x, ai_position_y+0*ai_size_y, 0)
p3 = Base.Vector(ai_position_x+1*ai_size_x, ai_position_y+1*ai_size_y, 0)
p4 = Base.Vector(ai_position_x+0*ai_size_x, ai_position_y+1*ai_size_y, 0)
r_rectangle_outline=[]
r_rectangle_outline.append(Part.makeLine(p1, p2))
r_rectangle_outline.append(Part.makeLine(p2, p3))
r_rectangle_outline.append(Part.makeLine(p3, p4))
r_rectangle_outline.append(Part.makeLine(p4, p1))
#r_rectangle = Part.Face(Part.Wire(r_rectangle_outline))
r_rectangle = r_rectangle_outline
return(r_rectangle)
def draw_gauge(ai_drawing_length, ai_drawing_height, ai_representation_max, ai_representation_value, ai_position_x, ai_position_y):
l_gauge_value = ai_drawing_length*ai_representation_value/float(ai_representation_max)
#print("dbg067: l_gauge_value:", l_gauge_value)
r_gauge = []
r_gauge.extend(draw_rectangle(ai_position_x-ai_drawing_height/2.0, ai_position_y, ai_drawing_length+ai_drawing_height, ai_drawing_height))
r_gauge.extend(draw_rectangle(ai_position_x, ai_position_y+ai_drawing_height/4.0, l_gauge_value, ai_drawing_height/2.0))
return(r_gauge)
################################################################
# export_2d API
################################################################
def export_to_dxf_abandoned(ai_solid, ai_vector, ai_depth, ai_output_file): # it works only the FreeCAD Gui
""" [Obsolete] create a DXF of a slice of FreeCAD Part Object
"""
l_slices = ai_solid.slice(ai_vector, ai_depth)
l_doc = App.newDocument("tmp_doc")
i=0
for l_shape in l_slices:
i += 1
l_obj = l_doc.addObject("Part::Feature","MyShape{:02d}".format(i))
#l_doc.MyShape.Shape = l_shape
#App.ActiveDocument.MyShape.Shape = l_shape
l_obj.Shape = l_shape
#l_doc.recompute()
l_objects = App.ActiveDocument.Objects
#l_objects = FreeCAD.ActiveDocument.Objects
# this work with the gui but not in pure python script
# Suspect root cause:
# /usr/lib/freecad/Mod/Draft/importDXF.py line:49
# it seems it doesn't detect the gui is off
importDXF.export(l_objects, ai_output_file)
return(1)
def export_to_dxf(ai_solid, ai_vector, ai_depth, ai_output_file):
""" create a DXF of a slice of FreeCAD Part Object
"""
l_slice = Part.makeCompound(ai_solid.slice(ai_vector, ai_depth)) # slice the plank in the ai_vector plan at a the height ai_depth
r_dxf = Drawing.projectToDXF(l_slice, ai_vector)
#r_dxf = Drawing.projectToDXF(ai_solid, ai_vector) # works also :)
fh_output = open(ai_output_file, 'w')
fh_output.write(r_dxf)
fh_output.close()
return(1)
def export_to_svg(ai_solid, ai_vector, ai_depth, ai_output_file):
""" create a SVG of a slice of FreeCAD Part Object. The generated SVG is incomplete. SVG header must be added to it to be opened by Inkscape
"""
l_slice = Part.makeCompound(ai_solid.slice(ai_vector, ai_depth)) # slice the plank in the ai_vector plan at a the height ai_depth
r_dxf = Drawing.projectToSVG(l_slice, ai_vector) # it generates a snippet of svg not directly usable by Inkscape. It needs the svg head and document markers.
#r_dxf = Drawing.projectToSVG(ai_solid, ai_vector) # works also :)
fh_output = open(ai_output_file, 'w')
fh_output.write(r_dxf)
fh_output.close()
return(1)
def export_xyz_to_dxf(ai_solid, ai_size_x, ai_size_y, ai_size_z, ai_xy_slice_list, ai_xz_slice_list, ai_yz_slice_list, ai_output_file):
""" Cut a FreeCAD Part Object in many slices in the three directions X, Y and Z and put all those slices in a DXF file
"""
# calculate the space between two drawings
l_space = max(ai_size_x/5.0, ai_size_y/5.0, ai_size_z/5.0)
#
vec_z_unit = Base.Vector(0,0,1)
#
l_slice_list = []
l_pos_y = 0
for lo in ['xy','xz','yz']:
#l_solid = ai_solid
l_solid = ai_solid.copy()
l_depth_list = []
l_shift_x = 0
l_gauge_max = 0
if(lo=='xy'):
l_solid.rotate(Base.Vector(ai_size_x/2.0, ai_size_y/2.0, ai_size_z/2.0), Base.Vector(0,0,1), 0)
l_solid.translate(Base.Vector(0,0,0)) # place the module corner at origin (0,0,0)
l_solid.translate(Base.Vector(0,2*ai_size_z+7*l_space,0))
l_pos_y = 2*ai_size_z+6*l_space
l_depth_list = ai_xy_slice_list
l_shift_x = ai_size_x
l_gauge_max = ai_size_z
elif(lo=='xz'):
l_solid.rotate(Base.Vector(ai_size_x/2.0, ai_size_y/2.0, ai_size_z/2.0), Base.Vector(1,0,0), -90)
l_solid.translate(Base.Vector((ai_size_x-ai_size_x)/2.0, (ai_size_z-ai_size_y)/2.0, (ai_size_y-ai_size_z)/2.0)) # place the module corner at origin (0,0,0)
l_solid.translate(Base.Vector(0,1*ai_size_z+4*l_space,0))
l_pos_y = 1*ai_size_z+3*l_space
l_depth_list = ai_xz_slice_list
l_shift_x = ai_size_x
l_gauge_max = ai_size_y
elif(lo=='yz'):
l_solid.rotate(Base.Vector(ai_size_x/2.0, ai_size_y/2.0, ai_size_z/2.0), Base.Vector(0,0,1), -90)
l_solid.rotate(Base.Vector(ai_size_x/2.0, ai_size_y/2.0, ai_size_z/2.0), Base.Vector(1,0,0), -90)
l_solid.translate(Base.Vector((ai_size_y-ai_size_x)/2.0, (ai_size_z-ai_size_y)/2.0, (ai_size_x-ai_size_z)/2.0)) # place the module corner at origin (0,0,0)
l_solid.translate(Base.Vector(0,l_space,0))
l_pos_y = 0*ai_size_z+0*l_space
l_depth_list = ai_yz_slice_list
l_shift_x = ai_size_y
l_gauge_max = ai_size_x
l_pos_x = 0
for l_depth in l_depth_list:
#print("dbg163: l_shift_x l_space l_gauge_max l_depth l_pos_x l_pos_y", l_shift_x, l_space, l_gauge_max, l_depth, l_pos_x, l_pos_y)
l_slice_list.extend(draw_gauge(l_shift_x, l_space/2.0, l_gauge_max, l_depth, l_pos_x, l_pos_y))
l_pos_x += l_shift_x+2*l_space
ll_depth = l_depth
if(lo=='xz'):
ll_depth = ai_size_y-l_depth
#print("dbg168: ll_depth:", ll_depth)
l_slice_list.extend(l_solid.slice(vec_z_unit, ll_depth))
l_solid.translate(Base.Vector(l_shift_x+2*l_space,0,0))
l_slice = Part.makeCompound(l_slice_list)
# temporary commented because of OpenCascade bug
#r_dxf = Drawing.projectToDXF(l_slice, vec_z_unit)
##r_dxf = Drawing.projectToDXF(ai_solid, ai_vector)
#fh_output = open(ai_output_file, 'w')
#fh_output.write(r_dxf)
#fh_output.close()
return(1)
``` |
[
{
"content": "```python\n#\n# Copyright (c) 2015 nexB Inc. and others. All rights reserved.\n# http://nexb.com and https://github.com/nexB/scancode-toolkit/\n# The ScanCode software is licensed under the Apache License version 2.0.\n# Data generated with ScanCode require an acknowledgment.\n# ScanCode is a trad... | [
{
"content": "<|memory_start|>```python\n#\n# Copyright (c) 2015 nexB Inc. and others. All rights reserved.\n# http://nexb.com and https://github.com/nexB/scancode-toolkit/\n# The ScanCode software is licensed under the Apache License version 2.0.\n# Data generated with ScanCode require an acknowledgment.\n# Sc... | ```python
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import logging
import string
import re
import url as urlpy
import ipaddress
from textcode import analysis
from cluecode import finder_data
LOG = logging.getLogger(__name__)
DEBUG = False
"""
Find patterns in text lines such as a emails and URLs.
Optionally apply filters to pattern matches.
"""
def find(location, patterns):
"""
Yield match and matched lines for patterns found in file at location as a
tuple of (key, found text, text line). Pattern is list of tuples (key,
compiled regex).
Note: the location can be a list of lines for testing convenience.
"""
if DEBUG:
from pprint import pformat
loc = pformat(location)
print('find(location=%(loc)r,\n patterns=%(patterns)r)' % locals())
for i, line in enumerate(analysis.text_lines(location)):
lineno = i + 1
for key, pattern in patterns:
for match in pattern.findall(line):
if DEBUG:
print('find: yielding match: key=%(key)r, '
'match=%(match)r,\n line=%(line)r' % locals())
yield key, unicode(match), line, lineno
def find_and_filter(location, patterns, filters, unique=True):
"""
Yield match and matched line number for patterns found in file at location
as a tuple of (found text, line number). Pattern is list of tuples (key,
compiled regex).
Note: the location can be a list of lines for testing convenience.
"""
def unique_filter(matches):
"""
Iterate over matches and yield unique matches.
"""
uniques = set()
for key, match, line, lineno in matches:
if (key, match,) in uniques:
continue
uniques.add((key, match,))
yield key, match, line, lineno
def apply_filters(matches, *filters):
"""
Apply a sequence of `filters` to a `matches` iterable. Return a new filtered
matches iterable.
A filter must accept a single arg: an iterable of tuples of (key, match,
line, lineno) and must return an iterable of tuples of (key, match, line,
lineno).
"""
for filt in filters:
matches = filt(matches)
return matches
def build_regex_filter(pattern):
"""
Return a filter function using regex pattern, filtering out matches
matching this regex. The pattern should be text, not a compiled re.
"""
def re_filt(matches):
for key, match, line, lineno in matches:
if re.match(regex, match):
if DEBUG:
print('build_regex_filter(pattern=%(pattern)r: '
'filtering match: %(match)r' % locals())
continue
yield key, match, line, lineno
regex = re.compile(pattern, re.UNICODE | re.I)
return re_filt
# A good reference page of email address regex is:
# http://fightingforalostcause.net/misc/2006/compare-email-regex.php email
# regex from http://www.regular-expressions.info/regexbuddy/email.html
def emails_regex():
return re.compile(r'\b[A-Z0-9._%-]+@[A-Z0-9.-]+\.[A-Z]{2,4}\b', re.IGNORECASE)
def find_emails(location, unique=True):
"""
Yield emails found in file at location.
Only return unique items if unique is True.
"""
patterns = [('emails', emails_regex(),)]
matches = find(location, patterns)
filters = (junk_email_domains_filter,)
if unique:
filters += (unique_filter,)
matches = apply_filters(matches, *filters)
for _key, email, _line, lineno in matches:
yield email, lineno
def junk_email_domains_filter(matches):
"""
Given an iterable of email matches, return an iterable where email with
common uninteresting domains have been removed, such as local, non public
or example.com emails.
"""
for key, email, line, lineno in matches:
domain = email.split('@')[-1]
if not is_good_host(domain):
continue
yield key, email, line, lineno
def uninteresting_emails_filter(matches):
"""
Given an iterable of emails matches, return an iterable where common
uninteresting emails have been removed.
"""
for key, email, line, lineno in matches:
good_email = finder_data.classify_email(email)
if not good_email:
continue
yield key, email, line, lineno
# TODO: consider: http://www.regexguru.com/2008/11/detecting-urls-in-a-block-of-text/
# TODO: consider: http://blog.codinghorror.com/the-problem-with-urls/
schemes = 'https?|ftps?|sftp|rsync|ssh|svn|git|hg|https?\+git|https?\+svn|https?\+hg'
url_body = '[^\s<>\[\]"]'
def urls_regex():
    """
    Return a compiled regex matching URLs: scheme-prefixed URLs, scheme-less
    www./ftp. URLs, and git@host:path.git style URLs. Whitespace and '#'
    comments inside the pattern are ignored (re.VERBOSE).
    """
    # no space, no < >, no [ ] and no double quote
    parts = {'schemes': schemes, 'url_body': url_body}
    pattern = r'''
    (
        # URLs with schemes
        (?:%(schemes)s)://%(url_body)s+
        |
        # common URLs prefix without schemes
        (?:www|ftp)\.%(url_body)s+
        |
        # git style git@github.com:christophercantu/pipeline.git
        git\@%(url_body)s+:%(url_body)s+\.git
    )''' % parts
    return re.compile(pattern, re.UNICODE | re.VERBOSE | re.IGNORECASE)
# Pattern matching scheme-prefixed "URLs" whose body is only junk characters
# (e.g. "http://$%*/"); used by find_urls to drop such false positives.
INVALID_URLS_PATTERN = '((?:' + schemes + ')://([$%*/_])+)'
def find_urls(location, unique=True):
    """
    Yield (url, line_number) tuples found in the file at location.
    Suppress duplicate URLs when unique is True.
    """
    matches = find(location, [('urls', urls_regex(),)])
    # the order of filters IS important
    applied = (
        verbatim_crlf_url_cleaner,
        end_of_url_cleaner,
        empty_urls_filter,
        scheme_adder,
        user_pass_cleaning_filter,
        build_regex_filter(INVALID_URLS_PATTERN),
        canonical_url_cleaner,
        junk_url_hosts_filter,
        junk_urls_filter,
    )
    if unique:
        applied += (unique_filter,)
    for _key, url, _line, lineno in apply_filters(matches, *applied):
        yield unicode(url), lineno
EMPTY_URLS = set(['https', 'http', 'ftp', 'www', ])

def empty_urls_filter(matches):
    """
    Filter an iterable of URL matches, dropping matches that are empty or
    that reduce to a bare scheme/prefix once punctuation and whitespace
    are stripped.
    """
    for key, match, line, lineno in matches:
        stripped = match.lower().strip(string.punctuation).strip()
        if stripped and stripped not in EMPTY_URLS:
            yield key, match, line, lineno
        elif DEBUG:
            print('empty_urls_filter: filtering match: %(match)r'
                  % locals())
def verbatim_crlf_url_cleaner(matches):
    """
    Filter an iterable of URL matches, removing literal (un-encoded) "\\n"
    and "\\r" escape sequences that may show up verbatim inside a URL.
    URLs already ending with a slash are passed through untouched.
    """
    # FIXME: when is this possible and could happen?
    for key, url, line, lineno in matches:
        if url.endswith('/'):
            yield key, url, line, lineno
        else:
            cleaned = url.replace(r'\n', '').replace(r'\r', '')
            yield key, cleaned, line, lineno
def end_of_url_cleaner(matches):
    """
    Filter an iterable of URL matches, stripping junk commonly glued to the
    end of a URL: HTML entities, trailing punctuation, and anything after a
    quote, bracket or backslash character.
    This is not entirely correct, but works practically.
    """
    # Order matters: each terminator truncates the URL at its first
    # occurrence, in sequence.
    terminators = (u'\\', u'<', u'>', u'(', u')', u'[', u']', u'"', u"'")
    for key, url, line, lineno in matches:
        if not url.endswith('/'):
            url = url.replace(u'<', u'<')
            url = url.replace(u'>', u'>')
            url = url.replace(u'&', u'&')
            url = url.rstrip(string.punctuation)
        for term in terminators:
            url = url.split(term)[0]
        yield key, url, line, lineno
non_standard_urls_prefix = ('git@',)

def is_filterable(url):
    """
    Return True if `url` may go through the URL filters. URLs in
    non-standard forms (such as git@github.com:user/repo.git) bypass the
    filters that assume a scheme://host structure.
    """
    if url.startswith(non_standard_urls_prefix):
        return False
    return True
def scheme_adder(matches):
    """
    Filter an iterable of URL matches, prefixing a fake http:// scheme to
    scheme-less URLs so downstream URL parsing works.
    """
    for key, match, line, lineno in matches:
        if not is_filterable(match):
            yield key, match, line, lineno
        else:
            yield key, add_fake_scheme(match), line, lineno
def add_fake_scheme(url):
    """
    Return `url` prefixed with a fake http:// scheme if it has none,
    stripping any leading colons/slashes and surrounding spaces first.
    """
    if has_scheme(url):
        return url
    return u'http://' + url.lstrip(u':/').strip()
def has_scheme(url):
    """
    Return a truthy regex match if `url` starts with a recognized scheme,
    or None otherwise.
    """
    scheme_pattern = '^(?:%(schemes)s)://.*' % globals()
    return re.match(scheme_pattern, url)
def user_pass_cleaning_filter(matches):
    """
    Given an iterable of URL matches, return an iterable where user and
    password are removed from the URLs host.
    """
    # NOTE(review): the cleaned `host` computed below is never written back
    # into `match`, so the yielded URL still contains any user:pass portion.
    # As written, this filter only drops matches without a parseable host —
    # confirm whether rewriting the URL was intended.
    for key, match, line, lineno in matches:
        if is_filterable(match):
            host, _domain = url_host_domain(match)
            if not host:
                if DEBUG:
                    print('user_pass_cleaning_filter: '
                          'filtering match(no host): %(match)r' % locals())
                continue
            if '@' in host:
                # strips any user/pass
                host = host.split(u'@')[-1]
        # Non-filterable (git@...) matches and host-bearing matches pass
        # through unchanged.
        yield key, match, line, lineno
def canonical_url(uri):
    """
    Return the canonical representation of a given URI.
    This assumes the `uri` has a scheme.
    * When a default port corresponding for the scheme is explicitly declared
      (such as port 80 for http), the port will be removed from the output.
    * Fragments '#' are not removed.
    * Params and query string arguments are not reordered.
    """
    # sanitize() then punycode() come from the urlpy library; presumably
    # they normalize the path/query and encode a non-ASCII host — confirm
    # against the urlpy docs.
    normalized = urlpy.parse(uri).sanitize().punycode()
    # Drop an explicitly-stated default port (reaches into urlpy private
    # attributes `_port` and `_scheme`).
    if normalized._port == urlpy.PORTS.get(normalized._scheme, None):
        normalized._port = None
    return normalized.utf8()
def canonical_url_cleaner(matches):
    """
    Given an iterable of URL matches, return an iterable where URLs have been
    canonicalized.
    """
    for key, match, line, lineno in matches:
        if is_filterable(match):
            # Keep the canonical form under its own name so the DEBUG trace
            # can show both values. The original code referenced an undefined
            # `canonic` in the format below, raising a KeyError whenever
            # DEBUG was enabled.
            canonic = canonical_url(match)
            if DEBUG:
                print('canonical_url_cleaner: '
                      'match=%(match)r, canonic=%(canonic)r' % locals())
            match = canonic
        yield key, match, line, lineno
IP_V4_RE = r'^(\d{1,3}\.){0,3}\d{1,3}$'

# Compile once at import time instead of on every call.
_IP_V4_MATCHER = re.compile(IP_V4_RE)

def is_ip_v4(s):
    """
    Return a truthy regex match if string `s` looks like an IPv4 address.
    Deliberately loose: also matches shorter dotted numbers such as '1.2'.
    """
    return _IP_V4_MATCHER.match(s)
IP_V6_RE = (
    r'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}$'
    '|'
    r'^([0-9a-f]{0,4}:){2,6}(\d{1,3}\.){0,3}\d{1,3}$'
)

# Compile once at import time instead of on every call.
_IP_V6_MATCHER = re.compile(IP_V6_RE)

def is_ip_v6(s):
    """
    Return a truthy regex match if string `s` is an IPv6 address, in plain
    or IPv4-mapped form (lowercase hex digits only).
    """
    return _IP_V6_MATCHER.match(s)
def is_ip(s):
    """
    Return a truthy value if string s is an IPv4 or IPv6 address.
    """
    return is_ip_v4(s) or is_ip_v6(s)
def get_ip(s):
    """
    Return an ipaddress address object if string s is a valid IP address,
    or False otherwise.
    """
    if not is_ip(s):
        return False
    try:
        # unicode() is required on Python 2: ipaddress rejects byte strings.
        ip = ipaddress.ip_address(unicode(s))
        return ip
    except ValueError:
        return False
def is_private_ip(ip):
    """
    Return True if `ip` (an ipaddress address object) is a private, reserved
    or otherwise non-public IP, False if it is public.
    Return None implicitly if `ip` is falsy.
    """
    if ip:
        if isinstance(ip, ipaddress.IPv4Address):
            private = (
                ip.is_reserved
                or ip.is_private
                or ip.is_multicast
                or ip.is_unspecified
                or ip.is_loopback
                or ip.is_link_local
            )
        else:
            # Bug fix: this assignment was written as a call `private(...)`
            # (missing `=`), raising UnboundLocalError for any IPv6 address.
            private = (
                ip.is_multicast
                or ip.is_reserved
                or ip.is_link_local
                or ip.is_site_local
                or ip.is_private
                or ip.is_unspecified
                or ip.is_loopback
            )
        return private
def is_good_host(host):
    """
    Return True if the host is not some local or uninteresting host.
    """
    if not host:
        return False

    ip = get_ip(host)
    if ip:
        # Numeric hosts: public IPs only, then classify by known-IP data.
        if is_private_ip(ip):
            return False
        return finder_data.classify_ip(host)

    # At this stage we have a host name, not an IP.
    if '.' not in host:
        # Private hostnames not in a domain, including localhost.
        return False
    return finder_data.classify_host(host)
def url_host_domain(url):
    """
    Return a tuple of the (host, domain) of a URL or (None, None).
    Assumes that the URL has a scheme.
    """
    # Reaches into urlpy's private `_host`; pld() is presumably the
    # "pay-level domain" (e.g. example.co.uk for www.example.co.uk) —
    # confirm against the urlpy docs.
    parsed = urlpy.parse(url)
    host = parsed._host
    if not host:
        return None, None
    host = host.lower()
    domain = parsed.pld().lower()
    return host, domain
def junk_url_hosts_filter(matches):
    """
    Filter an iterable of URL matches, dropping URLs whose host or domain
    is local, private or otherwise uninteresting (such as example.com).
    Non-filterable URLs (e.g. git@... forms) pass through unchanged.
    """
    for key, match, line, lineno in matches:
        if not is_filterable(match):
            yield key, match, line, lineno
            continue
        host, domain = url_host_domain(match)
        if not is_good_host(host):
            if DEBUG:
                print('junk_url_hosts_filter: '
                      '!is_good_host:%(host)r): %(match)r' % locals())
            continue
        if not is_good_host(domain) and not is_ip(host):
            if DEBUG:
                print('junk_url_hosts_filter: ''!is_good_host:%(domain)r '
                      'and !is_ip:%(host)r: %(match)r' % locals())
            continue
        yield key, match, line, lineno
def junk_urls_filter(matches):
    """
    Filter an iterable of URL matches, keeping only URLs that finder_data
    classifies as interesting (dropping local, non-public or example URLs).
    """
    for key, match, line, lineno in matches:
        if finder_data.classify_url(match):
            yield key, match, line, lineno
        elif DEBUG:
            print('junk_url_filter: %(match)r' % locals())
def find_pattern(location, pattern, unique=False):
    """
    Find regex pattern in the text lines of file at location.
    Yield (match, line_number) tuples where each match is all match groups
    joined as one unicode string. Suppress duplicates when unique is True.
    """
    compiled = re.compile(pattern, re.UNICODE | re.I)
    matches = find(location, [(None, compiled,)])
    if unique:
        matches = unique_filter(matches)
    for _key, match, _line, lineno in matches:
        yield match, lineno
``` |
[
{
"content": "Here is a code file:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom runner.koan import *\n\nclass AboutTuples(Koan):\n def test_creating_a_tuple(self):\n count_of_three = (1, 2, 5)\n self.assertEqual(5, count_of_three[2])\n\n def test_tuples_are_immutable_s... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom runner.koan import *\n\nclass AboutTuples(Koan):\n def test_creating_a_tuple(self):\n count_of_three = (1, 2, 5)\n self.assertEqual(5, count_of_three[2])\n\n def test_tuples... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutTuples(Koan):
def test_creating_a_tuple(self):
count_of_three = (1, 2, 5)
self.assertEqual(5, count_of_three[2])
def test_tuples_are_immutable_so_item_assignment_is_not_possible(self):
count_of_three = (1, 2, 5)
try:
count_of_three[2] = "three"
except TypeError as ex:
msg = ex.args[0]
# Note, assertRegexpMatches() uses regular expression pattern matching,
# so you don't have to copy the whole message.
self.assertRegexpMatches(msg, "'tuple' object")
def test_tuples_are_immutable_so_appending_is_not_possible(self):
count_of_three = (1, 2, 5)
with self.assertRaises(AttributeError): count_of_three.append("boom")
# Tuples are less flexible than lists, but faster.
def test_tuples_can_only_be_changed_through_replacement(self):
count_of_three = (1, 2, 5)
list_count = list(count_of_three)
list_count.append("boom")
count_of_three = tuple(list_count)
self.assertEqual((1, 2, 5, "boom"), count_of_three)
def test_tuples_of_one_look_peculiar(self):
self.assertEqual(int, (1).__class__)
self.assertEqual(tuple, (1,).__class__)
self.assertEqual(("Hello comma!",), ("Hello comma!", ))
def test_tuple_constructor_can_be_surprising(self):
self.assertEqual(('S','u','r','p','r','i','s','e','!'), tuple("Surprise!"))
def test_creating_empty_tuples(self):
self.assertEqual(tuple() , ())
self.assertEqual(() , tuple()) #Sometimes less confusing
def test_tuples_can_be_embedded(self):
lat = (37, 14, 6, 'N')
lon = (115, 48, 40, 'W')
place = ('Area 51', lat, lon)
self.assertEqual(('Area 51', (37, 14, 6, 'N'), (115, 48, 40, 'W')), place)
def test_tuples_are_good_for_representing_records(self):
locations = [
("Illuminati HQ", (38, 52, 15.56, 'N'), (77, 3, 21.46, 'W')),
("Stargate B", (41, 10, 43.92, 'N'), (1, 49, 34.29, 'W')),
]
locations.append( ("Cthulu", (26, 40, 1, 'N'), (70, 45, 7, 'W')) )
self.assertEqual('Cthulu', locations[2][0])
self.assertEqual(15.56, locations[0][1][2])
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/lic... | ```python
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1beta1.types import migration_service
from google.longrunning import operations_pb2 # type: ignore
from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import MigrationServiceGrpcTransport
class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport):
"""gRPC AsyncIO backend transport for MigrationService.
A service that migrates resources from automl.googleapis.com,
datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes)
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
**self_signed_jwt_kwargs,
**kwargs,
)
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or applicatin default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def search_migratable_resources(
self,
) -> Callable[
[migration_service.SearchMigratableResourcesRequest],
Awaitable[migration_service.SearchMigratableResourcesResponse],
]:
r"""Return a callable for the search migratable resources method over gRPC.
Searches all of the resources in
automl.googleapis.com, datalabeling.googleapis.com and
ml.googleapis.com that can be migrated to Vertex AI's
given location.
Returns:
Callable[[~.SearchMigratableResourcesRequest],
Awaitable[~.SearchMigratableResourcesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "search_migratable_resources" not in self._stubs:
self._stubs["search_migratable_resources"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.MigrationService/SearchMigratableResources",
request_serializer=migration_service.SearchMigratableResourcesRequest.serialize,
response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize,
)
return self._stubs["search_migratable_resources"]
@property
def batch_migrate_resources(
self,
) -> Callable[
[migration_service.BatchMigrateResourcesRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the batch migrate resources method over gRPC.
Batch migrates resources from ml.googleapis.com,
automl.googleapis.com, and datalabeling.googleapis.com
to Vertex AI.
Returns:
Callable[[~.BatchMigrateResourcesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_migrate_resources" not in self._stubs:
self._stubs["batch_migrate_resources"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.MigrationService/BatchMigrateResources",
request_serializer=migration_service.BatchMigrateResourcesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_migrate_resources"]
__all__ = ("MigrationServiceGrpcAsyncIOTransport",)
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n\"\"\"\nproblem68.py\n\nhttps://projecteuler.net/problem=68\n\nWhat is the maximum 16-digit string for a 'magic' 5-gon ring?\n\"\"\"\nfrom itertools import chain\n\nflatten = chain.from_iterable\n\ndef five_gon_rings(n):\n \"\"\"Ret... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n\"\"\"\nproblem68.py\n\nhttps://projecteuler.net/problem=68\n\nWhat is the maximum 16-digit string for a 'magic' 5-gon ring?\n\"\"\"\nfrom itertools import chain\n\nflatten = chain.from_iterable\n\ndef five_gon_rings(n)... | ```python
"""
problem68.py
https://projecteuler.net/problem=68
What is the maximum 16-digit string for a 'magic' 5-gon ring?
"""
from itertools import chain
flatten = chain.from_iterable
def five_gon_rings(n):
"""Return list of solutions to the 'magic' 5-gon ring problem, each line
summing to n. The empty list will be returned if there are no solutions."""
rings = [([a, b, c], [d, c, e], [f, e, g], [h, g, i], [j, i, b])
for a in range(1, 10+1)
for b in range(1, 10+1) if b != a
for c in range(1, 10+1) if c not in [a, b]
if a + b + c == n
for d in range(1, 10+1) if d not in [a, b, c]
for e in range(1, 10+1) if e not in [a, b, c, d]
if d + c + e == n
for f in range(1, 10+1) if f not in [a, b, c, d, e]
for g in range(1, 10+1) if g not in [a, b, c, d, e, f]
if f + e + g == n
for h in range(1, 10+1) if h not in [a, b, c, d, e, f, g]
for i in range(1, 10+1) if i not in [a, b, c, d, e, f, g, h]
if h + g + i == n
for j in range(1, 10+1) if j not in [a, b, c, d, e, f, g, h, i]
if j + i + b == n
if a < min(d, f, h, j)]
# Each solution can be described uniquely starting from the group of three
# with the numerically lowest external node and working clockwise.
# So we specified at the end that a < min(d, f, h, j)
return rings
def problem68():
START = 6 # each line cannot sum to less than 6 (1+2+3)
END = 27+1 # or greater than 27 (8+9+10)
# Collect solution candidates, flattening into one array of solutions.
rings = flatten(five_gon_rings(n) for n in range(START, END))
# Filter out the empty lists
rings = filter(bool, rings)
# Transform each solution tuple into a string of digits.
rings = [''.join(str(x) for x in flatten(solution)) for solution in rings]
# Find the max 16-digit string.
return int(max(solution for solution in rings if len(solution) == 16))
if __name__ == "__main__":
print(problem68())
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n# -*- coding: utf-8 -*-\n# Copyright 2014-2016 The HyperSpyUI developers\n#\n# This file is part of HyperSpyUI.\n#\n# HyperSpyUI is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Gene... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Copyright 2014-2016 The HyperSpyUI developers\n#\n# This file is part of HyperSpyUI.\n#\n# HyperSpyUI is free software: you can redistribute it and/or modify\n# it under the terms... | ```python
# -*- coding: utf-8 -*-
# Copyright 2014-2016 The HyperSpyUI developers
#
# This file is part of HyperSpyUI.
#
# HyperSpyUI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpyUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Mon Oct 27 21:17:42 2014
@author: Vidar Tonaas Fauske
"""
# Set proper backend for matplotlib
import matplotlib
matplotlib.use('module://hyperspyui.mdi_mpl_backend')
matplotlib.interactive(True)
import os
import warnings
import sys
import logging
from python_qt_binding import QtGui, QtCore
from QtCore import *
from QtGui import *
from .widgets.consolewidget import ConsoleWidget
import hyperspyui.mdi_mpl_backend
from .pluginmanager import PluginManager
from hyperspyui.settings import Settings
from hyperspyui.widgets.settingsdialog import SettingsDialog
from hyperspyui.exceptions import ProcessCanceled
from hyperspyui.log import logger
def myexcepthook(exctype, value, traceback):
if exctype == ProcessCanceled:
logger.info("User cancelled operation")
else:
sys.__excepthook__(exctype, value, traceback)
sys.excepthook = myexcepthook
def tr(text):
return QCoreApplication.translate("MainWindow", text)
def lowpriority():
""" Set the priority of the process to below-normal."""
if sys.platform == 'win32':
# Based on:
# "Recipe 496767: Set Process Priority In Windows" on ActiveState
# http://code.activestate.com/recipes/496767/
try:
import win32api
import win32process
import win32con
except ImportError as e:
warnings.warn("Could not set process priority: %s" % e)
return
pid = win32api.GetCurrentProcessId()
handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
win32process.SetPriorityClass(
handle, win32process.BELOW_NORMAL_PRIORITY_CLASS)
else:
import os
os.nice(1)
def normalpriority():
""" Set the priority of the process to below-normal."""
if sys.platform == 'win32':
# Based on:
# "Recipe 496767: Set Process Priority In Windows" on ActiveState
# http://code.activestate.com/recipes/496767/
try:
import win32api
import win32process
import win32con
except ImportError as e:
warnings.warn("Could not set process priority: %s" % e)
return
pid = win32api.GetCurrentProcessId()
handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
win32process.SetPriorityClass(
handle, win32process.NORMAL_PRIORITY_CLASS)
else:
import os
# Reset nice to 0
os.nice(-os.nice(0))
class MainWindowBase(QMainWindow):
"""
Base layer in application stack. Should handle the connection to our custom
matplotlib backend, and manage the Figures. As such, it does not know
anything about hyperspy, and can readily be reused for other projects.
Should also set up basic UI with utilities, and relevant functions for
inhereting classes to override.
"""
def __init__(self, parent=None):
super(MainWindowBase, self).__init__(parent)
# Setup settings:
self.settings = Settings(self, 'General')
# Default setting values:
self.settings.set_default('toolbar_button_size', 24)
self.settings.set_default('default_widget_floating', False)
self.settings.set_default('working_directory', "")
self.settings.set_default('low_process_priority', False)
# Override any possible invalid stored values, which could prevent load
if 'toolbar_button_size' not in self.settings or \
not isinstance(self.settings['toolbar_button_size'], int):
self.settings['toolbar_button_size'] = 24
if self.low_process_priority:
lowpriority()
# State varaibles
self.should_capture_traits = None
self.active_tool = None
# Collections
self.widgets = [] # Widgets in widget bar
self.figures = [] # Matplotlib figures
self.editors = [] # EditorWidgets
self.traits_dialogs = []
self.actions = {}
self._action_selection_cbs = {}
self.toolbars = {}
self.menus = {}
self.tools = []
self.plugin_manager = None
# MPL backend bindings
hyperspyui.mdi_mpl_backend.connect_on_new_figure(self.on_new_figure)
hyperspyui.mdi_mpl_backend.connect_on_destroy(self.on_destroy_figure)
# Create UI
self.windowmenu = None
self.create_ui()
# Connect figure management functions
self.main_frame.subWindowActivated.connect(self.on_subwin_activated)
# Save standard layout/state
self.settings.set_default('_geometry', self.saveGeometry())
self.settings.set_default('_windowState', self.saveState())
# Restore layout/state if saved
geometry = self.settings['_geometry']
state = self.settings['_windowState']
if geometry:
self.restoreGeometry(geometry)
if state:
self.restoreState(state)
@property
def toolbar_button_size(self):
return self.settings['toolbar_button_size', int]
@toolbar_button_size.setter
def toolbar_button_size(self, value):
self.settings['toolbar_button_size'] = value
self.setIconSize(
QSize(self.toolbar_button_size, self.toolbar_button_size))
@property
def cur_dir(self):
return self.settings['working_directory'] or ''
@cur_dir.setter
def cur_dir(self, value):
self.settings['working_directory'] = value
@property
def low_process_priority(self):
return self.settings['low_process_priority', bool]
@low_process_priority.setter
def low_process_priority(self, value):
self.settings['low_process_priority'] = value
if value:
lowpriority()
else:
normalpriority()
@property
def plugins(self):
return self.plugin_manager.plugins
def handleSecondInstance(self, argv):
# overload if needed
self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized |
QtCore.Qt.WindowActive)
self.activateWindow()
def closeEvent(self, event):
self.settings['_geometry'] = self.saveGeometry()
self.settings['_windowState'] = self.saveState()
return super(MainWindowBase, self).closeEvent(event)
def reset_geometry(self):
self.settings.restore_key_default('_geometry')
self.settings.restore_key_default('_windowState')
geometry = self.settings['_geometry']
state = self.settings['_windowState']
if geometry:
self.restoreGeometry(geometry)
if state:
self.restoreState(state)
self.setWindowState(Qt.WindowMaximized)
    def create_ui(self):
        """Construct the full UI: MDI area, plugins, actions, console, menus,
        toolbars and widget docks — in dependency order (see inline notes).
        """
        self.setIconSize(
            QSize(self.toolbar_button_size, self.toolbar_button_size))
        self.main_frame = QMdiArea()
        # Give the dock corners to the side areas rather than top/bottom.
        self.setCorner(Qt.TopRightCorner, Qt.RightDockWidgetArea)
        self.setCorner(Qt.TopLeftCorner, Qt.LeftDockWidgetArea)
        logger.debug("Initializing plugins")
        self.init_plugins()
        logger.debug("Creating default actions")
        self.create_default_actions() # Goes before menu/toolbar/widgetbar
        # Needs to go before menu, so console can be in menu
        logger.debug("Creating console")
        self.create_console()
        # This needs to happen before the widgetbar and toolbar
        logger.debug("Creating menus")
        self.create_menu()
        logger.debug("Creating toolbars")
        self.create_toolbars()
        logger.debug("Creating widgets")
        self.create_widgetbar()
        self.setCentralWidget(self.main_frame)
    def init_plugins(self):
        # Create the plugin manager and load/initialize all plugins.
        self.plugin_manager = PluginManager(self)
        self.plugin_manager.init_plugins()
    def create_default_actions(self):
        """
        Create default actions that can be used for e.g. toolbars and menus,
        or triggered manually.
        """
        logger.debug("Creating plugin actions")
        self.plugin_manager.create_actions()
        # Tools that behave like modes share one exclusive action group.
        self.selectable_tools = QActionGroup(self)
        self.selectable_tools.setExclusive(True)
        # Nested docking action
        ac_nested = QAction(tr("Nested docking"), self)
        ac_nested.setStatusTip(tr("Allow nested widget docking"))
        ac_nested.setCheckable(True)
        ac_nested.setChecked(self.isDockNestingEnabled())
        # NOTE(review): old-style SIGNAL() syntax — assumes PyQt4/PySide API.
        self.connect(ac_nested, SIGNAL('triggered(bool)'),
                     self.setDockNestingEnabled)
        self.actions['nested_docking'] = ac_nested
        # Tile windows action
        ac_tile = QAction(tr("Tile"), self)
        ac_tile.setStatusTip(tr("Arranges all figures in a tile pattern"))
        self.connect(ac_tile, SIGNAL('triggered()'),
                     self.main_frame.tileSubWindows)
        self.actions['tile_windows'] = ac_tile
        # Cascade windows action
        ac_cascade = QAction(tr("Cascade"), self)
        ac_cascade.setStatusTip(
            tr("Arranges all figures in a cascade pattern"))
        self.connect(ac_cascade, SIGNAL('triggered()'),
                     self.main_frame.cascadeSubWindows)
        self.actions['cascade_windows'] = ac_cascade
        # Close all figures action
        ac_close_figs = QAction(tr("Close all"), self)
        ac_close_figs.setStatusTip(tr("Closes all matplotlib figures"))
        self.connect(ac_close_figs, SIGNAL('triggered()'),
                     lambda: matplotlib.pyplot.close("all"))
        self.actions['close_all_windows'] = ac_close_figs
        # Reset geometry action
        ac_reset_layout = QAction(tr("Reset layout"), self)
        ac_reset_layout.setStatusTip(tr("Resets layout of toolbars and "
                                        "widgets"))
        self.connect(ac_reset_layout, SIGNAL('triggered()'),
                     self.reset_geometry)
        self.actions['reset_layout'] = ac_reset_layout
    def create_menu(self):
        """Build the menu bar; plugins append their own menus afterwards."""
        mb = self.menuBar()
        # Window menu is filled in add_widget and add_figure
        self.windowmenu = mb.addMenu(tr("&Windows"))
        self.windowmenu.addAction(self._console_dock.toggleViewAction())
        self.windowmenu.addAction(self.actions['nested_docking'])
        # Figure windows go below this separator. Other windows can be added
        # above it with insertAction(self.windowmenu_sep, QAction)
        self.windowmenu_sep = self.windowmenu.addSeparator()
        self.windowmenu.addAction(self.actions['tile_windows'])
        self.windowmenu.addAction(self.actions['cascade_windows'])
        self.windowmenu.addSeparator()
        self.windowmenu.addAction(self.actions['close_all_windows'])
        self.windowmenu_actions_sep = self.windowmenu.addSeparator()
        self.plugin_manager.create_menu()
    def create_tools(self):
        """Override to create tools on UI construction.
        """
        self.plugin_manager.create_tools()
    def create_toolbars(self):
        """
        Override to create toolbars and toolbar buttons on UI construction.
        It is called after create_default_action(), so add_toolbar_button()
        can be used to add previously defined actions.
        """
        self.create_tools()
        self.plugin_manager.create_toolbars()
    def create_widgetbar(self):
        """
        The widget bar itself is created and managed implicitly by Qt. Override
        this function to add widgets on UI construction.
        """
        self.plugin_manager.create_widgets()
    def edit_settings(self):
        """
        Shows a dialog for editing the application and plugins settings.
        """
        d = SettingsDialog(self, self)
        d.settings_changed.connect(self.on_settings_changed)
        d.exec_()
    def on_settings_changed(self):
        """
        Callback for SettingsDialog, or anything else that updates settings
        and need to apply the change.
        """
        # Due to the way the property is defined, this updates the UI:
        self.toolbar_button_size = self.toolbar_button_size
        self.low_process_priority = self.low_process_priority
    def select_tool(self, tool):
        """Make `tool` the active tool, detaching the previous active tool
        from all figure windows first.
        """
        if self.active_tool is not None:
            try:
                self.active_tool.disconnect_windows(self.figures)
            except Exception as e:
                # Best effort: a faulty tool must not block switching tools.
                warnings.warn("Exception disabling tool %s: %s" % (
                    self.active_tool.get_name(), e))
        self.active_tool = tool
        tool.connect_windows(self.figures)
    # --------- Figure management ---------
    # --------- MPL Events ---------
    def on_new_figure(self, figure, userdata=None):
        """
        Callback for MPL backend.
        """
        # Register the new figure as an MDI subwindow and hook up tools.
        self.main_frame.addSubWindow(figure)
        self.figures.append(figure)
        self.windowmenu.addAction(figure.activateAction())
        # Tools with a single (always-on) action attach to every figure.
        for tool in self.tools:
            if tool.single_action() is not None:
                tool.connect_windows(figure)
        if self.active_tool is not None:
            self.active_tool.connect_windows(figure)
    def on_destroy_figure(self, figure, userdata=None):
        """
        Callback for MPL backend.
        """
        # Mirror image of on_new_figure: detach tools and unregister.
        if figure in self.figures:
            self.figures.remove(figure)
        self.windowmenu.removeAction(figure.activateAction())
        for tool in self.tools:
            if tool.single_action() is not None:
                tool.disconnect_windows(figure)
        if self.active_tool is not None:
            self.active_tool.disconnect_windows(figure)
        self.main_frame.removeSubWindow(figure)
    # --------- End MPL Events ---------
    def on_subwin_activated(self, mdi_figure):
        # Keep the window menu's checked state in sync; only PySide needs
        # the manual update here.
        if mdi_figure and os.environ['QT_API'] == 'pyside':
            mdi_figure.activateAction().setChecked(True)
        self.check_action_selections(mdi_figure)
    def check_action_selections(self, mdi_figure=None):
        # Let registered callbacks update their action state for the figure
        # that just became active.
        if mdi_figure is None:
            mdi_figure = self.main_frame.activeSubWindow()
        for key, cb in self._action_selection_cbs.items():
            cb(mdi_figure, self.actions[key])
# --------- End figure management ---------
# --------- Console functions ---------
def _get_console_exec(self):
return ""
def _get_console_exports(self):
return {'ui': self}
def _get_console_config(self):
return None
def on_console_executing(self, source):
"""
Override when inherited to perform actions before exectuing 'source'.
"""
pass
def on_console_executed(self, response):
"""
Override when inherited to perform actions after executing, given the
'response' returned.
"""
pass
    def create_console(self):
        """Create the IPython console widget and dock it at the bottom."""
        # We could inherit QAction, and have it reroute when it triggers,
        # and then drop route when it finishes, however this will not catch
        # interactive dialogs and such.
        c = self._get_console_config()
        self.settings.set_default('console_completion_type', 'droplist')
        valid_completions = ConsoleWidget.gui_completion.values
        self.settings.set_enum_hint('console_completion_type',
                                    valid_completions)
        gui_completion = self.settings['console_completion_type']
        # Guard against a stale/invalid value persisted in settings.
        if gui_completion not in valid_completions:
            gui_completion = 'droplist'
        control = ConsoleWidget(config=c, gui_completion=gui_completion)
        control.executing.connect(self.on_console_executing)
        control.executed.connect(self.on_console_executed)
        # This is where we push variables to the console
        ex = self._get_console_exec()
        push = self._get_console_exports()
        control.ex(ex)
        control.push(push)
        self.console = control
        self._console_dock = QDockWidget()
        self._console_dock.setObjectName('console_widget')
        self._console_dock.setWidget(control)
        self._console_dock.setWindowTitle("Console")
        self.addDockWidget(Qt.BottomDockWidgetArea, self._console_dock)
``` |
[
{
"content": "```python\n\"\"\"\nraven.transport.builtins\n~~~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nimport logging\nimport sys\nimport urllib2\n\nfrom raven.utils import all\n\ntry:\n # Goo... | [
{
"content": "<|memory_start|>```python\n\"\"\"\nraven.transport.builtins\n~~~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nimport logging\nimport sys\nimport urllib2\n\nfrom raven.utils import all\n\... | ```python
"""
raven.transport.builtins
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import logging
import sys
import urllib2
from raven.utils import all
# Feature detection for optional transports. Each flag is a plain bool so
# callers can rely on identity/consistency checks.
try:
    # Google App Engine blacklists parts of the socket module, this will prevent
    # it from blowing up.
    from socket import socket, AF_INET, SOCK_DGRAM, error as socket_error
    has_socket = True
except Exception:
    has_socket = False
try:
    import gevent
    # gevent 1.0bN renamed coros to lock
    try:
        from gevent.lock import Semaphore
    except ImportError:
        from gevent.coros import Semaphore  # NOQA
    has_gevent = True
except Exception:
    # Was previously None; use False so the flag matches the others.
    has_gevent = False
try:
    import twisted.web.client
    import twisted.internet.protocol
    has_twisted = True
except Exception:
    has_twisted = False
try:
    from tornado import ioloop
    from tornado.httpclient import AsyncHTTPClient, HTTPClient
    has_tornado = True
except Exception:
    has_tornado = False
try:
    import eventlet
    from eventlet.green import urllib2 as eventlet_urllib2
    has_eventlet = True
except Exception:
    has_eventlet = False
from raven.conf import defaults
from raven.transport.exceptions import InvalidScheme
class Transport(object):
    """
    Base class for all transport implementations.

    Subclasses must implement ``send`` and ``compute_scope`` (see
    ``HTTPTransport`` for a reference ``compute_scope`` implementation),
    and define a ``scheme`` attribute listing the accepted URL schemes.
    """
    def check_scheme(self, url):
        """Raise InvalidScheme when the DSN scheme is not supported."""
        supported = self.scheme
        if url.scheme in supported:
            return
        raise InvalidScheme()
    def send(self, data, headers):
        """Deliver ``data`` with ``headers``; must be overridden."""
        raise NotImplementedError
    def compute_scope(self, url, scope):
        """Fill ``scope`` with Sentry settings derived from ``url``;
        must be overridden.
        """
        raise NotImplementedError
class BaseUDPTransport(Transport):
    """Shared plumbing for UDP transports.

    Subclasses implement `_send_data(data, addr)` to perform the write.
    """
    def __init__(self, parsed_url):
        super(BaseUDPTransport, self).__init__()
        self.check_scheme(parsed_url)
        self._parsed_url = parsed_url
    def send(self, data, headers):
        # UDP wire format: '<auth header>\n\n<payload>'.
        auth_header = headers.get('X-Sentry-Auth')
        if auth_header is None:
            # silently ignore attempts to send messages without an auth header
            return
        host, port = self._parsed_url.netloc.split(':')
        self._send_data(auth_header + '\n\n' + data, (host, int(port)))
    def compute_scope(self, url, scope):
        # Split '<path>/<project>' from the DSN path.
        path_bits = url.path.rsplit('/', 1)
        if len(path_bits) > 1:
            path = path_bits[0]
        else:
            path = ''
        project = path_bits[-1]
        # Unlike HTTP, UDP DSNs require an explicit port.
        if not all([url.port, project, url.username, url.password]):
            raise ValueError('Invalid Sentry DSN: %r' % url.geturl())
        netloc = url.hostname
        netloc += ':%s' % url.port
        server = '%s://%s%s/api/store/' % (url.scheme, netloc, path)
        scope.update({
            'SENTRY_SERVERS': [server],
            'SENTRY_PROJECT': project,
            'SENTRY_PUBLIC_KEY': url.username,
            'SENTRY_SECRET_KEY': url.password,
        })
        return scope
class UDPTransport(BaseUDPTransport):
    """Plain blocking-socket UDP transport (fire-and-forget)."""
    scheme = ['udp']
    def __init__(self, parsed_url):
        super(UDPTransport, self).__init__(parsed_url)
        if not has_socket:
            raise ImportError('UDPTransport requires the socket module')
    def _send_data(self, data, addr):
        # Errors are deliberately swallowed: losing an event is preferable
        # to disrupting the host application.
        udp_socket = None
        try:
            try:
                udp_socket = socket(AF_INET, SOCK_DGRAM)
                udp_socket.setblocking(False)
                udp_socket.sendto(data, addr)
            except socket_error:
                # as far as I understand things this simply can't happen,
                # but still, it can't hurt
                pass
        finally:
            # Always close up the socket when we're done
            if udp_socket is not None:
                udp_socket.close()
                udp_socket = None
class HTTPTransport(Transport):
    """Synchronous transport posting events with urllib2 over HTTP(S)."""
    scheme = ['http', 'https']
    def __init__(self, parsed_url, timeout=defaults.TIMEOUT):
        self.check_scheme(parsed_url)
        self._parsed_url = parsed_url
        self._url = parsed_url.geturl()
        self.timeout = timeout
    def send(self, data, headers):
        """
        Sends a request to a remote webserver using HTTP POST.
        """
        req = urllib2.Request(self._url, headers=headers)
        if sys.version_info < (2, 6):
            # Python < 2.6 urlopen has no per-call timeout argument.
            response = urllib2.urlopen(req, data).read()
        else:
            response = urllib2.urlopen(req, data, self.timeout).read()
        return response
    def compute_scope(self, url, scope):
        # NOTE(review): near-duplicate of BaseUDPTransport.compute_scope,
        # except default ports (80/443) are omitted from the netloc here.
        netloc = url.hostname
        if url.port and (url.scheme, url.port) not in \
                (('http', 80), ('https', 443)):
            netloc += ':%s' % url.port
        path_bits = url.path.rsplit('/', 1)
        if len(path_bits) > 1:
            path = path_bits[0]
        else:
            path = ''
        project = path_bits[-1]
        if not all([netloc, project, url.username, url.password]):
            raise ValueError('Invalid Sentry DSN: %r' % url.geturl())
        server = '%s://%s%s/api/store/' % (url.scheme, netloc, path)
        scope.update({
            'SENTRY_SERVERS': [server],
            'SENTRY_PROJECT': project,
            'SENTRY_PUBLIC_KEY': url.username,
            'SENTRY_SECRET_KEY': url.password,
        })
        return scope
class GeventedHTTPTransport(HTTPTransport):
    """HTTP transport sending in a gevent greenlet, bounded by a semaphore
    so at most `maximum_outstanding_requests` are in flight at once.
    """
    scheme = ['gevent+http', 'gevent+https']
    def __init__(self, parsed_url, maximum_outstanding_requests=100):
        if not has_gevent:
            raise ImportError('GeventedHTTPTransport requires gevent.')
        self._lock = Semaphore(maximum_outstanding_requests)
        super(GeventedHTTPTransport, self).__init__(parsed_url)
        # remove the gevent+ from the protocol, as it is not a real protocol
        self._url = self._url.split('+', 1)[-1]
    def send(self, data, headers):
        """
        Spawn an async request to a remote webserver.
        """
        # this can be optimized by making a custom self.send that does not
        # read the response since we don't use it.
        self._lock.acquire()
        return gevent.spawn(super(GeventedHTTPTransport, self).send, data, headers).link(self._done, self)
    def _done(self, *args):
        # Release the semaphore slot when the greenlet finishes,
        # regardless of success or failure.
        self._lock.release()
class TwistedHTTPTransport(HTTPTransport):
    """Non-blocking HTTP transport using twisted.web.client.getPage."""
    scheme = ['twisted+http', 'twisted+https']
    def __init__(self, parsed_url):
        if not has_twisted:
            raise ImportError('TwistedHTTPTransport requires twisted.web.')
        super(TwistedHTTPTransport, self).__init__(parsed_url)
        self.logger = logging.getLogger('sentry.errors')
        # remove the twisted+ from the protocol, as it is not a real protocol
        self._url = self._url.split('+', 1)[-1]
    def send(self, data, headers):
        # Delivery errors are logged, not raised, so the app is undisturbed.
        d = twisted.web.client.getPage(self._url, method='POST', postdata=data, headers=headers)
        d.addErrback(lambda f: self.logger.error(
            'Cannot send error to sentry: %s', f.getTraceback()))
class TwistedUDPTransport(BaseUDPTransport):
    """UDP transport driven by the Twisted reactor."""
    scheme = ['twisted+udp']
    def __init__(self, parsed_url):
        super(TwistedUDPTransport, self).__init__(parsed_url)
        if not has_twisted:
            raise ImportError('TwistedUDPTransport requires twisted.')
        # Import the reactor explicitly: the module level only imports
        # twisted.internet.protocol, which does not guarantee that the
        # `reactor` submodule is loaded, so attribute access via
        # `twisted.internet.reactor` could raise AttributeError.
        from twisted.internet import reactor
        self.protocol = twisted.internet.protocol.DatagramProtocol()
        reactor.listenUDP(0, self.protocol)
    def _send_data(self, data, addr):
        # Write through the UDP port opened in __init__.
        self.protocol.transport.write(data, addr)
class TornadoHTTPTransport(HTTPTransport):
    """HTTP transport that is asynchronous when a tornado IOLoop runs."""
    scheme = ['tornado+http']
    def __init__(self, parsed_url):
        if not has_tornado:
            raise ImportError('TornadoHTTPTransport requires tornado.')
        super(TornadoHTTPTransport, self).__init__(parsed_url)
        # remove the tornado+ from the protocol, as it is not a real protocol
        self._url = self._url.split('+', 1)[-1]
    def send(self, data, headers):
        kwargs = dict(method='POST', headers=headers, body=data)
        # only use async if ioloop is running, otherwise it will never send
        if ioloop.IOLoop.initialized():
            client = AsyncHTTPClient()
            kwargs['callback'] = None
        else:
            client = HTTPClient()
        client.fetch(self._url, **kwargs)
class EventletHTTPTransport(HTTPTransport):
    """HTTP transport sending from an eventlet green thread."""
    scheme = ['eventlet+http', 'eventlet+https']
    def __init__(self, parsed_url, pool_size=100):
        if not has_eventlet:
            raise ImportError('EventletHTTPTransport requires eventlet.')
        super(EventletHTTPTransport, self).__init__(parsed_url)
        # remove the eventlet+ from the protocol, as it is not a real protocol
        self._url = self._url.split('+', 1)[-1]
    def _send_payload(self, payload):
        # payload is the (data, headers) tuple packed by send().
        req = eventlet_urllib2.Request(self._url, headers=payload[1])
        try:
            if sys.version_info < (2, 6):
                response = eventlet_urllib2.urlopen(req, payload[0]).read()
            else:
                response = eventlet_urllib2.urlopen(req, payload[0],
                                                    self.timeout).read()
            return response
        except Exception, err:
            # NOTE(review): Python 2-only except syntax; errors are returned
            # rather than raised, so callers never observe delivery failures.
            return err
    def send(self, data, headers):
        """
        Spawn an async request to a remote webserver.
        """
        eventlet.spawn(self._send_payload, (data, headers))
``` |
[
{
"content": "Repeat the following code:\n```python\n\"\"\"\nDjango settings for mobileinterest project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n\"\"\"\nDjango settings for mobileinterest project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6... | ```python
"""
Django settings for mobileinterest project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wdw3xjg7g9lc1nk&5867@=1th!3)^7+2#i$f++gzt*=8jo9+kq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
# 'django.contrib.admin',
# 'django.contrib.auth',
# 'django.contrib.contenttypes',
'views',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mobileinterest.urls'
WSGI_APPLICATION = 'mobileinterest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
TEMPLATE_DIRS = (
BASE_DIR+'/templates',
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
CACHE_BACKEND = os.path.join(BASE_DIR, 'django_cache') #'file:///var/tmp/django_cache'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
``` |
[
{
"content": "```python\n# -*- coding: UTF-8 -*-\n#\n# (c) 2010 Mandriva, http://www.mandriva.com/\n#\n# This file is part of Mandriva Server Setup\n#\n# MSS is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Found... | [
{
"content": "<|memory_start|>```python\n# -*- coding: UTF-8 -*-\n#\n# (c) 2010 Mandriva, http://www.mandriva.com/\n#\n# This file is part of Mandriva Server Setup\n#\n# MSS is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Fre... | ```python
# -*- coding: UTF-8 -*-
#
# (c) 2010 Mandriva, http://www.mandriva.com/
#
# This file is part of Mandriva Server Setup
#
# MSS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MSS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MSS; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import os
import uuid
import copy
import re
import glob
import sys
import logging
import platform
import json
import urllib
import urllib2
import time
import xmlrpclib
from mss.agent.config import Config
from mss.agent.lib.utils import Singleton
from mss.agent.lib.db import get_session, OptionTable, LogTypeTable, LogTable, ModuleTable
from mss.agent.managers.process import ProcessManager
from mss.agent.managers.translation import TranslationManager
from mss.agent.classes.media import remove_medias_cmd
_ = TranslationManager().translate
logger = logging.getLogger(__name__)
def expose(f):
    """Mark *f* as publicly exposed (XML-RPC dispatchable) and return it."""
    setattr(f, 'exposed', True)
    return f
def is_exposed(f):
    """Return True when *f* carries the flag set by the @expose decorator."""
    return getattr(f, 'exposed', False)
class ModuleManager:
    """
    Class for managing modules
    """
    # NOTE(review): __metaclass__ is Python 2 syntax; under Python 3 this
    # assignment would not make the class a singleton.
    __metaclass__ = Singleton
    def _dispatch(self, method, params):
        # XML-RPC dispatch hook: only @expose-marked methods are callable.
        func = getattr(self, method)
        if not is_exposed(func):
            raise Exception('Method "%s" is not supported' % method)
        return func(*params)
    def __init__(self):
        if platform.machine() == 'x86_64':
            self.arch = 'x86_64'
        else:
            self.arch = 'i586'
        # Setup BDD access
        self.session = get_session(Config().db_file)
        self._token = False
        self._mode = None
        self.modules = {}
        self.sections_modules = {}
        self.sections = {}
        self.packages = []
        # Get machine-id
        with open('/etc/machine-id', 'r') as f:
            machine_id = f.read().strip()
        logger.info("Machine id is %s" % machine_id)
        self.set_option("machine-id", machine_id)
        # Translation manager
        TranslationManager().set_catalog('agent', os.path.join(os.path.dirname(__file__), '..'))
        # Load packages
        self.load_packages()
def load(self):
""" Load data in the agent """
self.modules = {}
self.sections_modules = {}
self.sections = {}
self.load_sections()
logger.debug("Sections loaded.")
self.load_modules()
logger.debug("Modules loaded.")
self.init_modules()
logger.debug("Modules init done.")
def setup_python_path(self):
"""
Setup the python path to load modules
"""
local_path = Config().localDir
cache_path = Config().cacheDir
try:
sys.path.remove(local_path)
except ValueError:
pass
try:
sys.path.remove(cache_path)
except ValueError:
pass
sys.path.insert(0, local_path)
if self._mode == "api":
sys.path.insert(0, cache_path)
    def load_modules(self):
        """ load modules """
        # Local module descriptions always apply; API ones only in api mode.
        logger.debug("Using local modules")
        modules_list = self.get_local_modules()
        if self._mode == "api":
            logger.debug("Using API modules")
            modules_list += self.get_api_modules()
        self.setup_python_path()
        from mss.agent.classes.module import Module
        for module_desc in modules_list:
            if "module" in module_desc:
                # API modules have no path yet; they live in the cache dir.
                if "path" not in module_desc["module"]:
                    module_desc["module"]["path"] = os.path.join(Config().cacheDir,
                                                                 module_desc["slug"])
                self.modules[module_desc['slug']] = Module(module_desc)
                # Index the module under its section for quick lookup.
                section = self.modules[module_desc['slug']].section
                if section not in self.sections_modules:
                    self.sections_modules[section] = []
                if not module_desc["slug"] in self.sections_modules[section]:
                    self.sections_modules[section].append(module_desc["slug"])
    def init_modules(self):
        # Give each loaded module a chance to run its optional init() hook.
        for slug, module in self.modules.items():
            if hasattr(module, "init"):
                module.init()
    def get_local_modules(self):
        """Return the descriptions (desc.json) of locally shipped modules."""
        paths = []
        result = []
        for item in glob.glob(os.path.join(Config().localDir,
                                           "*", "__init__.py")):
            module = item.split("/")[-2]
            path = os.path.join(Config().localDir, module)
            paths.append(path)
        for path in paths:
            try:
                with open(os.path.join(path, "desc.json")) as f:
                    desc = json.load(f)
            except (ValueError, IOError):
                logger.exception("Failed to load %s" % (path))
            else:
                if "module" not in desc:
                    # NOTE(review): assumes 'standalone' (and 'slug') are
                    # present when 'module' is missing — a desc.json without
                    # them would raise KeyError; confirm the schema.
                    if desc['standalone'] is True:
                        raise Exception('Missing section "module" in module %s' % desc['slug'])
                    else:
                        desc["module"] = {}
                desc["module"]["path"] = path
                result.append(desc)
        return result
def get_api_modules(self):
""" return list of modules from the API """
cache_path = os.path.join(Config().cacheDir, "addons.json")
try:
mtime = os.path.getmtime(cache_path)
except OSError:
mtime = 0
# Cache 6 hours
if int(time.time()) - mtime > Config().cache:
logger.debug("Getting new version of %s" % cache_path)
result, code = self.request(Config().addonsUrl)
if code == 200:
with open(cache_path, "w") as f:
json.dump(result, f)
modules_list = result
else:
logger.error("Failed to retrieve modules from the API.")
else:
with open(cache_path) as f:
modules_list = json.load(f)
return modules_list
    def load_sections(self):
        """ load sections """
        # Local sections are the base; in api mode the API entries override
        # matching slugs via dict.update().
        logger.debug("Using local sections")
        sections = self.get_local_sections()
        if self._mode == "api":
            logger.debug("Using API sections")
            api_sections = self.get_api_sections()
            for section in sections:
                for api_section in api_sections:
                    if section['slug'] == api_section['slug']:
                        section.update(api_section)
        self.sections = sections
    def get_local_sections(self):
        """ return local section list """
        path = os.path.join(Config().localDir, "sections.json")
        with open(path) as f:
            sections = json.load(f)
        return sections
    def get_api_sections(self):
        """ return section list from API """
        # Same disk-caching scheme as get_api_modules, but with a safe
        # fallback to the local sections when the API is unreachable.
        cache_path = os.path.join(Config().cacheDir, "sections.json")
        try:
            mtime = os.path.getmtime(cache_path)
        except OSError:
            mtime = 0
        # Cache 6 hours
        if int(time.time()) - mtime > Config().cache:
            logger.debug("Getting new version of %s" % cache_path)
            result, code = self.request(Config().sectionsUrl)
            if code == 200:
                with open(cache_path, "w") as f:
                    json.dump(result, f)
                sections = result
            else:
                logger.error("Failed to retrieve sections from the API.")
                logger.error("Using local sections.")
                sections = self.get_local_sections()
        else:
            with open(cache_path) as f:
                sections = json.load(f)
        return sections
    @expose
    def set_lang(self, lang):
        """ change lang during execution """
        TranslationManager().set_lang(lang)
    @expose
    def get_lang(self):
        """ return current language """
        return TranslationManager().get_lang()
    @expose
    def set_option(self, slug, value):
        """ add an option in the DB """
        # NOTE(review): the value is stored as-is, but get_option() runs
        # json.loads() on the stored value — confirm OptionTable serializes
        # to JSON, otherwise plain strings will not round-trip.
        option = OptionTable(slug, value)
        self.session.merge(option)
        self.session.commit()
        return value
    @expose
    def get_option(self, slug):
        """ get an option from the BDD """
        logger.debug("Get option %s" % slug)
        option = self.session.query(OptionTable).get(slug)
        if option:
            result = json.loads(option.value)
        else:
            # Missing options are reported as False, not None.
            result = False
        logger.debug("Result: %s" % result)
        return result
@expose
def load_packages(self):
logger.info("Load packages...")
ProcessManager().load_packages(self.set_packages)
def set_packages(self, module, code, output):
if code == 0:
packages = output.split('#')
if not packages:
logger.error("No packages found.")
else:
self.packages = packages
logger.info("Loading packages done.")
else:
logger.error("Can't load packages.")
    @expose
    def check_net(self):
        # Delegate the network connectivity check to the process manager.
        ProcessManager().check_net()
    @expose
    def update_medias(self):
        # Refresh package media/repository metadata.
        ProcessManager().update_medias()
    @expose
    def reboot(self):
        # Reboot the machine via the process manager.
        ProcessManager().reboot()
    def get_conflicts(self, conflicts, module):
        """ return a module list of current conflicts
        with module """
        # Recursively walk conflicts and dependencies, accumulating slugs
        # into `conflicts`; only configured modules count as conflicting.
        if module in self.modules:
            module = self.modules[module]
            _conflicts = module.conflicts
            _dependencies = module.dependencies
            _configured = module.configured
        else:
            # NOTE(review): self._hAddons is not assigned anywhere in this
            # class as visible here — this fallback branch would raise
            # AttributeError; confirm whether it is dead code.
            module = self._hAddons[module]
            _conflicts = module['module'].get('conflicts', [])
            _dependencies = module['module'].get('dependencies', [])
            _configured = module['module'].get('configured', False)
        for m in _conflicts:
            try:
                if m not in conflicts and _configured:
                    conflicts.append(m)
                    logger.debug("Conflict with : %s" % m)
                    conflicts = self.get_conflicts(conflicts, m)
            except KeyError:
                pass
        for m in _dependencies:
            conflicts = self.get_conflicts(conflicts, m)
        return conflicts
@expose
def get_modules(self):
""" return all available modules details """
logger.info("Get all available modules")
result = [module.details for slug, module in self.modules.items()]
logger.debug("Result: %s" % str(result))
return result
@expose
def get_modules_details(self, modules):
""" return modules info """
logger.info("Get modules details: %s" % str(modules))
result = [self.modules[slug].details for slug in modules if slug in self.modules]
logger.debug("Result: %s" % str(result))
return result
@expose
def get_module_details(self, module):
""" return module info """
logger.info("Get module detail: %s" % module)
if module in self.modules:
result = self.modules[module].details
else:
logger.error("Module %s doesn't exists" % module)
result = False
logger.debug("Result: %s" % str(result))
return result
@expose
def get_packages(self, module):
""" returns package list for module """
if module in self.modules:
return self.modules[module].packages
return False
    @expose
    def preinstall_modules(self, install_modules):
        """
        get dependencies for modules to install
        return modules infos
        """
        # force module re-installation
        # (not-used for now)
        #force_modules = []
        #for m in modules:
        #if m.startswith("force-"):
        #force_modules.append(m.replace("force-", ""))
        #modules = [m.replace("force-", "") for m in modules]
        logger.info("Pre-install modules: %s" % ", ".join(install_modules))
        # store old modules list
        # get dependencies for modules
        modules = self.check_dependencies(install_modules, [])
        modules = self.order_dependencies(modules)
        # get difference for dep list
        deps = list(set(modules).difference(install_modules))
        # get modules info (modules + dependencies)
        modules = self.get_modules_details(modules)
        to_install = []
        for m in modules:
            # don't install already configured deps
            if m['slug'] in deps and not m['configured']:
                m['dep'] = True
                to_install.append(m)
            elif m['slug'] not in deps and m['can_configure']:
                m['dep'] = False
                to_install.append(m)
        logger.debug("Result: %s" % to_install)
        return to_install
    def order_dependencies(self, modules, cnt=1):
        """Topologically order the [module, deps, index] triples built by
        check_dependencies, returning a flat slug list, dependencies first.
        """
        for module in modules:
            # if the module has dependencies and is not indexed
            if module[1] and module[2] == -1:
                # for each dep of current module
                set_index = True
                for m1 in module[1]:
                    # for each module
                    for m2 in modules:
                        # if the dep is not indexed (not >=0)
                        if m1 == m2[0] and not m2[2] >= 0:
                            set_index = False
                # set the current module index to cnt
                # if all dependencies are indexed
                if set_index:
                    module[2] = cnt
        # make 10 pass to determine indexes
        # FIXME! this limits the nb max of the modules list
        if(cnt < 10):
            cnt += 1
            modules = self.order_dependencies(modules, cnt)
        # calcule module list from indexes
        else:
            result = []
            for i in range(cnt):
                for module in modules:
                    if module[2] == i:
                        if not module[0] in result:
                            result.append(module[0])
            modules = result
        return modules
    def check_dependencies(self, modules, dependencies):
        """ get dependencies for modules
        create a list with the form : [ [ module, [dependencies], index ],... ]
        """
        for module in modules:
            deps = self.get_dependencies(module)
            if deps:
                # set the index a -1 to calculate index
                dependencies.append([module, deps, -1])
                dependencies = self.check_dependencies(deps, dependencies)
            else:
                # set the index at 0 as the module has no dependencies
                dependencies.append([module, None, 0])
        return dependencies
def get_dependencies(self, module):
""" get dependencies for module """
if module in self.modules:
return [d for d in self.modules[module].dependencies if d in self.modules]
return []
@expose
def download_modules(self, modules):
""" download modules from the API """
for module in modules:
self.download_module(module)
@expose
def download_module(self, module):
logger.debug("Download module: %s" % module)
self.modules[module].download()
    @expose
    def get_repositories(self, modules):
        """ get repositories for modules """
        logger.debug("Get packages repositories for modules: %s" % ", ".join(modules))
        repositories = []
        for module in modules:
            repositories += self.modules[module].repositories
        logger.debug("Result: %s" % repositories)
        return repositories
    @expose
    def add_repository(self, module_slug, repo_slug, login=None, passwd=None):
        """ add repository of a module """
        repositories = self.modules[module_slug].repositories
        for repository in repositories:
            if repository.slug == repo_slug:
                # Some repositories require wiping the existing medias first.
                if repository.clean:
                    p = ProcessManager().launch("repository", _("Removing medias"), remove_medias_cmd())
                    # Wait for the removal before adding the new repository.
                    p.join()
                logger.info("Add repository: %s" % repository.name)
                ProcessManager().add_repository(repository.get_command(login, passwd))
@expose
def install_modules(self, modules):
    """Install the packages required by *modules*.

    Returns True when an installation was launched, False when no
    package was needed.
    """
    logger.info("Install modules: %s" % str(modules))
    packages = []
    for slug in modules:
        packages.extend(self.modules[slug].packages)
    if not packages:
        logger.info("No packages to install")
        return False
    logger.debug("Install packages: %s" % str(packages))
    ProcessManager().install_packages(packages)
    return True
@expose
def get_config(self, modules):
    """Return the configuration description of each known module."""
    logger.info("Get config for modules: %s" % ", ".join(modules))
    config = []
    for slug in modules:
        if slug not in self.modules:
            # Unknown slugs are reported but do not abort the call.
            logger.error("Module %s is not available" % slug)
            continue
        config.append(self.modules[slug].get_config())
    logger.debug("Result: %s" % str(config))
    return config
@expose
def valid_config(self, modules, modules_config):
    """Run each module's validation over the submitted configuration."""
    logger.info("Valid config for modules: %s" % ", ".join(modules))
    logger.debug("Configuration is: %s" % str(modules_config))
    config = [self.modules[slug].valid_config(modules_config)
              for slug in modules]
    logger.debug("Result: %s" % str(config))
    return config
@expose
def run_config(self, module):
    """ run configuration for module

    Launches the module's configuration script asynchronously;
    self.end_config is invoked as a callback when the script exits.
    """
    logger.debug("Run configuration for %s" % str(module))
    # info_config() yields the working directory, the script to run
    # and its arguments.
    path, script, args = self.modules[module].info_config()
    logger.debug("Run script: %s, args: %s" % (str(script), str(args)))
    logger.debug("Path is: %s" % path)
    return ProcessManager().run_script(script, args, path, module, self.end_config)
@expose
def end_config(self, module, code, output):
    """
    Callback invoked by ProcessManager when a module's
    configuration script exits.

    module -- slug of the configured module
    code   -- exit code of the script (0 means success)
    output -- captured script output (unused here)

    Returns 0 unconditionally.
    """
    if code == 0 and not self.modules[module].configured:
        logger.debug("Set %s as configured" % str(module))
        self.modules[module].configured = True
        # Best-effort: persist a "config" log entry in the DB. A
        # failure here must not undo the configuration, so errors
        # are only logged.
        try:
            log_type = self.session.query(LogTypeTable).filter(LogTypeTable.name == "config").first()
            if not log_type:
                log_type = LogTypeTable("config")
                self.session.add(log_type)
                self.session.commit()
            module_obj = self.session.query(ModuleTable).filter(ModuleTable.name == module).first()
            config_log = LogTable(log_type.id, module_obj.id, self.get_state("config", module))
            logger.debug("Saving %s configuration log in the DB" % str(module))
            self.session.add(config_log)
            self.session.commit()
        except Exception:
            # A bare "except" would also swallow KeyboardInterrupt /
            # SystemExit; catch Exception and keep a trace instead.
            logger.exception("Could not save %s configuration log" % str(module))
    return 0
def clean_output(self, string):
    """Strip ANSI escape sequences from *string*."""
    # Everything from ESC up to the next 'm' terminator is treated
    # as one ANSI (SGR) sequence and dropped.
    return re.sub('\x1b[^m]*m', '', string)
@expose
def get_state(self, type, module="agent"):
    """Return the execution output of a script run.

    type   -- kind of run to inspect (e.g. "config")
    module -- module slug, defaults to the agent itself

    Returns (code, output) where output is a list of
    {'code': int, 'text': str} dicts, one per output line. A line
    starting with a digit 0-8 uses that digit as severity code;
    any other line gets code 0. Empty lines are dropped.
    """
    code, output = ProcessManager().p_state(type, module)
    # format output
    tmp = output.splitlines()
    if not tmp:
        output = [{'code': 0, 'text': u''}]
    else:
        output = []
        for line in tmp:
            try:
                if int(line[0]) in range(9):
                    # int() so every entry carries the same type; the
                    # previous code mixed str ('3') and int (0) codes.
                    text_code = int(line[0])
                    text = line[1:]
                else:
                    text_code = 0
                    text = line
                output.append({'code': text_code, 'text': self.clean_output(text)})
            # no code at line start
            except ValueError:
                text_code = 0
                text = line
                output.append({'code': text_code, 'text': self.clean_output(text)})
            # no char in line
            except IndexError:
                pass
    logger.debug("Get state: %s - %s" % (code, output))
    return (code, output)
@expose
def get_status(self):
    """Return a human readable summary of the agent activity."""
    labels = [_(sts, "agent") for sts in ProcessManager().pm_state()]
    return ', '.join(labels)
@expose
def get_sections(self):
    """ return list of sections """
    # deepcopy so the in-place translation below never mutates the
    # section definitions kept on self.
    sections = copy.deepcopy(self.sections)
    for section in sections:
        section["name"] = _(section["name"], "agent")
    return sections
@expose
def get_section(self, section):
    """ return modules belonging to section
    organized by category

    Returns a list of category dicts, each carrying a "modules"
    list with the details of the standalone modules it contains.
    """
    logger.info("Getting section %s modules" % section)
    result = []
    if section in self.sections_modules:
        modules_list = self.sections_modules[section]
        for module_slug in modules_list:
            # Only standalone modules are listed in sections.
            if self.modules[module_slug].standalone:
                category = self.modules[module_slug].category
                details = self.modules[module_slug].details
                # Register the category once, on first encounter.
                exist = False
                for cat in result:
                    if category["slug"] == cat["slug"]:
                        exist = True
                        break
                if not exist:
                    result.append(category)
                # Append the module details under its category entry.
                for i, cat in enumerate(result[:]):
                    if category["slug"] == cat["slug"]:
                        if "modules" not in cat:
                            result[i]["modules"] = []
                        result[i]["modules"].append(details)
                        break
    logger.debug("Result: %s" % str(result))
    return result
@expose
def authenticate(self, user, password):
    """ Authenticate mss-www to the agent

    "root" is checked locally through PAM; any other user is
    checked against the ServicePlace API. On success a session
    token (string) is returned and stored; on failure, False.
    """
    if not user or not password:
        return False
    # Logout the current user
    self.logout()
    # Local auth with PAM
    if user == "root":
        logger.debug("PAM authentication")
        from mss.agent.lib import pam
        result = pam.authenticate(user, password, service="passwd")
        if result:
            logger.debug("Logged with PAM.")
            # Generate an uuid for this session
            self._token = str(uuid.uuid4())
            self._mode = "local"
            self.load()
            return self._token
        logger.error("Login failed against PAM.")
        return False
    # API auth
    else:
        logger.debug("ServicePlace authentication")
        url = Config().tokenUrl
        result, code = self.request(url, {'username': user, 'password': password.encode('utf-8')})
        if code == 200:
            if 'token' in result:
                logger.debug("Logged with the ServicePlace !")
                # Reuse the token issued by the remote API.
                self._token = result['token']
                self._mode = "api"
                self.load()
                return self._token
        logger.error("Login failed against the ServicePlace.")
        return False
def check_token(self, token):
    """Return True only when *token* matches the active session token."""
    # No active session token, or no candidate: nothing can match.
    if not (self._token and token):
        return False
    return token == self._token
@expose
def logout(self):
    """Invalidate the current session token and authentication mode."""
    self._token = False
    self._mode = None
    logger.info("User logged out")
def request(self, url, params=None):
    """
    Used to query the ServicePlace API
    Handles token and language headers

    Returns (result, code): result is the decoded JSON object when
    the response is application/json, the raw body otherwise.
    Raises xmlrpclib.Fault on HTTP 404/500 and on network errors.
    """
    if params:
        # Encoding params turns the request into a POST; without
        # params it stays a GET.
        params = urllib.urlencode(params)
    request = urllib2.Request(url, params)
    if self._token:
        request.add_header('Authorization', 'Token ' + self._token)
    request.add_header('Accept-Language', TranslationManager().get_lang().split('_')[0] + ',en')
    try:
        response = urllib2.urlopen(request)
        if response.info().gettype() == "application/json":
            result = json.loads(response.read())
        else:
            result = response.read()
        code = response.getcode()
    except urllib2.HTTPError as e:
        # HTTP errors other than 404/500 fall through and are
        # returned to the caller as (｢｣, code).
        code = e.code
        result = ""
        if code in (404, 500):
            raise xmlrpclib.Fault(code, _("Connection failed with the ServicePlace.", "agent"))
    except urllib2.URLError as e:
        logger.exception("URL error")
        raise xmlrpclib.Fault(777, str(e.reason))
    logger.debug("Return code %s" % code)
    return (result, code)
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n'''CREMA structured chord model'''\n\nimport argparse\nimport sys\nimport os\nimport pickle\n\nfrom tqdm import tqdm\nfrom joblib import Parallel, delayed\n\nfrom jams.util import smkdirs\n\nimport pumpp\n\nimport crema.utils\n\nOUTPUT_PATH = 'resources'\n\n\ndef ... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n'''CREMA structured chord model'''\n\nimport argparse\nimport sys\nimport os\nimport pickle\n\nfrom tqdm import tqdm\nfrom joblib import Parallel, delayed\n\nfrom jams.util import smkdirs\n\nimport pumpp\n\nimport crema.utils\n\nOUTPUT_PATH = 'reso... | ```python
#!/usr/bin/env python
'''CREMA structured chord model'''
import argparse
import sys
import os
import pickle
from tqdm import tqdm
from joblib import Parallel, delayed
from jams.util import smkdirs
import pumpp
import crema.utils
OUTPUT_PATH = 'resources'
def process_arguments(args):
    """Parse the command-line options of the pre-processing script.

    args -- list of argument strings (typically sys.argv[1:])
    Returns the populated argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    add = parser.add_argument
    add('--sample-rate', dest='sr', type=float, default=44100.,
        help='Sampling rate for audio analysis')
    add('--hop-length', dest='hop_length', type=int,
        default=4096,
        help='Hop length for audio analysis')
    add('--octaves', dest='n_octaves', type=int,
        default=6,
        help='Number of octaves above C1')
    add('--jobs', dest='n_jobs', type=int,
        default=1,
        help='Number of jobs to run in parallel')
    add('--augmentation-path', dest='augment_path', type=str,
        default=None,
        help='Path for augmented data (optional)')
    add('input_path', type=str,
        help='Path for directory containing (audio, jams)')
    add('output_path', type=str,
        help='Path to store pump output')
    return parser.parse_args(args)
def make_pump(sr, hop_length, n_octaves):
    """Build the pumpp feature/task pipeline and pickle it.

    sr         -- audio sampling rate (Hz)
    hop_length -- analysis hop length (samples)
    n_octaves  -- number of CQT octaves above C1

    The pump couples an HCQT magnitude feature with two chord task
    transformers (tag and structured); it is written to
    OUTPUT_PATH/pump.pkl so later stages can reuse it.
    """
    p_feature = pumpp.feature.HCQTMag(name='cqt',
                                      sr=sr, hop_length=hop_length,
                                      harmonics=[1, 2],
                                      log=True, conv='tf', n_octaves=n_octaves)
    p_chord_tag = pumpp.task.ChordTagTransformer(name='chord_tag',
                                                 sr=sr, hop_length=hop_length,
                                                 sparse=True)
    p_chord_struct = pumpp.task.ChordTransformer(name='chord_struct',
                                                 sr=sr, hop_length=hop_length,
                                                 sparse=True)
    pump = pumpp.Pump(p_feature, p_chord_tag, p_chord_struct)
    # Save the pump
    with open(os.path.join(OUTPUT_PATH, 'pump.pkl'), 'wb') as fd:
        pickle.dump(pump, fd)
    return pump
def convert(aud, jam, pump, outdir):
    """Transform one (audio, jams) pair and store the result as HDF5.

    The output filename is the audio basename with an .h5 extension,
    placed inside *outdir*.
    """
    data = pump.transform(aud, jam)
    fname = os.path.extsep.join([os.path.join(outdir, crema.utils.base(aud)),
                                 'h5'])
    crema.utils.save_h5(fname, **data)
if __name__ == '__main__':
    params = process_arguments(sys.argv[1:])
    # Make sure both the resource dir (pump.pkl) and the output dir
    # exist before any worker tries to write into them.
    smkdirs(OUTPUT_PATH)
    smkdirs(params.output_path)

    print('{}: pre-processing'.format(__doc__))
    print(params)

    pump = make_pump(params.sr, params.hop_length, params.n_octaves)

    stream = tqdm(crema.utils.get_ann_audio(params.input_path),
                  desc='Converting training data')
    # Fan the conversions out over n_jobs parallel workers.
    Parallel(n_jobs=params.n_jobs)(delayed(convert)(aud, ann,
                                                    pump,
                                                    params.output_path)
                                   for aud, ann in stream)

    # Optionally run the same conversion over the augmented dataset.
    if params.augment_path:
        stream = tqdm(crema.utils.get_ann_audio(params.augment_path),
                      desc='Converting augmented data')
        Parallel(n_jobs=params.n_jobs)(delayed(convert)(aud, ann,
                                                        pump,
                                                        params.output_path)
                                       for aud, ann in stream)
[
{
"content": "Repeat the following code:\n```python\n#!/usr/bin/python\n\"\"\"\nUnittests for utils_libvirtd module.\n\"\"\"\nimport unittest\nimport common\nfrom virttest import utils_selinux\nfrom autotest.client import os_dep\n\nclass TestSelinux(unittest.TestCase):\n \"\"\"\n Class for unittests of ut... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n#!/usr/bin/python\n\"\"\"\nUnittests for utils_libvirtd module.\n\"\"\"\nimport unittest\nimport common\nfrom virttest import utils_selinux\nfrom autotest.client import os_dep\n\nclass TestSelinux(unittest.TestCase):\n \"\"\"\n Class for... | ```python
#!/usr/bin/python
"""
Unittests for utils_libvirtd module.
"""
import unittest
import common
from virttest import utils_selinux
from autotest.client import os_dep
class TestSelinux(unittest.TestCase):
    """
    Class for unittests of utils_selinux.
    """

    def test_sestatus(self):
        """
        Test the method related with sestatus.
        """
        status = utils_selinux.get_status()
        #b/c there is no assertIn method in re.py in python2.6.
        #use assertTrue.
        self.assertTrue(status in ['enforcing', 'permissive', 'disabled'])
        if utils_selinux.is_disabled():
            self.assertRaises(utils_selinux.SelinuxError,
                              utils_selinux.set_status, "enforcing")
        else:
            self.assertRaises(utils_selinux.SelinuxError,
                              utils_selinux.set_status, "disabled")

    def test_is_or_not_disabled(self):
        """
        Test the method about selinux disabled.
        """
        is_disabled = utils_selinux.is_disabled()
        # assertTrue instead of assertIn for python 2.6 compatibility,
        # consistent with test_sestatus above.
        self.assertTrue(is_disabled in [True, False])
        is_not_disabled = utils_selinux.is_not_disabled()
        self.assertTrue(is_not_disabled in [True, False])
        self.assertEqual(not is_disabled, is_not_disabled)

    def test_context(self):
        """
        Test the context related method.
        """
        output = "output system_u:object_r:svirt_t:s0-s1:c250,c280 test"
        result = utils_selinux.get_context_from_str(string=output)
        self.assertEqual(result, "system_u:object_r:svirt_t:s0-s1:c250,c280")
        result = utils_selinux.get_context_of_file(filename=__file__)
        utils_selinux.set_context_of_file(filename=__file__, context=result)
        utils_selinux.get_context_of_process(pid=1)
if __name__ == '__main__':
    try:
        # getsebool is only present when the SELinux userland tools
        # are installed on the host.
        os_dep.command("getsebool")
    except ValueError:
        #There is no selinux on host,
        #so this unittest will be skipped.
        pass
    else:
        unittest.main()
``` |
[
{
"content": "Repeat the code precisely:\n```python\nfrom __future__ import absolute_import, unicode_literals\n\n######################\n# MEZZANINE SETTINGS #\n######################\n\n# The following settings are already defined with default values in\n# the ``defaults.py`` module within each of Mezzanine's ... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\nfrom __future__ import absolute_import, unicode_literals\n\n######################\n# MEZZANINE SETTINGS #\n######################\n\n# The following settings are already defined with default values in\n# the ``defaults.py`` module within each... | ```python
from __future__ import absolute_import, unicode_literals
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for convenient
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", ("Media Library", "fb_browse"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, "Top navigation bar", "pages/menus/dropdown.html"),
# (2, "Left-hand tree", "pages/menus/tree.html"),
# (3, "Footer", "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# ("Image",),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# ("Another name",),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the south application will be automatically added to the
# INSTALLED_APPS setting.
USE_SOUTH = True
########################
# MAIN DJANGO SETTINGS #
########################
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'),
# ('Full Name', 'anotheremail@example.com'))
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost',]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = None
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
_ = lambda s: s
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ("127.0.0.1",)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
#########
# PATHS #
#########
import os
# Full filesystem path to the project.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Name of the directory for the project.
PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"moderna_app", # This is a template I am using.
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.pages",
"mezzanine.galleries",
#"mezzanine.twitter",
#"mezzanine.accounts",
#"mezzanine.mobile",
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
###################
# DEPLOY SETTINGS #
###################
# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.
# FABRIC = {
# "SSH_USER": "", # SSH username for host deploying to
# "HOSTS": ALLOWED_HOSTS[:1], # List of hosts to deploy to (eg, first host)
# "DOMAINS": ALLOWED_HOSTS, # Domains for public site
# "REPO_URL": "ssh://hg@bitbucket.org/user/project", # Project's repo URL
# "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs
# "PROJECT_NAME": "", # Unique identifier for project
# "REQUIREMENTS_PATH": "requirements.txt", # Project's pip requirements
# "GUNICORN_PORT": 8000, # Port gunicorn will listen on
# "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8"
# "DB_PASS": "", # Live database password
# "ADMIN_PASS": "", # Live admin user password
# "SECRET_KEY": SECRET_KEY,
# "NEVERCACHE_KEY": NEVERCACHE_KEY,
# }
####################
# HSBSITE SETTINGS #
####################
SITE_TITLE = 'hbanner'
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
try:
    from local_settings import *
except ImportError as e:
    # Only swallow the absence of local_settings itself; re-raise
    # genuine import errors raised from inside that module.
    if "local_settings" not in str(e):
        raise e
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n# -*- coding: utf-8 -*-\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\nfrom wms.models import Dataset, Layer, VirtualLayer, Variable\nfrom wmsrest.serializers import ... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\nfrom wms.models import Dataset, Layer, VirtualLayer, Variable\nfrom wmsrest.ser... | ```python
# -*- coding: utf-8 -*-
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from wms.models import Dataset, Layer, VirtualLayer, Variable
from wmsrest.serializers import DatasetSerializer, SGridDatasetSerializer, UGridDatasetSerializer, RGridDatasetSerializer, LayerSerializer, VirtualLayerSerializer, VariableSerializer
from rest_framework.response import Response
from rest_framework import status
from rest_framework import mixins
from rest_framework import generics
from django.http import Http404
class DatasetList(APIView):
    """
    List all datasets, or create a new dataset.
    """

    def get(self, request, format=None):
        """Return the serialized list of every dataset."""
        snippets = Dataset.objects.select_related().all()
        serializer = DatasetSerializer(snippets, many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a dataset; the grid type selects the serializer."""
        dataset_type = request.data.get('type', '')
        if 'ugrid' in dataset_type:
            request.data['type'] = 'wms.ugriddataset'
            serializer = UGridDatasetSerializer(data=request.data)
        elif 'sgrid' in dataset_type:
            request.data['type'] = 'wms.sgriddataset'
            serializer = SGridDatasetSerializer(data=request.data)
        elif 'rgrid' in dataset_type:
            request.data['type'] = 'wms.rgriddataset'
            serializer = RGridDatasetSerializer(data=request.data)
        else:
            # Previously a missing/unknown type crashed with a
            # KeyError/UnboundLocalError (HTTP 500); reject cleanly.
            return Response({'type': 'Unknown dataset type'},
                            status=status.HTTP_400_BAD_REQUEST)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class DatasetDetail(APIView):
    """
    Get or update a specific Sci-WMS dataset.

    Supports GET, PUT, DELETE, and PATCH methods.
    A DELETE on a dataset with a defined m2m relationship
    to another table will also delete that relationship.
    PUT and PATCH requests with a defined m2m relations
    to another table will be updated accordingly.
    """
    permission_classes = (IsAuthenticatedOrReadOnly,)
    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer

    def get_object(self, pk):
        """Fetch the dataset or raise 404."""
        try:
            return Dataset.objects.get(pk=pk)
        except Dataset.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        dataset = self.get_object(pk)
        serializer = DatasetSerializer(dataset)
        return Response(serializer.data)

    def put(self, request, pk, format=None):
        dataset = self.get_object(pk)
        dataset_type = request.data.get('type', '')
        if 'ugrid' in dataset_type:
            request.data['type'] = 'wms.ugriddataset'
            serializer = UGridDatasetSerializer(dataset, data=request.data)
        elif 'sgrid' in dataset_type:
            request.data['type'] = 'wms.sgriddataset'
            serializer = SGridDatasetSerializer(dataset, data=request.data)
        elif 'rgrid' in dataset_type:
            request.data['type'] = 'wms.rgriddataset'
            serializer = RGridDatasetSerializer(dataset, data=request.data)
        else:
            # A missing/unknown grid type used to raise
            # UnboundLocalError below; answer with a 400 instead.
            return Response({'type': 'Unknown dataset type'},
                            status=status.HTTP_400_BAD_REQUEST)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        dataset = self.get_object(pk)
        dataset.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class LayerDetail(generics.RetrieveUpdateAPIView):
    """Retrieve or update a single Layer."""
    permission_classes = (IsAuthenticatedOrReadOnly,)
    serializer_class = LayerSerializer
    queryset = Layer.objects.all()
class VirtuallLayerDetail(generics.RetrieveUpdateAPIView):
    """Retrieve or update a single VirtualLayer.

    NOTE(review): the class name carries a historical "Virtuall"
    typo; renaming it would break existing URLconf imports, so it
    is kept as-is.
    """
    permission_classes = (IsAuthenticatedOrReadOnly,)
    serializer_class = VirtualLayerSerializer
    queryset = VirtualLayer.objects.all()
class DefaultDetail(generics.RetrieveUpdateAPIView):
    """Retrieve or update a single Variable default."""
    permission_classes = (IsAuthenticatedOrReadOnly,)
    serializer_class = VariableSerializer
    queryset = Variable.objects.all()
class DefaultList(APIView):
    """
    List all variable defaults, or create a new one.
    """

    def get(self, request, format=None):
        # Returns every Variable row, serialized.
        snippets = Variable.objects.all()
        serializer = VariableSerializer(snippets, many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        serializer = VariableSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
``` |
[
{
"content": "Here is the source code:\n```python\n###############################################################################\n# Language Modeling on Wikitext-2\n#\n# This file generates new sentences sampled from the language model\n#\n######################################################################... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\n###############################################################################\n# Language Modeling on Wikitext-2\n#\n# This file generates new sentences sampled from the language model\n#\n######################################################... | ```python
###############################################################################
# Language Modeling on Wikitext-2
#
# This file generates new sentences sampled from the language model
#
###############################################################################
# stdlib
import argparse
# third party
import torch
import data # isort:skip
parser = argparse.ArgumentParser(description="PyTorch Wikitext-2 Language Model")

# Model parameters.
parser.add_argument(
    "--data",
    type=str,
    default="./data/wikitext-2",
    help='location of the data corpus; default: "./data/wikitext-2"',
)
parser.add_argument(
    "--checkpoint",
    type=str,
    default="./model.pt",
    help='model checkpoint to use; default: "./model.pt"',
)
parser.add_argument(
    "--outf",
    type=str,
    default="generated.txt",
    help='output file for generated text; default: "generated.txt"',
)
parser.add_argument(
    "--words",
    type=int,
    # was default="1000": argparse re-parses string defaults through
    # `type`, which works but hides the real type; use the int.
    default=1000,
    help="number of words to generate; default: 1000",
)
parser.add_argument("--seed", type=int, default=1111, help="random seed; default: 1111")
parser.add_argument("--cuda", action="store_true", help="use CUDA")
parser.add_argument(
    "--temperature",
    type=float,
    default=1.0,
    help="temperature - higher will increase diversity; default: 1.0",
)
parser.add_argument(
    "--log-interval", type=int, default=100, help="reporting interval; default: 100"
)
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
device = torch.device("cuda" if args.cuda else "cpu")

if args.temperature < 1e-3:
    parser.error("--temperature has to be greater or equal 1e-3")

with open(args.checkpoint, "rb") as f:
    model = torch.load(f).to(device)
model.eval()

corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)

is_transformer_model = (
    hasattr(model, "model_type") and model.model_type == "Transformer"
)
if not is_transformer_model:
    hidden = model.init_hidden(1)
# Start from one random token; renamed from "input" to avoid
# shadowing the builtin input().
input_ids = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)

with open(args.outf, "w") as outf:
    with torch.no_grad():  # no tracking history
        for i in range(args.words):
            if is_transformer_model:
                output = model(input_ids, False)
                word_weights = output[-1].squeeze().div(args.temperature).exp().cpu()
                word_idx = torch.multinomial(word_weights, 1)[0]
                word_tensor = torch.Tensor([[word_idx]]).long().to(device)
                # Transformers need the whole generated prefix as context.
                input_ids = torch.cat([input_ids, word_tensor], 0)
            else:
                output, hidden = model(input_ids, hidden)
                word_weights = output.squeeze().div(args.temperature).exp().cpu()
                word_idx = torch.multinomial(word_weights, 1)[0]
                # RNNs carry the context in "hidden"; feed only the new token.
                input_ids.fill_(word_idx)

            word = corpus.dictionary.idx2word[word_idx]
            outf.write(word + ("\n" if i % 20 == 19 else " "))

            if i % args.log_interval == 0:
                print(f"| Generated {i}/{args.words} words")
``` |
[
{
"content": "Repeat the following code:\n```python\n# -*- coding: utf-8 -*-\n\"\"\"Setup the Brie application\"\"\"\n\nimport logging\n\nimport transaction\nfrom tg import config\n\nfrom brie.config.environment import load_environment\n\n__all__ = ['setup_app']\n\nlog = logging.getLogger(__name__)\n\n\ndef set... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"Setup the Brie application\"\"\"\n\nimport logging\n\nimport transaction\nfrom tg import config\n\nfrom brie.config.environment import load_environment\n\n__all__ = ['setup_app']\n\nlog = logging.getLogger(__name... | ```python
# -*- coding: utf-8 -*-
"""Setup the Brie application"""
import logging
import transaction
from tg import config
from brie.config.environment import load_environment
__all__ = ['setup_app']
log = logging.getLogger(__name__)
def setup_app(command, conf, vars):
"""Place any commands to setup brie here"""
load_environment(conf.global_conf, conf.local_conf)
# Load the models
from brie import model
print "Creating tables"
model.metadata.create_all(bind=config['pylons.app_globals'].sa_engine)
manager = model.User()
manager.user_name = u'manager'
manager.display_name = u'Example manager'
manager.email_address = u'manager@somedomain.com'
manager.password = u'managepass'
model.DBSession.add(manager)
group = model.Group()
group.group_name = u'managers'
group.display_name = u'Managers Group'
group.users.append(manager)
model.DBSession.add(group)
permission = model.Permission()
permission.permission_name = u'manage'
permission.description = u'This permission give an administrative right to the bearer'
permission.groups.append(group)
model.DBSession.add(permission)
editor = model.User()
editor.user_name = u'editor'
editor.display_name = u'Example editor'
editor.email_address = u'editor@somedomain.com'
editor.password = u'editpass'
model.DBSession.add(editor)
model.DBSession.flush()
transaction.commit()
print "Successfully setup"
``` |
[
{
"content": "Here is the snippet:\n```python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('exchange', '0011_conferenceroom'),\n ]\n\n operations = [\n migr... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('exchange', '0011_conferenceroom'),\n ]\n\n operations =... | ```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('exchange', '0011_conferenceroom'),
]
operations = [
migrations.AddField(
model_name='group',
name='delivery_members',
field=models.ManyToManyField(related_name='+', to='exchange.User'),
preserve_default=True,
),
migrations.AddField(
model_name='group',
name='senders_out',
field=models.BooleanField(default=False, help_text=b'Delivery management for senders outside organizational unit'),
preserve_default=True,
),
migrations.AddField(
model_name='user',
name='send_as_members',
field=models.ManyToManyField(related_name='send_as_members_rel_+', to='exchange.User'),
preserve_default=True,
),
migrations.AddField(
model_name='user',
name='send_on_behalf_members',
field=models.ManyToManyField(related_name='send_on_behalf_members_rel_+', to='exchange.User'),
preserve_default=True,
),
migrations.AlterField(
model_name='group',
name='members',
field=models.ManyToManyField(related_name='+', to='exchange.User'),
preserve_default=True,
),
]
``` |
[
{
"content": "Repeat the full code snippet:\n```python\nfrom zope.interface import implementer\nfrom sqlalchemy import (\n Column,\n Unicode,\n Integer,\n ForeignKey,\n)\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy.ext.declarative import declared_attr\n\nfrom clld import interf... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\nfrom zope.interface import implementer\nfrom sqlalchemy import (\n Column,\n Unicode,\n Integer,\n ForeignKey,\n)\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy.ext.declarative import declared_attr\n\nfrom cl... | ```python
from zope.interface import implementer
from sqlalchemy import (
Column,
Unicode,
Integer,
ForeignKey,
)
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declared_attr
from clld import interfaces
from clld.db.meta import Base, CustomModelMixin
from clld.db.models.common import Sentence, Contribution, Unit, IdNameDescriptionMixin
#-----------------------------------------------------------------------------
# specialized common mapper classes
#-----------------------------------------------------------------------------
"""
CREATE TABLE "texts_data_dictionary_entry" (
"id" integer NOT NULL PRIMARY KEY,
"Number" integer NOT NULL,
"Value" varchar(50) NOT NULL,
"Part_of_Speech" varchar(50) NOT NULL,
"Gloss" varchar(50) NOT NULL,
"Notes" text NOT NULL,
"Changed" date NOT NULL
);
CREATE TABLE "texts_data_glossary" (
"id" integer NOT NULL PRIMARY KEY,
"Number" integer NOT NULL,
"Value" varchar(50) NOT NULL,
"Part_of_Speech" varchar(50) NOT NULL,
"Gloss" varchar(50) NOT NULL,
"Notes" text NOT NULL,
"Changed" date NOT NULL
);
CREATE TABLE "texts_data_line" (
"id" integer NOT NULL PRIMARY KEY,
"to_Text_id" integer NOT NULL REFERENCES "texts_data_text" ("id"),
"Line_Position" integer NOT NULL,
"Tsez_Line" varchar(400) NOT NULL,
"English_Translation" varchar(400) NOT NULL,
"Russian_Translation" varchar(400) NOT NULL
);
CREATE TABLE "texts_data_morpheme" (
"id" integer NOT NULL PRIMARY KEY,
"to_Word_id" integer NOT NULL REFERENCES "texts_data_word" ("id"),
"Position" integer NOT NULL,
"Value" varchar(10) NOT NULL,
"Gloss" varchar(10) NOT NULL,
"Part_of_Speech" varchar(10) NOT NULL
);
CREATE TABLE "texts_data_text" (
"id" integer NOT NULL PRIMARY KEY,
"Number" integer NOT NULL,
"Title_in_Tsez" varchar(200) NOT NULL,
"Title_in_English" varchar(200) NOT NULL,
"Title_in_Russian" varchar(200) NOT NULL
);
CREATE TABLE "texts_data_word" (
"id" integer NOT NULL PRIMARY KEY,
"to_Line_id" integer NOT NULL REFERENCES "texts_data_line" ("id"),
"Lex_Position" integer NOT NULL,
"Word_in_Phrase" varchar(20) NOT NULL,
"Word_Clear" varchar(15) NOT NULL
);
"""
@implementer(interfaces.IContribution)
class Text(CustomModelMixin, Contribution):
pk = Column(Integer, ForeignKey('contribution.pk'), primary_key=True)
ord = Column(Integer, nullable=False)
russian = Column(Unicode)
@implementer(interfaces.ISentence)
class Line(CustomModelMixin, Sentence):
pk = Column(Integer, ForeignKey('sentence.pk'), primary_key=True)
ord = Column(Integer, nullable=False)
text_pk = Column(Integer, ForeignKey('text.pk'))
russian = Column(Unicode)
@declared_attr
def text(cls):
return relationship(Text, backref=backref('lines', order_by=cls.ord))
class WordInLine(Base, IdNameDescriptionMixin):
line_pk = Column(Integer, ForeignKey('line.pk'))
ord = Column(Integer, nullable=False)
@declared_attr
def line(cls):
return relationship(Line, backref=backref('words', order_by=cls.ord))
@implementer(interfaces.IUnit)
class Morpheme(CustomModelMixin, Unit):
pk = Column(Integer, ForeignKey('unit.pk'), primary_key=True)
pos = Column(Unicode)
notes = Column(Unicode)
class MorphemeInWord(Base, IdNameDescriptionMixin):
word_pk = Column(Integer, ForeignKey('wordinline.pk'))
ord = Column(Integer, nullable=False)
pos = Column(Unicode)
normgloss = Column(Unicode)
morpheme_pk = Column(Integer, ForeignKey('morpheme.pk'))
morpheme = relationship(Morpheme, backref='occurrences')
@declared_attr
def word(cls):
return relationship(WordInLine, backref=backref('morphemes', order_by=cls.ord))
``` |
[
{
"content": "```python\n# Generated by YCM Generator at 2015-07-01 14:51:38.867126\n\n# This file is NOT licensed under the GPLv3, which is the license for the rest\n# of YouCompleteMe.\n#\n# Here's the license text for this file:\n#\n# This is free and unencumbered software released into the public domain.\n#... | [
{
"content": "<|memory_start|>```python\n# Generated by YCM Generator at 2015-07-01 14:51:38.867126\n\n# This file is NOT licensed under the GPLv3, which is the license for the rest\n# of YouCompleteMe.\n#\n# Here's the license text for this file:\n#\n# This is free and unencumbered software released into the p... | ```python
# Generated by YCM Generator at 2015-07-01 14:51:38.867126
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
flags = [
'-x',
'c++',
'-DETL_VECTORIZE_FULL',
'-DNDEBUG',
'-ICatch/include',
'-Ietl/include/',
'-Ietl/lib/include',
'-Iinclude',
'-Imnist/include/',
'-Inice_svm/include',
'-Wall',
'-Wdocumentation',
'-Werror',
'-Wextra',
'-Winit-self',
'-Wno-documentation',
'-Wno-long-long',
'-Wsometimes-uninitialized',
'-Wuninitialized',
'-std=c++1y',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n'''\nVisualOps agent requests objects\n(c) 2014 - MadeiraCloud LTD.\n\n@author: Thibault BRONCHAIN\n'''\n\n\n# Protocol defines import\nfrom opsagent.objects import codes\n\n\n# Handshake request\ndef handshake(config, errors):\n init = conf... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n'''\nVisualOps agent requests objects\n(c) 2014 - MadeiraCloud LTD.\n\n@author: Thibault BRONCHAIN\n'''\n\n\n# Protocol defines import\nfrom opsagent.objects import codes\n\n\n# Handshake request\ndef handshake(config, errors):\... | ```python
'''
VisualOps agent requests objects
(c) 2014 - MadeiraCloud LTD.
@author: Thibault BRONCHAIN
'''
# Protocol defines import
from opsagent.objects import codes
# Handshake request
def handshake(config, errors):
init = config.get('init')
version = (config['userdata'].get('version') if config.get('userdata') else None)
if type(init) is not dict: init={}
return ({
"code" : codes.HANDSHAKE,
"instance_id" : init.get('instance_id'),
"app_id" : init.get('app_id'),
"agent_version" : version,
"protocol_version" : codes.PROTOCOL_VERSION,
"instance_token" : init.get('instance_token'),
"init_errors" : ("; ".join(errors) if errors else None),
})
# Statelog request
def statelog(init, version, sid, result, comment, out_log):
return ({
"code" : codes.STATELOG,
"instance_id" : init.get('instance_id'),
"app_id" : init.get('app_id'),
"recipe_version" : version,
"id" : sid,
"state_result" : result,
"state_comment" : comment,
"state_stdout" : out_log
})
# Test request
def test(config, errors=None):
init = config.get('init')
version = (config['userdata'].get('version') if config.get('userdata') else None)
if type(init) is not dict: init={}
return ({
"code" : codes.TEST,
"instance_id" : init.get('instance_id'),
"app_id" : init.get('app_id'),
"agent_version" : version,
"protocol_version" : codes.PROTOCOL_VERSION,
"instance_token" : init.get('instance_token'),
"init_errors" : ("; ".join(errors) if errors else None),
})
``` |
[
{
"content": "Repeat the code precisely:\n```python\n\"\"\"A setuptools based setup module.\n\nSee (and based on):\nhttps://packaging.python.org/en/latest/distributing.html\nhttps://github.com/pypa/sampleproject\n\"\"\"\n\n# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\n#... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n\"\"\"A setuptools based setup module.\n\nSee (and based on):\nhttps://packaging.python.org/en/latest/distributing.html\nhttps://github.com/pypa/sampleproject\n\"\"\"\n\n# Always prefer setuptools over distutils\nfrom setuptools import setup, ... | ```python
"""A setuptools based setup module.
See (and based on):
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='lmc',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.2.1',
description='Logarithmantic Monte Carlo',
long_description=long_description,
# The project's main homepage.
url='https://github.com/abmantz/lmc',
# Author details
author='Adam Mantz',
author_email='amantz@slac.stanford.edu',
# Choose your license
license='LGPL-3.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
# What does your project relate to?
#keywords='sample setuptools development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['examples']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data={
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
#entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
)
``` |
[
{
"content": "Replicate the source code:\n```python\nfrom office365.sharepoint.publishing.primary_city_time import PrimaryCityTime\nfrom office365.sharepoint.publishing.site_page_metadata_collection import SitePageMetadataCollection\nfrom office365.sharepoint.publishing.site_page_service import SitePageService\... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\nfrom office365.sharepoint.publishing.primary_city_time import PrimaryCityTime\nfrom office365.sharepoint.publishing.site_page_metadata_collection import SitePageMetadataCollection\nfrom office365.sharepoint.publishing.site_page_service import ... | ```python
from office365.sharepoint.publishing.primary_city_time import PrimaryCityTime
from office365.sharepoint.publishing.site_page_metadata_collection import SitePageMetadataCollection
from office365.sharepoint.publishing.site_page_service import SitePageService
from office365.sharepoint.publishing.video_service_discoverer import VideoServiceDiscoverer
from tests.sharepoint.sharepoint_case import SPTestCase
class TestSPPublishing(SPTestCase):
@classmethod
def setUpClass(cls):
super(TestSPPublishing, cls).setUpClass()
@classmethod
def tearDownClass(cls):
pass
def test1_init_site_page_service(self):
svc = SitePageService(self.client).get().execute_query()
self.assertIsNotNone(svc.resource_path)
def test2_get_site_pages(self):
svc = SitePageService(self.client)
pages = svc.pages().get().execute_query()
self.assertIsInstance(pages, SitePageMetadataCollection)
def test3_get_time_zone(self):
time_zone = SitePageService.get_time_zone(self.client, "Moscow").execute_query()
self.assertIsInstance(time_zone, PrimaryCityTime)
self.assertEqual(time_zone.properties.get("Location"), "Moscow, Russia")
def test4_compute_file_name(self):
result = SitePageService.compute_file_name(self.client, "Test page").execute_query()
self.assertIsNotNone(result.value)
def test5_file_picker_tab_options(self):
result = SitePageService.file_picker_tab_options(self.client).execute_query()
self.assertIsNotNone(result.value)
def test6_org_assets(self):
result = SitePageService.org_assets(self.client).execute_query()
self.assertIsNotNone(result.value)
def test7_get_video_service_manager(self):
discoverer = VideoServiceDiscoverer(self.client).get().execute_query()
self.assertIsNotNone(discoverer.resource_path)
``` |
[
{
"content": "```python\nfrom django import template\nfrom django.conf import settings\nimport json\nfrom geotrek.outdoor.models import Practice, Site\n\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef is_outdoor_enabled():\n return 'geotrek.outdoor' in settings.INSTALLED_APPS\n\n\n@register.s... | [
{
"content": "<|memory_start|>```python\nfrom django import template\nfrom django.conf import settings\nimport json\nfrom geotrek.outdoor.models import Practice, Site\n\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef is_outdoor_enabled():\n return 'geotrek.outdoor' in settings.INSTALLED_APPS\... | ```python
from django import template
from django.conf import settings
import json
from geotrek.outdoor.models import Practice, Site
register = template.Library()
@register.simple_tag
def is_outdoor_enabled():
return 'geotrek.outdoor' in settings.INSTALLED_APPS
@register.simple_tag
def site_practices():
practices = {
str(practice.pk): {
'types': {
str(type.pk): type.name
for type in practice.types.all()
},
'scales': {
str(scale.pk): {
'name': scale.name,
'ratings': {
str(rating.pk): rating.name
for rating in scale.ratings.all()
},
}
for scale in practice.rating_scales.all()
},
}
for practice in Practice.objects.all()
}
return json.dumps(practices)
@register.filter
def orientation_display(orientation):
return dict(Site.ORIENTATION_CHOICES)[orientation]
@register.filter
def wind_display(orientation):
return dict(Site.WIND_CHOICES)[orientation]
``` |
[
{
"content": "Here is a code snippet:\n```python\n#!/usr/bin/python\n#\n# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.\n#\n\nimport os\nimport sys\nimport argparse\nimport netaddr\nimport netifaces\nimport ConfigParser\nimport platform\n\nfrom fabric.api import local\n\nfrom contrail_provision... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n#!/usr/bin/python\n#\n# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.\n#\n\nimport os\nimport sys\nimport argparse\nimport netaddr\nimport netifaces\nimport ConfigParser\nimport platform\n\nfrom fabric.api import local\n\nfrom co... | ```python
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import sys
import argparse
import netaddr
import netifaces
import ConfigParser
import platform
from fabric.api import local
from contrail_provisioning.common.base import ContrailSetup
from contrail_provisioning.compute.network import ComputeNetworkSetup
from contrail_provisioning.common.templates import keepalived_conf_template
(PLATFORM, VERSION, EXTRA) = platform.linux_distribution()
class KeepalivedSetup(ContrailSetup, ComputeNetworkSetup):
def __init__(self, args_str = None):
super(KeepalivedSetup, self).__init__()
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self.parse_args(args_str)
def parse_args(self, args_str):
'''
Eg. setup-vnc-keepalived --self_ip 10.1.5.11 --mgmt_self_ip 11.1.5.11
--self_index 1 --internal_vip 10.1.5.13 --external_vip 11.1.5.13
'''
parser = self._parse_args(args_str)
parser.add_argument("--role", help = "Role of the node")
parser.add_argument("--self_ip", help = "IP Address of this system")
parser.add_argument("--mgmt_self_ip", help = "Management IP Address of this system")
parser.add_argument("--internal_vip", help = "Internal(private) Virtual IP Addresses of HA nodes"),
parser.add_argument("--external_vip", help = "External(public) Virtual IP Addresses of HA nodes"),
parser.add_argument("--self_index", help = "The index of this HA node", type=int)
parser.add_argument("--num_nodes", help = "Number of available HA node")
parser.add_argument("--internal_virtual_router_id", help = "Internal Virtual router ID", type=int)
parser.add_argument("--external_virtual_router_id", help = "External Virtual router ID", type=int)
self._args = parser.parse_args(self.remaining_argv)
def fixup_config_files(self):
vip_for_ips = [(self._args.internal_vip, self._args.self_ip, 'INTERNAL')]
internal_device=self.get_device_by_ip(self._args.self_ip)
if self._args.external_vip:
vip_for_ips.append((self._args.external_vip, self._args.mgmt_self_ip, 'EXTERNAL'))
ext_device=self.get_device_by_ip(self._args.mgmt_self_ip)
else:
ext_device=internal_device
for vip, ip, vip_name in vip_for_ips:
# keepalived.conf
device = self.get_device_by_ip(ip)
netmask = netifaces.ifaddresses(device)[netifaces.AF_INET][0]['netmask']
prefix = netaddr.IPNetwork('%s/%s' % (ip, netmask)).prefixlen
state = 'BACKUP'
delay = 1
preempt_delay = 1
timeout = 1
rise = 1
fall = 1
garp_master_repeat = 3
garp_master_refresh = 1
ctrl_data_timeout=3
ctrl_data_rise=1
ctrl_data_fall=1
if self._args.self_index == 1:
state = 'MASTER'
delay = 5
preempt_delay = 7
timeout = 3
rise = 2
fall = 2
if vip_name == 'INTERNAL':
router_id = self._args.internal_virtual_router_id
external_device = internal_device
else:
router_id = self._args.external_virtual_router_id
external_device = ext_device
priority = (100 - self._args.self_index)
if self._args.num_nodes > 2 and self._args.self_index == 2:
state = 'MASTER'
vip_str = '_'.join([vip_name] + vip.split('.'))
template_vals = {'__device__': device,
'__router_id__' : router_id,
'__state__' : state,
'__delay__' : delay,
'__garp_master_repeat__' : garp_master_repeat,
'__garp_master_refresh__' : garp_master_refresh,
'__preempt_delay__' : preempt_delay,
'__priority__' : priority,
'__virtual_ip__' : vip,
'__virtual_ip_mask__' : prefix,
'__vip_str__' : vip_str,
'__timeout__' : timeout,
'__rise__' : rise,
'__fall__' : fall,
'__cd_timeout__' : ctrl_data_timeout,
'__cd_rise__' : ctrl_data_rise,
'__cd_fall__' : ctrl_data_fall,
'__internal_device__' : internal_device,
'__external_device__' : external_device,
}
data = self._template_substitute(keepalived_conf_template.template,
template_vals)
with open(self._temp_dir_name + '/keepalived.conf', 'a+') as fp:
fp.write(data)
local("sudo mv %s/keepalived.conf /etc/keepalived/" %(self._temp_dir_name))
def run_services(self):
if PLATFORM.lower() == 'ubuntu':
local("sudo chkconfig keepalived on && sudo service keepalived restart")
else:
local("sudo systemctl enable keepalived && sudo systemctl restart keepalived")
def main(args_str = None):
keepalived = KeepalivedSetup(args_str)
keepalived.setup()
if __name__ == "__main__":
main()
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n# This file is part of the pyMOR project (http://www.pymor.org).\n# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler\n# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)\n\nfrom __future__ impor... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n# This file is part of the pyMOR project (http://www.pymor.org).\n# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler\n# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)\n\nfrom ... | ```python
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
from itertools import product
import pytest
from pymor.discretizers.advection import discretize_nonlinear_instationary_advection_fv
from pymor.discretizers.elliptic import discretize_elliptic_cg
from pymortests.fixtures.analyticalproblem import (picklable_thermalblock_problems, non_picklable_thermalblock_problems,
burgers_problems)
picklable_discretizaion_generators = \
[lambda p=p,d=d: discretize_elliptic_cg(p, diameter=d)[0]
for p, d in product(picklable_thermalblock_problems, [1./50., 1./100.])] + \
[lambda p=p,d=d: discretize_nonlinear_instationary_advection_fv(p, diameter=d)[0]
for p, d in product(burgers_problems, [1./10., 1./15.])]
non_picklable_discretization_generators = \
[lambda p=p,d=d: discretize_elliptic_cg(p, diameter=d)[0]
for p, d in product(non_picklable_thermalblock_problems, [1./20., 1./30.])]
discretization_generators = picklable_discretizaion_generators + non_picklable_discretization_generators
@pytest.fixture(params=discretization_generators)
def discretization(request):
return request.param()
@pytest.fixture(params=picklable_discretizaion_generators)
def picklable_discretization(request):
return request.param()
``` |
[
{
"content": "Repeat the following code:\n```python\nfrom . import lib\nfrom .timeout import Timeout\n\nimport greenlet\n\nimport errno\nimport socket as stdsocket\n\nfrom socket import * # for convenience\nfrom socket import timeout as timeout_error\n\n\nclass socket(stdsocket.socket):\n __slots__ = ()\n\n ... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\nfrom . import lib\nfrom .timeout import Timeout\n\nimport greenlet\n\nimport errno\nimport socket as stdsocket\n\nfrom socket import * # for convenience\nfrom socket import timeout as timeout_error\n\n\nclass socket(stdsocket.socket):\n __s... | ```python
from . import lib
from .timeout import Timeout
import greenlet
import errno
import socket as stdsocket
from socket import * # for convenience
from socket import timeout as timeout_error
class socket(stdsocket.socket):
__slots__ = ()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setblocking(False)
def __wait(self, events, timeout=None):
try:
with Timeout(timeout if timeout else super().gettimeout()):
lib.Io(fd=self.fileno(), events=events).start()
except TimeoutError:
raise timeout_error
def connect(self, addr, timeout=None):
ret = self.connect_ex(addr)
if ret == 0:
return
if ret != errno.EINPROGRESS:
raise stdsocket.error(ret)
self.__wait(lib.EV_WRITE, timeout)
def send(self, value, timeout=None, *args, **kwargs):
while True:
try:
return super().send(value, *args, **kwargs)
except stdsocket.error as err:
if err.errno not in (errno.EWOULDBLOCK, errno.EAGAIN, errno.EINTR):
raise
self.__wait(lib.EV_WRITE, timeout)
def sendall(self, value, timeout=None, *args, **kwargs):
while True:
bytes = self.send(value, timeout, *args, **kwargs)
if bytes >= len(value):
return
value = value[bytes:]
def recv(self, size, timeout=None, *args, **kwargs):
while True:
fd = self.fileno()
if fd < 0:
return b''
self.__wait(lib.EV_READ, timeout)
try:
return super().recv(size, *args, **kwargs)
except stdsocket.error as err:
if err.errno in (errno.EWOULDBLOCK, errno.EAGAIN, errno.EINTR):
continue
raise
def accept(self, timeout=None):
while True:
self.__wait(lib.EV_READ, timeout)
try:
sock, addr = super().accept()
sock.setblocking(False)
sock.__class__ = socket
return sock, addr
except stdsocket.error as err:
if err.errno in (errno.EWOULDBLOCK, errno.EAGAIN, errno.EINTR):
continue
raise
if __name__ == '__main__':
from .hub import Hub
def get():
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', 8000))
sock.send(b'GET / HTTP/1.0\r\n\r\n') # wrong but ok for sample
sock.shutdown(SHUT_WR)
while True:
data = sock.recv(4096)
if not data:
break
print(data)
while True:
with Hub() as hub:
hub.spawn(get)
hub.switch()
``` |
[
{
"content": "Repeat the following code:\n```python\n# -*- coding:utf-8 -*-\n\nfrom report import report_sxw\nfrom tools.translate import _\nimport rml_parse\nimport time\n\nclass rainsoft_saleout_report(rml_parse.rml_parse):\n def __init__(self,cr,uid,name,context):\n\tsuper(rainsoft_saleout_report, self)._... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n# -*- coding:utf-8 -*-\n\nfrom report import report_sxw\nfrom tools.translate import _\nimport rml_parse\nimport time\n\nclass rainsoft_saleout_report(rml_parse.rml_parse):\n def __init__(self,cr,uid,name,context):\n\tsuper(rainsoft_saleout... | ```python
# -*- coding:utf-8 -*-
from report import report_sxw
from tools.translate import _
import rml_parse
import time
class rainsoft_saleout_report(rml_parse.rml_parse):
def __init__(self,cr,uid,name,context):
super(rainsoft_saleout_report, self).__init__(cr, uid, name, context)
self.localcontext.update({
'time': time,
'get_partner':self._get_partner,
'get_sale_order':self._get_sale_order,
'get_product_name':self._get_product_name,
'get_mrp_product_name':self._get_mrp_product_name,
'get_uom':self._get_uom,
})
def _get_partner(self,partner_id):
partner = self.pool.get('res.partner').browse(self.cr,self.uid,partner_id.id,None)
return partner.name
def _get_sale_order(self,order_id):
sale_order = self.pool.get('sale.order').browse(self.cr,self.uid,order_id.id,None)
return sale_order.name
def _get_product_name(self,product_id,mrp_product_id):
if mrp_product_id:
return ''
if product_id:
product = self.pool.get('product.product').browse(self.cr,self.uid,product_id.id,None)
return product.name
def _get_mrp_product_name(self,product_id,mrp_product_id):
if not mrp_product_id:
return ''
if product_id:
product = self.pool.get('product.product').browse(self.cr,self.uid,product_id.id,None)
return product.name
def _get_uom(self,uom_id):
if uom_id:
uom = self.pool.get('product.uom').browse(self.cr,self.uid,uom_id.id,None)
return uom.name
report_sxw.report_sxw('report.rainsoft.saleout', 'rainsoft.saleout', \
'Rainsoft_Xiangjie/report/rainsoft_saleout.rml', \
parser=rainsoft_saleout_report,header=False)
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n# Generated by Django 2.0.8 on 2018-09-25 15:48\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [(\"product\", \"0071_attributechoicevalue_value\")]\n\n op... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n# Generated by Django 2.0.8 on 2018-09-25 15:48\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [(\"product\", \"0071_attributechoicevalue_val... | ```python
# Generated by Django 2.0.8 on 2018-09-25 15:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("product", "0071_attributechoicevalue_value")]
operations = [
migrations.RenameModel(old_name="ProductAttribute", new_name="Attribute"),
migrations.RenameModel(
old_name="AttributeChoiceValueTranslation",
new_name="AttributeValueTranslation",
),
migrations.RenameModel(
old_name="AttributeChoiceValue", new_name="AttributeValue"
),
migrations.RenameModel(
old_name="ProductAttributeTranslation", new_name="AttributeTranslation"
),
migrations.RenameField(
model_name="attributetranslation",
old_name="product_attribute",
new_name="attribute",
),
migrations.RenameField(
model_name="attributevaluetranslation",
old_name="attribute_choice_value",
new_name="attribute_value",
),
migrations.AlterUniqueTogether(
name="attributetranslation",
unique_together={("language_code", "attribute")},
),
migrations.AlterUniqueTogether(
name="attributevaluetranslation",
unique_together={("language_code", "attribute_value")},
),
]
``` |
[
{
"content": "Here is the snippet:\n```python\n# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n\n # Changing field 'MessageCost.tag_pool'\n ... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n\n # Changing field 'MessageCost.tag... | ```python
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'MessageCost.tag_pool'
db.alter_column(u'billing_messagecost', 'tag_pool_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['billing.TagPool'], null=True))
# Adding index on 'MessageCost', fields ['message_direction']
db.create_index(u'billing_messagecost', ['message_direction'])
# Adding unique constraint on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.create_unique(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
# Adding index on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.create_index(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
def backwards(self, orm):
# Removing index on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.delete_index(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
# Removing unique constraint on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.delete_unique(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
# Removing index on 'MessageCost', fields ['message_direction']
db.delete_index(u'billing_messagecost', ['message_direction'])
# User chose to not deal with backwards NULL issues for 'MessageCost.tag_pool'
raise RuntimeError("Cannot reverse this migration. 'MessageCost.tag_pool' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'MessageCost.tag_pool'
db.alter_column(u'billing_messagecost', 'tag_pool_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['billing.TagPool']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.gouser': {
'Meta': {'object_name': 'GoUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'billing.account': {
'Meta': {'object_name': 'Account'},
'account_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'alert_credit_balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '20', 'decimal_places': '6'}),
'alert_threshold': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2'}),
'credit_balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '20', 'decimal_places': '6'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.GoUser']"})
},
u'billing.lineitem': {
'Meta': {'object_name': 'LineItem'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message_direction': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'statement': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.Statement']"}),
'tag_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'tag_pool_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'total_cost': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'billing.messagecost': {
'Meta': {'unique_together': "[['account', 'tag_pool', 'message_direction']]", 'object_name': 'MessageCost', 'index_together': "[['account', 'tag_pool', 'message_direction']]"},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.Account']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'markup_percent': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2'}),
'message_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '3'}),
'message_direction': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'session_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '3'}),
'tag_pool': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.TagPool']", 'null': 'True', 'blank': 'True'})
},
u'billing.statement': {
'Meta': {'object_name': 'Statement'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.Account']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'from_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'to_date': ('django.db.models.fields.DateField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'billing.tagpool': {
'Meta': {'object_name': 'TagPool'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'billing.transaction': {
'Meta': {'object_name': 'Transaction'},
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'credit_amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '20', 'decimal_places': '6'}),
'credit_factor': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'markup_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'message_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '3'}),
'message_direction': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'session_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '3'}),
'session_created': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Pending'", 'max_length': '20'}),
'tag_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'tag_pool_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['billing']
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n\"\"\"\nAllow the ability to connect and publish to a queue.\n\"\"\"\nimport logging\nimport time\n\nimport kombu\nimport six\n\n\nclass Producer(object):\n\n def __init__(self, dest_queue_name, rabbitmq_host, rabbi... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n\"\"\"\nAllow the ability to connect and publish to a queue.\n\"\"\"\nimport logging\nimport time\n\nimport kombu\nimport six\n\n\nclass Producer(object):\n\n def __init__(self, dest_queue_name, rabb... | ```python
"""
Allow the ability to connect and publish to a queue.
"""
import logging
import time
import kombu
import six
class Producer(object):
def __init__(self, dest_queue_name, rabbitmq_host, rabbitmq_port=None,
serializer=None, compression=None,
userid=None, password=None):
connect_kwargs = {}
if userid is not None:
connect_kwargs['userid'] = userid
if password is not None:
connect_kwargs['password'] = password
if rabbitmq_port is not None:
connect_kwargs['port'] = rabbitmq_port
broker = kombu.BrokerConnection(rabbitmq_host, **connect_kwargs)
self.dest_queue = broker.SimpleQueue(
dest_queue_name,
serializer=serializer,
compression=compression,
)
def put(self, item):
"""
Put one item onto the queue.
"""
self.dest_queue.put(item)
def buffered_put(self, input_iter, batch_size, resume_threshold=0.1, delay_in_seconds=5.0):
"""
Given an input iterator, keep adding batches of items to the
destination queue.
After each batch, wait for the queue size to drop to a certain level
until putting in the next batch.
(Wait until the queue size is batch_size * resume_threshold.)
Note that it isn't exact, but it will attempt to ensure that the queue
size never goes (much) beyond batch_size.
"""
num_enqueued = 0
while True:
try:
logging.debug('Starting batch (batch_size={0})'.format(batch_size))
for i in range(batch_size):
self.put(six.next(input_iter))
num_enqueued += 1
logging.debug('Batch done. {0} items enqueued so far'.format(num_enqueued))
except StopIteration:
# We're done!
#
logging.debug('Input exhausted. {0} items enqueued in total'.format(num_enqueued))
break
# After each batch, we need to pause briefly.
# Otherwise get_num_messages won't include the messages that we
# just enqueued.
#
time.sleep(delay_in_seconds)
# Now that we have completed one batch, we need to wait.
max_size = resume_threshold * batch_size
num_messages = self.dest_queue.qsize()
while num_messages >= max_size:
logging.debug(
'Current queue size = {0}, waiting until size <= {1}'.format(
num_messages, max_size,
),
)
time.sleep(delay_in_seconds)
num_messages = self.dest_queue.qsize()
``` |
[
{
"content": "Here is the snippet:\n```python\n#-*- coding: utf-8 -*-\n\nimport sys\nimport pygame\nfrom pygame.locals import *\nfrom utils import *\nfrom initial import LoadMenuTextures\n\nclass MainMenu(LoadMenuTextures):\n \n def __init__(self, modes, win_w, win_h):\n self.showmain = True\n ... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n#-*- coding: utf-8 -*-\n\nimport sys\nimport pygame\nfrom pygame.locals import *\nfrom utils import *\nfrom initial import LoadMenuTextures\n\nclass MainMenu(LoadMenuTextures):\n \n def __init__(self, modes, win_w, win_h):\n self.showma... | ```python
#-*- coding: utf-8 -*-
import sys
import pygame
from pygame.locals import *
from utils import *
from initial import LoadMenuTextures
class MainMenu(LoadMenuTextures):
def __init__(self, modes, win_w, win_h):
self.showmain = True
self.submenu = False
self.click = False
self.modes = modes
LoadMenuTextures.__init__(self, win_w, win_h)
self.menuloop()
def mousepos(self):
self.pos = pygame.mouse.get_pos()
def is_inside(self, coords):
x, y = self.pos
if (x > coords[0] and x < coords[4]) and (y > coords[1] and y < coords[5]):
return True
else:
return False
def startbutton(self):
if self.is_inside(self.start_coords):
self.start.show_button(hover=True)
if self.click:
self.showmain = False
else:
self.start.show_button()
def aboutbutton(self):
if self.is_inside(self.about_coords):
self.about.show_button(hover=True)
if self.click:
self.submenu = True
else:
self.about.show_button()
def gobackbutton(self):
if self.is_inside(self.goback_coords):
self.goback.show_button(hover=True)
if self.click:
self.submenu = False
else:
self.goback.show_button()
def exitbutton(self):
if self.is_inside(self.exit_coords):
self.exit.show_button(hover=True)
if self.click:
sys.exit()
else:
self.exit.show_button()
def events(self):
self.mousepos()
self.click = False
for event in pygame.event.get():
if event.type == QUIT:
print("koniec")
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit()
if event.key == K_SPACE:
pass
if event.key == K_RETURN:
self.showmain = False
if event.key == K_LCTRL:
pass
elif event.type == MOUSEBUTTONDOWN:
self.click = True
def menuloop(self):
while self.showmain:
clear()
self.events()
self.mainback.show(0, 0)
if self.submenu:
self.aboutback.show(0, 0)
self.gobackbutton()
else:
self.startbutton()
self.aboutbutton()
self.exitbutton()
self.font.show(u"X: %s, Y: %s" % (self.pos), DARKRED, 10, 30, 1, 1)
pygame.display.flip()
clear()
self.mainback.show(0, 0)
self.frame.show(13, 14, 1.0, 1.0)
self.font.show(u"Ładuję...", DARKRED, 10, 30, 2, 2)
pygame.display.flip()
``` |
[
{
"content": "Here is a code file:\n```python\nfrom ZODB.utils import u64\nimport unittest\n\nfrom .. import Object\nfrom .base import DBSetup\n\nclass SearchTests(DBSetup, unittest.TestCase):\n\n def setUp(self):\n super(SearchTests, self).setUp()\n import newt.db\n self.db = newt.db.DB... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nfrom ZODB.utils import u64\nimport unittest\n\nfrom .. import Object\nfrom .base import DBSetup\n\nclass SearchTests(DBSetup, unittest.TestCase):\n\n def setUp(self):\n super(SearchTests, self).setUp()\n import newt.db\n self... | ```python
from ZODB.utils import u64
import unittest
from .. import Object
from .base import DBSetup
class SearchTests(DBSetup, unittest.TestCase):
def setUp(self):
super(SearchTests, self).setUp()
import newt.db
self.db = newt.db.DB(self.dsn)
self.conn = self.db.open()
def tearDown(self):
self.db.close()
super(SearchTests, self).tearDown()
def store(self, index, **data):
self.conn.root()[index] = o = Object(**data)
self.conn.transaction_manager.commit()
return u64(o._p_serial)
def test_search(self):
for i in range(9):
tid = self.store(i, i=i)
sql = """
select * from newt
where state->>'i' >= %s and state->>'i' <= %s
order by zoid
"""
obs = self.conn.search(sql, '2', '5')
self.assertEqual([2, 3, 4, 5], [o.i for o in obs])
# test stand-alone API:
from .. import search
obs = search.search(self.conn, sql, '2', '5')
self.assertEqual([2, 3, 4, 5], [o.i for o in obs])
# separate conn (to make sure we get new ghosts, and
# ``where``` api and keyword args
conn2 = self.db.open()
obs2 = self.conn.where("state->>'i' >= %(a)s and state->>'i' <= %(b)s",
a='2', b='5')
self.assertEqual([2, 3, 4, 5], sorted(o.i for o in obs2))
self.assertEqual(set(o._p_oid for o in obs), # yes, these are
set(o._p_oid for o in obs2)) # persistent objects :)
# test stand-alone API:
obs2 = search.where(self.conn,
"state->>'i' >= %(a)s and state->>'i' <= %(b)s",
a='2', b='5')
self.assertEqual([2, 3, 4, 5], sorted(o.i for o in obs2))
# Test allready-mogrified data:
obs2 = search.where(self.conn,
b"state->>'i' >= '2' and state->>'i' <= '5'")
self.assertEqual([2, 3, 4, 5], sorted(o.i for o in obs2))
def test_search_batch(self):
for i in range(99):
tid = self.store(i, i=i)
conn2 = self.db.open()
sql = """
select * from newt
where (state->>'i')::int >= %(a)s and (state->>'i')::int <= %(b)s
order by zoid
"""
total, batch = conn2.search_batch(sql, dict(a=2, b=90), 10, 20)
self.assertEqual(total, 89)
self.assertEqual(list(range(12, 32)), [o.i for o in batch])
# We didn't end up with all of the objects getting loaded:
self.assertEqual(len(conn2._cache), 20)
# test stand-alone API:
from .. import search
totalbatch = search.search_batch(
conn2, sql, dict(a=2, b=90), 10, 20)
self.assertEqual((total, batch), totalbatch)
# where_batch:
total, batch = conn2.where_batch(
"(state->>'i')::int >= %(a)s and (state->>'i')::int <= %(b)s"
" order by zoid", dict(a=2, b=90), 10, 20)
self.assertEqual(total, 89)
self.assertEqual(list(range(12, 32)), [o.i for o in batch])
# where_batch binary/pre-mogrified:
total, batch = conn2.where_batch(
b"(state->>'i')::int >= 2 and (state->>'i')::int <= 90"
b" order by zoid", 10, 20)
self.assertEqual(total, 89)
self.assertEqual(list(range(12, 32)), [o.i for o in batch])
def test_search_no_args_no_problem_w_percent(self):
self.assertEqual(
[],
list(self.conn.search("select * from newt where 'x' like 'y%'")))
self.assertEqual(
(0, []),
self.conn.search_batch(
"select * from newt where 'x' like 'y%'", 1, 10))
def test_create_text_index_sql(self):
from .. import search
self.assertEqual(
expect_simple_text,
self.conn.create_text_index_sql('mytext', 'text'),
)
self.assertEqual(
expect_simple_text,
search.create_text_index_sql('mytext', 'text'),
)
self.assertEqual(
expect_text,
self.conn.create_text_index_sql('mytext', ['text', 'title']),
)
self.assertEqual(
expect_text,
search.create_text_index_sql('mytext', ['text', 'title']),
)
self.assertEqual(
expect_text_klingon,
search.create_text_index_sql('mytext', ['text', 'title'],
config='klingon'),
)
self.assertEqual(
expect_weighted_text,
self.conn.create_text_index_sql(
'mytext', 'text', ['title', 'description']),
)
self.assertEqual(
expect_weighted_text,
search.create_text_index_sql(
'mytext', 'text', ['title', 'description']),
)
self.assertEqual(
expect_more_weighted_text,
self.conn.create_text_index_sql(
'mytext',
'text',
['title', 'description'],
'keywords',
"state ->> 'really important'"),
)
self.assertEqual(
expect_more_weighted_text,
search.create_text_index_sql(
'mytext',
'text',
['title', 'description'],
'keywords',
"state ->> 'really important'"),
)
self.assertEqual(
expect_A_text,
self.conn.create_text_index_sql('mytext', A='text'),
)
self.assertEqual(
expect_A_text,
search.create_text_index_sql('mytext', A='text'),
)
self.assertRaises(TypeError, self.conn.create_text_index_sql, 'mytext')
self.assertRaises(TypeError, search.create_text_index_sql, 'mytext')
def test_create_text_index_arg_passthrough(self):
import mock
with mock.patch("newt.db.search.create_text_index_sql") as f:
f.return_value = 'select'
self.conn.create_text_index('txt', 'text', 'C', 'B', 'A',
config='Klingon')
f.assert_called_with('txt', 'text', 'C', 'B', 'A', 'Klingon')
def test_create_text_index(self):
self.conn.create_text_index('txt', 'text')
self.store('a', text='foo bar')
self.store('b', text='foo baz')
self.store('c', text='green eggs and spam')
self.assertEqual(
set((self.conn.root.a, self.conn.root.b)),
set(self.conn.where("txt(state) @@ 'foo'")),
)
self.assertEqual(
set((self.conn.root.a, )),
set(self.conn.where("txt(state) @@ 'foo & bar'")),
)
self.assertEqual(
set((self.conn.root.a, self.conn.root.c)),
set(self.conn.where("txt(state) @@ 'bar | green'")),
)
def test_create_text_index_standalone(self):
from .. import search
search.create_text_index(self.conn, 'txt', 'text')
self.store('a', text='foo bar')
self.store('b', text='foo baz')
self.store('c', text='green eggs and spam')
self.assertEqual(
set((self.conn.root.a, self.conn.root.b)),
set(self.conn.where("txt(state) @@ 'foo'")),
)
self.assertEqual(
set((self.conn.root.a, )),
set(self.conn.where("txt(state) @@ 'foo & bar'")),
)
self.assertEqual(
set((self.conn.root.a, self.conn.root.c)),
set(self.conn.where("txt(state) @@ 'bar | green'")),
)
def test_create_text_index_db_object(self):
from .. import search
conn = self.conn.root()
search.create_text_index(conn, 'txt', 'text')
self.store('a', text='foo bar')
self.store('b', text='foo baz')
self.store('c', text='green eggs and spam')
self.assertEqual(
set((self.conn.root.a, self.conn.root.b)),
set(self.conn.where("txt(state) @@ 'foo'")),
)
self.assertEqual(
set((self.conn.root.a, )),
set(self.conn.where("txt(state) @@ 'foo & bar'")),
)
self.assertEqual(
set((self.conn.root.a, self.conn.root.c)),
set(self.conn.where("txt(state) @@ 'bar | green'")),
)
def test_query_data(self):
from .. import search
self.store('a', text='foo bar')
self.store('b', text='foo baz')
self.store('c', text='green eggs and spam')
self.assertEqual(
[[1]],
[list(map(int, r)) for r in
self.conn.query_data(
"""select zoid from newt
where state @> '{"text": "foo bar"}'""")
])
self.assertEqual(
[[1]],
[list(map(int, r)) for r in
search.query_data(
self.conn,
"""select zoid from newt
where state @> '{"text": "foo bar"}'""")
])
# Make sure we can search using a ZODB connection:
self.assertEqual(
[[1]],
[list(map(int, r)) for r in
search.query_data(
self.conn._connection,
"""select zoid from newt
where state @> '{"text": "foo bar"}'""")
])
# For good mesaue, we can search with a persistent object:
self.assertEqual(
[[1]],
[list(map(int, r)) for r in
search.query_data(
self.conn._connection.root(),
"""select zoid from newt
where state @> '{"text": "foo bar"}'""")
])
expect_simple_text = """\
create or replace function mytext(state jsonb) returns tsvector as $$
declare
text text;
result tsvector;
begin
if state is null then return null; end if;
text = coalesce(state ->> 'text', '');
result := to_tsvector(text);
return result;
end
$$ language plpgsql immutable;
create index newt_mytext_idx on newt using gin (mytext(state));
"""
expect_text = """\
create or replace function mytext(state jsonb) returns tsvector as $$
declare
text text;
result tsvector;
begin
if state is null then return null; end if;
text = coalesce(state ->> 'text', '');
text = text || coalesce(state ->> 'title', '');
result := to_tsvector(text);
return result;
end
$$ language plpgsql immutable;
create index newt_mytext_idx on newt using gin (mytext(state));
"""
expect_text_klingon = """\
create or replace function mytext(state jsonb) returns tsvector as $$
declare
text text;
result tsvector;
begin
if state is null then return null; end if;
text = coalesce(state ->> 'text', '');
text = text || coalesce(state ->> 'title', '');
result := to_tsvector('klingon', text);
return result;
end
$$ language plpgsql immutable;
create index newt_mytext_idx on newt using gin (mytext(state));
"""
expect_weighted_text = """\
create or replace function mytext(state jsonb) returns tsvector as $$
declare
text text;
result tsvector;
begin
if state is null then return null; end if;
text = coalesce(state ->> 'text', '');
result := to_tsvector(text);
text = coalesce(state ->> 'title', '');
text = text || coalesce(state ->> 'description', '');
result := result || setweight(to_tsvector(text), 'C');
return result;
end
$$ language plpgsql immutable;
create index newt_mytext_idx on newt using gin (mytext(state));
"""
expect_more_weighted_text = """\
create or replace function mytext(state jsonb) returns tsvector as $$
declare
text text;
result tsvector;
begin
if state is null then return null; end if;
text = coalesce(state ->> 'text', '');
result := to_tsvector(text);
text = coalesce(state ->> 'title', '');
text = text || coalesce(state ->> 'description', '');
result := result || setweight(to_tsvector(text), 'C');
text = coalesce(state ->> 'keywords', '');
result := result || setweight(to_tsvector(text), 'B');
text = coalesce(state ->> 'really important', '');
result := result || setweight(to_tsvector(text), 'A');
return result;
end
$$ language plpgsql immutable;
create index newt_mytext_idx on newt using gin (mytext(state));
"""
expect_A_text = """\
create or replace function mytext(state jsonb) returns tsvector as $$
declare
text text;
result tsvector;
begin
if state is null then return null; end if;
text = coalesce(state ->> 'text', '');
result := setweight(to_tsvector(text), 'A');
return result;
end
$$ language plpgsql immutable;
create index newt_mytext_idx on newt using gin (mytext(state));
"""
``` |
[
{
"content": "Repeat the code precisely:\n```python\n# Import a whole load of stuff\r\nfrom System.IO import *\r\nfrom System.Drawing import *\r\nfrom System.Runtime.Remoting import *\r\nfrom System.Threading import *\r\nfrom System.Windows.Forms import *\r\nfrom System.Xml.Serialization import *\r\nfrom System... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n# Import a whole load of stuff\r\nfrom System.IO import *\r\nfrom System.Drawing import *\r\nfrom System.Runtime.Remoting import *\r\nfrom System.Threading import *\r\nfrom System.Windows.Forms import *\r\nfrom System.Xml.Serialization import ... | ```python
# Import a whole load of stuff
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from Analysis.EDM import *
from DAQ.Environment import *
from EDMConfig import *
def saveBlockConfig(path, config):
fs = FileStream(path, FileMode.Create)
s = XmlSerializer(BlockConfig)
s.Serialize(fs,config)
fs.Close()
def loadBlockConfig(path):
fs = FileStream(path, FileMode.Open)
s = XmlSerializer(BlockConfig)
bc = s.Deserialize(fs)
fs.Close()
return bc
def writeLatestBlockNotificationFile(cluster, blockIndex):
fs = FileStream(Environs.FileSystem.Paths["settingsPath"] + "\\BlockHead\\latestBlock.txt", FileMode.Create)
sw = StreamWriter(fs)
sw.WriteLine(cluster + "\t" + str(blockIndex))
sw.Close()
fs.Close()
def checkYAGAndFix():
interlockFailed = hc.YAGInterlockFailed;
if (interlockFailed):
bh.StopPattern();
bh.StartPattern();
def printWaveformCode(bc, name):
print(name + ": " + str(bc.GetModulationByName(name).Waveform.Code) + " -- " + str(bc.GetModulationByName(name).Waveform.Inverted))
def prompt(text):
sys.stdout.write(text)
return sys.stdin.readline().strip()
def measureParametersAndMakeBC(cluster, eState, bState, rfState, scramblerV, measProbePwr, measPumpPwr):
    """Measure the current machine state and build the BlockConfig for one block.

    Updates the hardware-controller (hc) monitors, copies the measured values
    and switch states into a copy of the default BlockConfig, generates fresh
    modulation waveform codes, and returns the configured BlockConfig.
    """
    fileSystem = Environs.FileSystem
    print("Measuring parameters ...")
    # RF monitors are read with the pattern stopped, the rest with it running.
    bh.StopPattern()
    hc.UpdateRFPowerMonitor()
    hc.UpdateRFFrequencyMonitor()
    bh.StartPattern()
    hc.UpdateBCurrentMonitor()
    hc.UpdateVMonitor()
    hc.UpdateProbeAOMFreqMonitor()
    hc.UpdatePumpAOMFreqMonitor()
    #hc.CheckPiMonitor()
    print("Measuring polarizer angle")
    hc.UpdateProbePolAngleMonitor()
    hc.UpdatePumpPolAngleMonitor()
    pumpPolAngle = hc.pumpPolAngle
    probePolAngle = hc.probePolAngle
    print("V plus: " + str(hc.CPlusMonitorVoltage * hc.CPlusMonitorScale))
    print("V minus: " + str(hc.CMinusMonitorVoltage * hc.CMinusMonitorScale))
    print("Bias: " + str(hc.BiasCurrent))
    print("B step: " + str(abs(hc.FlipStepCurrent)))
    print("DB step: " + str(abs(hc.CalStepCurrent)))
    # load a default BlockConfig and customise it appropriately
    settingsPath = fileSystem.Paths["settingsPath"] + "\\BlockHead\\"
    bc = loadBlockConfig(settingsPath + "default.xml")
    bc.Settings["cluster"] = cluster
    bc.Settings["eState"] = eState
    bc.Settings["bState"] = bState
    bc.Settings["rfState"] = rfState
    bc.Settings["phaseScramblerV"] = scramblerV
    bc.Settings["probePolarizerAngle"] = probePolAngle
    bc.Settings["pumpPolarizerAngle"] = pumpPolAngle
    bc.Settings["ePlus"] = hc.CPlusMonitorVoltage * hc.CPlusMonitorScale
    bc.Settings["eMinus"] = hc.CMinusMonitorVoltage * hc.CMinusMonitorScale
    bc.Settings["pumpAOMFreq"] = hc.PumpAOMFrequencyCentre
    bc.Settings["bBiasV"] = hc.SteppingBiasVoltage
    bc.Settings["greenDCFM"] = hc.GreenSynthDCFM
    bc.Settings["greenAmp"] = hc.GreenSynthOnAmplitude
    bc.Settings["greenFreq"] = hc.GreenSynthOnFrequency
    bc.Settings["measStartProbePwr"] = measProbePwr
    bc.Settings["measStartPumpPwr"] = measPumpPwr
    # modulation centres/steps; currents are in mA, converted to A
    bc.GetModulationByName("B").Centre = (hc.BiasCurrent)/1000
    bc.GetModulationByName("B").Step = abs(hc.FlipStepCurrent)/1000
    bc.GetModulationByName("DB").Step = abs(hc.CalStepCurrent)/1000
    # these next 3, seemingly redundant, lines are to preserve backward compatibility
    bc.GetModulationByName("B").PhysicalCentre = (hc.BiasCurrent)/1000
    bc.GetModulationByName("B").PhysicalStep = abs(hc.FlipStepCurrent)/1000
    bc.GetModulationByName("DB").PhysicalStep = abs(hc.CalStepCurrent)/1000
    bc.GetModulationByName("RF1A").Centre = hc.RF1AttCentre
    bc.GetModulationByName("RF1A").Step = hc.RF1AttStep
    bc.GetModulationByName("RF1A").PhysicalCentre = hc.RF1PowerCentre
    bc.GetModulationByName("RF1A").PhysicalStep = hc.RF1PowerStep
    bc.GetModulationByName("RF2A").Centre = hc.RF2AttCentre
    bc.GetModulationByName("RF2A").Step = hc.RF2AttStep
    bc.GetModulationByName("RF2A").PhysicalCentre = hc.RF2PowerCentre
    bc.GetModulationByName("RF2A").PhysicalStep = hc.RF2PowerStep
    bc.GetModulationByName("RF1F").Centre = hc.RF1FMCentre
    bc.GetModulationByName("RF1F").Step = hc.RF1FMStep
    bc.GetModulationByName("RF1F").PhysicalCentre = hc.RF1FrequencyCentre
    bc.GetModulationByName("RF1F").PhysicalStep = hc.RF1FrequencyStep
    bc.GetModulationByName("RF2F").Centre = hc.RF2FMCentre
    bc.GetModulationByName("RF2F").Step = hc.RF2FMStep
    bc.GetModulationByName("RF2F").PhysicalCentre = hc.RF2FrequencyCentre
    bc.GetModulationByName("RF2F").PhysicalStep = hc.RF2FrequencyStep
    bc.GetModulationByName("LF1").Centre = hc.probeAOMVoltage
    bc.GetModulationByName("LF1").Step = hc.probeAOMStep
    bc.GetModulationByName("LF1").PhysicalCentre = hc.ProbeAOMFrequencyCentre
    bc.GetModulationByName("LF1").PhysicalStep = hc.ProbeAOMFrequencyStep
    bc.GetModulationByName("LF2").Centre = hc.PumpAOMVoltage
    # BUGFIX: this line previously assigned .Centre a second time (with the
    # step value), leaving LF2's Step at its default. Assign .Step instead,
    # matching the Centre/Step/PhysicalCentre/PhysicalStep pattern used by
    # every other modulation above.
    bc.GetModulationByName("LF2").Step = hc.PumpAOMStep
    bc.GetModulationByName("LF2").PhysicalCentre = hc.PumpAOMFrequencyCentre
    bc.GetModulationByName("LF2").PhysicalStep = hc.PumpAOMFrequencyStep
    # generate the waveform codes
    print("Generating waveform codes ...")
    eWave = bc.GetModulationByName("E").Waveform
    eWave.Name = "E"
    lf1Wave = bc.GetModulationByName("LF1").Waveform
    lf1Wave.Name = "LF1"
    ws = WaveformSetGenerator.GenerateWaveforms( (eWave, lf1Wave), ("B","DB","PI","RF1A","RF2A","RF1F","RF2F","LF2") )
    bc.GetModulationByName("B").Waveform = ws["B"]
    bc.GetModulationByName("DB").Waveform = ws["DB"]
    bc.GetModulationByName("PI").Waveform = ws["PI"]
    bc.GetModulationByName("RF1A").Waveform = ws["RF1A"]
    bc.GetModulationByName("RF2A").Waveform = ws["RF2A"]
    bc.GetModulationByName("RF1F").Waveform = ws["RF1F"]
    bc.GetModulationByName("RF2F").Waveform = ws["RF2F"]
    bc.GetModulationByName("LF2").Waveform = ws["LF2"]
    # change the inversions of the static codes E and LF1
    bc.GetModulationByName("E").Waveform.Inverted = WaveformSetGenerator.RandomBool()
    bc.GetModulationByName("LF1").Waveform.Inverted = WaveformSetGenerator.RandomBool()
    # print the waveform codes
    # printWaveformCode(bc, "E")
    # printWaveformCode(bc, "B")
    # printWaveformCode(bc, "DB")
    # printWaveformCode(bc, "PI")
    # printWaveformCode(bc, "RF1A")
    # printWaveformCode(bc, "RF2A")
    # printWaveformCode(bc, "RF1F")
    # printWaveformCode(bc, "RF2F")
    # printWaveformCode(bc, "LF1")
    # printWaveformCode(bc, "LF2")
    # store e-switch info in block config
    print("Storing E switch parameters ...")
    bc.Settings["eRampDownTime"] = hc.ERampDownTime
    bc.Settings["eRampDownDelay"] = hc.ERampDownDelay
    bc.Settings["eBleedTime"] = hc.EBleedTime
    bc.Settings["eSwitchTime"] = hc.ESwitchTime
    bc.Settings["eRampUpTime"] = hc.ERampUpTime
    bc.Settings["eRampUpDelay"] = hc.ERampUpDelay
    # this is for legacy analysis compatibility
    bc.Settings["eDischargeTime"] = hc.ERampDownTime + hc.ERampDownDelay
    bc.Settings["eChargeTime"] = hc.ERampUpTime + hc.ERampUpDelay
    # store the E switch asymmetry in the block
    bc.Settings["E0PlusBoost"] = hc.E0PlusBoost
    return bc
# lock gains
# microamps of current per volt of control input
kSteppingBiasCurrentPerVolt = 2453.06
# max change in the b-bias voltage per block
kBMaxChange = 0.05
# volts of rf*a input required per cal's worth of offset
kRFAVoltsPerCal = 3.2
# max change in an rf*a attenuator voltage per block (V)
kRFAMaxChange = 0.1
# volts of rf*f input required per cal's worth of offset
kRFFVoltsPerCal = 8
# max change in an rf*f FM voltage per block (V)
kRFFMaxChange = 0.1
def updateLocks(bState):
    """Apply one round of software-lock feedback from the last acquired block.

    Reads the PMT channel values from bh.DBlock and nudges the stepping B
    bias, the RF1/RF2 attenuator and FM centres, and the probe-laser PZT
    voltage toward zero lock error. Each step is windowed to a per-block
    maximum change. `bState` sets the sign of the B-bias feedback.
    """
    pmtChannelValues = bh.DBlock.ChannelValues[0]
    # note the weird python syntax for a one element list
    sigValue = pmtChannelValues.GetValue(("SIG",))
    bValue = pmtChannelValues.GetValue(("B",))
    dbValue = pmtChannelValues.GetValue(("DB",))
    rf1aValue = pmtChannelValues.GetValue(("RF1A","DB"))
    rf2aValue = pmtChannelValues.GetValue(("RF2A","DB"))
    rf1fValue = pmtChannelValues.GetValue(("RF1F","DB"))
    rf2fValue = pmtChannelValues.GetValue(("RF2F","DB"))
    lf1Value = pmtChannelValues.GetValue(("LF1",))
    lf1dbValue = pmtChannelValues.GetValue(("LF1","DB"))
    print "SIG: " + str(sigValue)
    print "B: " + str(bValue) + " DB: " + str(dbValue)
    print "RF1A: " + str(rf1aValue) + " RF2A: " + str(rf2aValue)
    print "RF1F: " + str(rf1fValue) + " RF2F: " + str(rf2fValue)
    print "LF1: " + str(lf1Value) + " LF1.DB: " + str(lf1dbValue)
    # B bias lock
    # the sign of the feedback depends on the b-state
    if bState:
        feedbackSign = 1
    else:
        feedbackSign = -1
    # error signal is the B/DB channel ratio, scaled by the cal step current
    deltaBias = - (1.0/10.0) * feedbackSign * (hc.CalStepCurrent * (bValue / dbValue)) / kSteppingBiasCurrentPerVolt
    deltaBias = windowValue(deltaBias, -kBMaxChange, kBMaxChange)
    print "Attempting to change stepping B bias by " + str(deltaBias) + " V."
    newBiasVoltage = windowValue( hc.SteppingBiasVoltage - deltaBias, -5, 5)
    hc.SetSteppingBBiasVoltage( newBiasVoltage )
    # RFA locks
    deltaRF1A = - (6.0/3.0) * (rf1aValue / dbValue) * kRFAVoltsPerCal
    deltaRF1A = windowValue(deltaRF1A, -kRFAMaxChange, kRFAMaxChange)
    print "Attempting to change RF1A by " + str(deltaRF1A) + " V."
    newRF1A = windowValue( hc.RF1AttCentre - deltaRF1A, hc.RF1AttStep, 5 - hc.RF1AttStep)
    hc.SetRF1AttCentre( newRF1A )
    #
    deltaRF2A = - (6.0/3.0) * (rf2aValue / dbValue) * kRFAVoltsPerCal
    deltaRF2A = windowValue(deltaRF2A, -kRFAMaxChange, kRFAMaxChange)
    print "Attempting to change RF2A by " + str(deltaRF2A) + " V."
    newRF2A = windowValue( hc.RF2AttCentre - deltaRF2A, hc.RF2AttStep, 5 - hc.RF2AttStep )
    hc.SetRF2AttCentre( newRF2A )
    # RFF locks
    deltaRF1F = - (10.0/4.0) * (rf1fValue / dbValue) * kRFFVoltsPerCal
    deltaRF1F = windowValue(deltaRF1F, -kRFFMaxChange, kRFFMaxChange)
    print "Attempting to change RF1F by " + str(deltaRF1F) + " V."
    newRF1F = windowValue( hc.RF1FMCentre - deltaRF1F, hc.RF1FMStep, 5 - hc.RF1FMStep)
    hc.SetRF1FMCentre( newRF1F )
    #
    deltaRF2F = - (10.0/4.0) * (rf2fValue / dbValue) * kRFFVoltsPerCal
    deltaRF2F = windowValue(deltaRF2F, -kRFFMaxChange, kRFFMaxChange)
    print "Attempting to change RF2F by " + str(deltaRF2F) + " V."
    newRF2F = windowValue( hc.RF2FMCentre - deltaRF2F, hc.RF2FMStep, 5 - hc.RF2FMStep )
    hc.SetRF2FMCentre( newRF2F )
    # probe laser (PZT) lock
    deltaLF1 = -1.25 * (lf1Value / dbValue)
    deltaLF1 = windowValue(deltaLF1, -0.1, 0.1)
    print "Attempting to change LF1 by " + str(deltaLF1) + " V."
    newLF1 = windowValue( hc.FLPZTVoltage - deltaLF1, hc.FLPZTStep, 5 - hc.FLPZTStep )
    hc.SetFLPZTVoltage( newLF1 )
def updateLocksNL(bState):
    """Normalised-channel variant of updateLocks.

    Uses the normalised PMT channels (DBlock.ChannelValues[8]) for the
    feedback terms, monitors the RF reflected-power detectors, and locks
    both the probe (LF1) and pump (LF2) AOM voltages. If an RF attenuator
    pins at its 4.9 V limit, the green synth amplitude is raised by 1 dBm
    and the attenuator is reset to 4.5 V. `bState` sets the sign of the
    B-bias feedback.
    """
    pmtChannelValues = bh.DBlock.ChannelValues[0]
    normedpmtChannelValues = bh.DBlock.ChannelValues[8]
    rf1ampReftChannelValues = bh.DBlock.ChannelValues[6]
    rf2ampReftChannelValues = bh.DBlock.ChannelValues[7]
    # note the weird python syntax for a one element list
    sigValue = pmtChannelValues.GetValue(("SIG",))
    bValue = pmtChannelValues.GetValue(("B",))
    dbValue = pmtChannelValues.GetValue(("DB",))
    bDBValue = normedpmtChannelValues.GetSpecialValue("BDB")
    rf1aValue = pmtChannelValues.GetValue(("RF1A",))
    rf1adbdbValue = normedpmtChannelValues.GetSpecialValue("RF1ADBDB")
    rf2aValue = pmtChannelValues.GetValue(("RF2A",))
    rf2adbdbValue = normedpmtChannelValues.GetSpecialValue("RF2ADBDB")
    rf1fValue = pmtChannelValues.GetValue(("RF1F",))
    rf1fdbdbValue = normedpmtChannelValues.GetSpecialValue("RF1FDBDB")
    rf2fValue = pmtChannelValues.GetValue(("RF2F",))
    rf2fdbdbValue = normedpmtChannelValues.GetSpecialValue("RF2FDBDB")
    lf1Value = pmtChannelValues.GetValue(("LF1",))
    lf1dbdbValue = normedpmtChannelValues.GetSpecialValue("LF1DBDB")
    lf1dbValue = normedpmtChannelValues.GetSpecialValue("LF1DB")
    lf2Value = pmtChannelValues.GetValue(("LF2",))
    # NOTE(review): LF2DBDB is read from the raw channel set, unlike the
    # other *DBDB values which come from the normalised set -- confirm.
    lf2dbdbValue = pmtChannelValues.GetSpecialValue("LF2DBDB")
    rf1ampRefSig = rf1ampReftChannelValues.GetValue(("SIG",))
    rf2ampRefSig = rf2ampReftChannelValues.GetValue(("SIG",))
    rf1ampRefE = rf1ampReftChannelValues.GetValue(("E",))
    rf2ampRefE = rf2ampReftChannelValues.GetValue(("E",))
    rf1ampRefEErr = rf1ampReftChannelValues.GetError(("E",))
    rf2ampRefEErr = rf2ampReftChannelValues.GetError(("E",))
    print "SIG: " + str(sigValue)
    print "B: " + str(bValue) + " DB: " + str(dbValue)
    print "B/DB" + str(bDBValue)
    print "RF1A: " + str(rf1aValue) + " RF2A: " + str(rf2aValue)
    print "RF1A.DB/DB: " + str(rf1adbdbValue) + " RF2A.DB/DB: " + str(rf2adbdbValue)
    print "RF1F: " + str(rf1fValue) + " RF2F: " + str(rf2fValue)
    print "LF1: " + str(lf1Value) + " LF1.DB/DB: " + str(lf1dbdbValue)
    print "LF2: " + str(lf2Value) + " LF2.DB/DB: " + str(lf2dbdbValue)
    print "RF1 Reflected: " + str(rf1ampRefSig) + " RF2 Reflected: " + str(rf2ampRefSig)
    print "{E}_RF1 Reflected: {" + str(rf1ampRefE) + " , " + str(rf1ampRefEErr) + " }"
    print "{E}_RF2 Reflected: {" + str(rf2ampRefE) + " , " + str(rf2ampRefEErr) + " }"
    # B bias lock
    # the sign of the feedback depends on the b-state
    if bState:
        feedbackSign = 1
    else:
        feedbackSign = -1
    deltaBias = - (1.0/10.0) * feedbackSign * (hc.CalStepCurrent * bDBValue) / kSteppingBiasCurrentPerVolt
    deltaBias = windowValue(deltaBias, -kBMaxChange, kBMaxChange)
    #deltaBias = 0
    print "Attempting to change stepping B bias by " + str(deltaBias) + " V."
    newBiasVoltage = windowValue( hc.SteppingBiasVoltage - deltaBias, -5, 5)
    hc.SetSteppingBBiasVoltage( newBiasVoltage )
    # RFA locks
    deltaRF1A = - (1.0/2.0) * rf1adbdbValue * kRFAVoltsPerCal
    deltaRF1A = windowValue(deltaRF1A, -kRFAMaxChange, kRFAMaxChange)
    #deltaRF1A = 0
    newRF1A = windowValue( hc.RF1AttCentre - deltaRF1A, hc.RF1AttStep, 5 - hc.RF1AttStep)
    # 4.9 V is the windowed ceiling (5 - step); treat it as "pinned"
    if (newRF1A == 4.9):
        newSynthAmp = hc.GreenSynthOnAmplitude + 1
        print "RF1A pinned, increasing synth to " + str(newSynthAmp) + " dBm."
        print "Setting RF1A to 4.5 V."
        newRF1A = 4.5
        hc.SetRF1AttCentre( newRF1A )
        hc.SetGreenSynthAmp(newSynthAmp)
    else:
        print "Attempting to change RF1A by " + str(deltaRF1A) + " V."
        hc.SetRF1AttCentre( newRF1A )
    #
    deltaRF2A = - (1.0/2.0) * rf2adbdbValue * kRFAVoltsPerCal
    deltaRF2A = windowValue(deltaRF2A, -kRFAMaxChange, kRFAMaxChange)
    #deltaRF2A = 0
    newRF2A = windowValue( hc.RF2AttCentre - deltaRF2A, hc.RF2AttStep, 5 - hc.RF2AttStep )
    if (newRF2A == 4.9):
        newSynthAmp = hc.GreenSynthOnAmplitude + 1
        print "RF2A pinned, increasing synth to " + str(newSynthAmp) + " dBm."
        print "Setting RF2A to 4.5 V."
        newRF2A = 4.5
        hc.SetRF2AttCentre( newRF2A )
        hc.SetGreenSynthAmp(newSynthAmp)
    else:
        print "Attempting to change RF2A by " + str(deltaRF2A) + " V."
        hc.SetRF2AttCentre( newRF2A )
    # RFF locks
    deltaRF1F = - (1.0/2.0) * rf1fdbdbValue * kRFFVoltsPerCal
    deltaRF1F = windowValue(deltaRF1F, -kRFFMaxChange, kRFFMaxChange)
    #deltaRF1F = 0
    print "Attempting to change RF1F by " + str(deltaRF1F) + " V."
    newRF1F = windowValue( hc.RF1FMCentre - deltaRF1F, hc.RF1FMStep, 1.1 - hc.RF1FMStep)
    hc.SetRF1FMCentre( newRF1F )
    #
    deltaRF2F = - (1.0/2.0) * rf2fdbdbValue * kRFFVoltsPerCal
    deltaRF2F = windowValue(deltaRF2F, -kRFFMaxChange, kRFFMaxChange)
    #deltaRF2F = 0
    print "Attempting to change RF2F by " + str(deltaRF2F) + " V."
    newRF2F = windowValue( hc.RF2FMCentre - deltaRF2F, hc.RF2FMStep, 1.1 - hc.RF2FMStep )
    hc.SetRF2FMCentre( newRF2F )
    # Laser frequency lock (-ve multiplier in f0 mode and +ve in f1)
    deltaLF1 = -2.5 * ( lf1dbdbValue)
    deltaLF1 = windowValue(deltaLF1, -0.1, 0.1)
    #deltaLF1 = 0
    print "Attempting to change LF1 by " + str(deltaLF1) + " V."
    newLF1 = windowValue( hc.probeAOMVoltage - deltaLF1, hc.probeAOMStep, 10 - hc.probeAOMStep )
    hc.SetprobeAOMVoltage( newLF1 )
    # Laser frequency lock (-ve multiplier in f0 mode and +ve in f1)
    deltaLF2 = - 2.5 * lf2dbdbValue
    deltaLF2 = windowValue(deltaLF2, -0.1, 0.1)
    #deltaLF2 = 0
    print "Attempting to change LF2 by " + str(deltaLF2) + " V."
    newLF2 = windowValue( hc.PumpAOMVoltage - deltaLF2, hc.PumpAOMStep, 10 - hc.PumpAOMStep )
    hc.SetPumpAOMVoltage( newLF2 )
def windowValue(value, minValue, maxValue):
    """Clamp `value` into the closed interval [minValue, maxValue].

    BUGFIX: the original if/else chain returned maxValue when
    value == minValue (the strict `value > minValue` test failed and the
    fall-through branch picked the wrong bound). A plain min/max clamp
    handles both boundaries correctly.
    """
    return max(minValue, min(value, maxValue))
# blocks between target-rotation steps (not referenced in the code visible here)
kTargetRotationPeriod = 10
# re-zero the leakage monitors every this many blocks (used in EDMGo)
kReZeroLeakageMonitorsPeriod = 10
# shared RNG for the Ramsey-phase scrambler (System.Random)
r = Random()
def EDMGo():
# Setup
f = None
fileSystem = Environs.FileSystem
dataPath = fileSystem.GetDataDirectory(fileSystem.Paths["edmDataPath"])
settingsPath = fileSystem.Paths["settingsPath"] + "\\BlockHead\\"
print("Data directory is : " + dataPath)
print("")
suggestedClusterName = fileSystem.GenerateNextDataFileName()
sm.SelectProfile("Scan B")
# User inputs data
cluster = prompt("Cluster name [" + suggestedClusterName +"]: ")
if cluster == "":
cluster = suggestedClusterName
print("Using cluster " + suggestedClusterName)
measProbePwr = prompt("Measured probe power (mV_3): ")
measPumpPwr = prompt("Measured pump power (mV_3): ")
nightBool = prompt("Night run (Y/N)? ")
eState = hc.EManualState
print("E-state: " + str(eState))
bState = hc.BManualState
print("B-state: " + str(bState))
rfState = hc.RFManualState
print("rf-state: " + str(rfState))
# this is to make sure the B current monitor is in a sensible state
hc.UpdateBCurrentMonitor()
# randomise Ramsey phase
scramblerV = 0.97156 * r.NextDouble()
hc.SetScramblerVoltage(scramblerV)
# randomise polarizations
#hc.SetRandomProbePosition()
#hc.SetRandomPumpPosition()
# calibrate leakage monitors
print("calibrating leakage monitors..")
print("E-field off")
hc.EnableGreenSynth( False )
hc.EnableEField( False )
System.Threading.Thread.Sleep(10000)
hc.EnableBleed( True )
System.Threading.Thread.Sleep(5000)
hc.CalibrateIMonitors()
hc.EnableBleed( False )
System.Threading.Thread.Sleep(500)
print("E-field on")
hc.EnableEField( True )
hc.EnableGreenSynth( True )
print("leakage monitors calibrated")
#print("Waiting For Polarizers (maybe)")
bc = measureParametersAndMakeBC(cluster, eState, bState, rfState, scramblerV, measProbePwr, measPumpPwr)
# loop and take data
blockIndex = 0
maxBlockIndex = 10000
dbValueList = []
Emag1List =[]
Emini1List=[]
Emini2List=[]
Emini3List=[]
while blockIndex < maxBlockIndex:
print("Acquiring block " + str(blockIndex) + " ...")
# save the block config and load into blockhead
print("Saving temp config.")
bc.Settings["clusterIndex"] = blockIndex
tempConfigFile ='%(p)stemp%(c)s_%(i)s.xml' % {'p': settingsPath, 'c': cluster, 'i': blockIndex}
saveBlockConfig(tempConfigFile, bc)
System.Threading.Thread.Sleep(500)
print("Loading temp config.")
bh.LoadConfig(tempConfigFile)
# take the block and save it
print("Running ...")
bh.AcquireAndWait()
print("Done.")
blockPath = '%(p)s%(c)s_%(i)s.zip' % {'p': dataPath, 'c': cluster, 'i': blockIndex}
bh.SaveBlock(blockPath)
print("Saved block "+ str(blockIndex) + ".")
# give mma a chance to analyse the block
print("Notifying Mathematica and waiting ...")
writeLatestBlockNotificationFile(cluster, blockIndex)
System.Threading.Thread.Sleep(5000)
print("Done.")
# increment and loop
File.Delete(tempConfigFile)
checkYAGAndFix()
blockIndex = blockIndex + 1
updateLocksNL(bState)
# randomise Ramsey phase
scramblerV = 0.97156 * r.NextDouble()
hc.SetScramblerVoltage(scramblerV)
# randomise polarizations
#hc.SetRandomProbePosition()
#hc.SetRandomPumpPosition()
bc = measureParametersAndMakeBC(cluster, eState, bState, rfState, scramblerV, measProbePwr, measPumpPwr)
pmtChannelValues = bh.DBlock.ChannelValues[0]
magChannelValues = bh.DBlock.ChannelValues[2]
mini1ChannelValues = bh.DBlock.ChannelValues[9]
mini2ChannelValues = bh.DBlock.ChannelValues[10]
mini3ChannelValues = bh.DBlock.ChannelValues[11]
dbValue = pmtChannelValues.GetValue(("DB",))
magEValue = magChannelValues.GetValue(("E",))
mini1EValue = mini1ChannelValues.GetValue(("E",))
mini2EValue = mini2ChannelValues.GetValue(("E",))
mini3EValue = mini3ChannelValues.GetValue(("E",))
# some code to stop EDMLoop if the laser unlocks.
# This averages the last 3 db values and stops the loop if the average is below 1
dbValueList.append(dbValue)
if (len(dbValueList) == 4):
del dbValueList[0]
print "DB values for last 3 blocks " + str(dbValueList).strip('[]')
runningdbMean =float(sum(dbValueList)) / len(dbValueList)
if ( runningdbMean < 1 and nightBool is "Y" ):
hc.EnableEField( False )
hc.SetArgonShutter( True )
break
Emag1List.append(magEValue)
if (len(Emag1List) == 11):
del Emag1List[0]
print "E_{Mag} for the last 10 blocks " + str(Emag1List).strip('[]')
runningEmag1Mean =float(sum(Emag1List)) / len(Emag1List)
print "Average E_{Mag} for the last 10 blocks " + str(runningEmag1Mean)
if (dbValue < 8):
print("Dodgy spot target rotation.")
for i in range(3):
hc.StepTarget(2)
System.Threading.Thread.Sleep(500)
if ((blockIndex % kReZeroLeakageMonitorsPeriod) == 0):
print("Recalibrating leakage monitors.")
# calibrate leakage monitors
print("calibrating leakage monitors..")
print("E-field off")
hc.EnableEField( False )
System.Threading.Thread.Sleep(10000)
hc.EnableBleed( True )
System.Threading.Thread.Sleep(5000)
hc.CalibrateIMonitors()
hc.EnableBleed( False )
System.Threading.Thread.Sleep(500)
print("E-field on")
hc.EnableEField( True )
print("leakage monitors calibrated")
bh.StopPattern()
def run_script():
    # Entry point invoked by the script runner: start the acquisition loop.
    EDMGo()
``` |
[
{
"content": "Write the code verbatim:\n```python\n# -*- coding: utf-8 -*-\n\n# Copyright(C) 2012 Romain Bignon\n#\n# This file is part of weboob.\n#\n# weboob is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Soft... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n# Copyright(C) 2012 Romain Bignon\n#\n# This file is part of weboob.\n#\n# weboob is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\... | ```python
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import urllib
from weboob.capabilities.translate import ICapTranslate, Translation, TranslationFail, LanguageNotSupported
from weboob.tools.backend import BaseBackend
from weboob.tools.browser import StandardBrowser
__all__ = ['EbonicsBackend']
class EbonicsBackend(BaseBackend, ICapTranslate):
    # Weboob backend that scrapes the joel.net "Ebonics" translation page.
    NAME = 'ebonics'
    MAINTAINER = u'Romain Bignon'
    EMAIL = 'romain@weboob.org'
    VERSION = '0.e'
    LICENSE = 'AGPLv3+'
    DESCRIPTION = u'English to Ebonics translation service'
    BROWSER = StandardBrowser

    def translate(self, lan_from, lan_to, text):
        """Translate `text` via the remote service and return a Translation.

        Raises LanguageNotSupported for any pair other than the single one
        the service offers, and TranslationFail when the result cannot be
        scraped from the response page.
        """
        # Only one language pair is supported by the remote service.
        if lan_from != 'English' or lan_to != 'Nigger!':
            raise LanguageNotSupported()
        with self.browser:
            # POST the text and scrape the translated bubble from the page.
            data = {'English': text.encode('utf-8')}
            doc = self.browser.location('http://joel.net/EBONICS/Translator', urllib.urlencode(data))
            try:
                text = doc.getroot().cssselect('div.translateform div.bubble1 div.bubblemid')[0].text
            except IndexError:
                raise TranslationFail()
            if text is None:
                raise TranslationFail()
            translation = Translation(0)
            translation.lang_src = unicode(lan_from)
            translation.lang_dst = unicode(lan_to)
            translation.text = unicode(text).strip()
            return translation
``` |
[
{
"content": "Return the code unaltered:\n```python\n# Prints a tree of all items in the configuration\n# vim: tabstop=4 shiftwidth=4 expandtab\n\nimport kconfiglib\nimport sys\n\n# Integers representing symbol types\nUNKNOWN, BOOL, TRISTATE, STRING, HEX, INT = range(6)\n\n# Strings to use for types\nTYPENAME =... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\n# Prints a tree of all items in the configuration\n# vim: tabstop=4 shiftwidth=4 expandtab\n\nimport kconfiglib\nimport sys\n\n# Integers representing symbol types\nUNKNOWN, BOOL, TRISTATE, STRING, HEX, INT = range(6)\n\n# Strings to use for t... | ```python
# Prints a tree of all items in the configuration
# vim: tabstop=4 shiftwidth=4 expandtab
import kconfiglib
import sys
# Integers representing symbol types
UNKNOWN, BOOL, TRISTATE, STRING, HEX, INT = range(6)

# Strings to use for types, indexed by the type constants above
TYPENAME = {UNKNOWN: "unknown", BOOL: "bool", TRISTATE: "tristate",
            STRING: "string", HEX: "hex", INT: "int"}
def print_with_indent(s, indent):
    """Print `s` prefixed with `indent` spaces."""
    prefix = " " * indent
    print(prefix + s)
def print_items(items, outdir, indent):
    """Recursively walk kconfig items, writing one reST page per symbol.

    Each symbol becomes <outdir>/CONFIG_<name>.rst and is also appended to
    the module-level index file handle `f`. Menus and choices recurse into
    their children; comments are skipped. `indent` is only used by the
    commented-out debug prints.
    """
    for item in items:
        # Pick the descriptive text appropriate to the item type.
        if item.is_symbol() or item.is_choice():
            text = item.get_help()
        elif item.is_menu():
            text = item.get_title()
        else:
            # Comment
            text = item.get_text()
        if item.is_symbol():
            #print_with_indent("config {0}".format(item.get_name()), indent)
            var = "CONFIG_%s" %item.get_name()
            # Register the page in the toctree of the index file.
            f.write(" %s.rst\n" %var)
            config = open("%s/%s.rst" % (outdir, var), "w")
            # Anchor, title, and a '#' underline sized to the title.
            config.write("\n.. _CONFIG_%s:\n" %item.get_name())
            config.write("\n%s\n" %var)
            config.write("%s\n\n" %(len("%s" %var) * '#' ))
            if text:
                config.write("\n%s\n\n" %text)
            else:
                config.write("\nThe configuration item %s:\n\n" %var)
            config.write(item.rest())
            config.close()
        elif item.is_menu():
            #print_with_indent('menu "{0}"'.format(item.get_title()), indent)
            print_items(item.get_items(), outdir, indent + 2)
        elif item.is_choice():
            #print_with_indent('choice', indent)
            print_items(item.get_items(), outdir, indent + 2)
        elif item.is_comment():
            pass
            #print_with_indent('comment "{0}"'.format(item.get_text()), indent)
f = open("%s/index.rst" % (sys.argv[2]), "w")
f.write(""".. _configuration:
Configuration Options Reference Guide
#####################################
Introduction
************
Kconfig files describe the configuration symbols supported in the build
system, the logical organization and structure that group the symbols in menus
and sub-menus, and the relationships between the different configuration
symbols that govern the valid configuration combinations.
The Kconfig files are distributed across the build directory tree. The files
are organized based on their common characteristics and on what new symbols
they add to the configuration menus.
The configuration options' information is extracted directly from :program:`Kconfig`
using the :file:`~/doc/scripts/genrest/genrest.py` script.
Supported Options
*****************
""")
f.write(".. toctree::\n :maxdepth: 2\n\n")
conf = kconfiglib.Config(sys.argv[1])
print_items(conf.get_top_level_items(), sys.argv[2], 0)
f.close()
``` |
[
{
"content": "Here is a code file:\n```python\nfrom fabric.contrib.files import append, exists, sed\nfrom fabric.api import env, local, run\nimport random\n\n\nREPO_URL = 'https://github.com/rmelchorv/TDD-Cuervos.git'\n\ndef deploy():\n\tsite_folder = '/home/%s/sites/%s' % (env.user, env.host)\n\tsource_folder ... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nfrom fabric.contrib.files import append, exists, sed\nfrom fabric.api import env, local, run\nimport random\n\n\nREPO_URL = 'https://github.com/rmelchorv/TDD-Cuervos.git'\n\ndef deploy():\n\tsite_folder = '/home/%s/sites/%s' % (env.user, env.host)\n... | ```python
from fabric.contrib.files import append, exists, sed
from fabric.api import env, local, run
import random
REPO_URL = 'https://github.com/rmelchorv/TDD-Cuervos.git'
def deploy():
    """Provision folders on the remote host, pull the latest source, and
    update settings, virtualenv, static files and the database."""
    site_folder = '/home/{}/sites/{}'.format(env.user, env.host)
    source_folder = site_folder + '/source'
    _create_directory_structure_if_necessary(site_folder)
    _get_latest_source(source_folder)
    _update_settings(source_folder, env.host)
    _update_virtualenv(source_folder)
    _update_static_files(source_folder)
    _update_database(source_folder)
def _create_directory_structure_if_necessary(site_folder):
    """Ensure the database/static/virtualenv/source subfolders exist."""
    required = ('database', 'static', 'virtualenv', 'source')
    for name in required:
        run('mkdir -p {}/{}'.format(site_folder, name))
def _get_latest_source(source_folder):
    """Clone or fetch the repo, then hard-reset to the local HEAD commit."""
    if not exists(source_folder + '/.git'):
        run('git clone {} {}'.format(REPO_URL, source_folder))
    else:
        run('cd {} && git fetch'.format(source_folder))
    current_commit = local("git log -n 1 --format=%H", capture=True)
    run('cd {} && git reset --hard {}'.format(source_folder, current_commit))
def _update_settings(source_folder, site_name):
    """Disable DEBUG, pin ALLOWED_HOSTS, and ensure a persistent SECRET_KEY."""
    settings_path = source_folder + '/superlists/settings.py'
    sed(settings_path, "DEBUG = True", "DEBUG = False")
    sed(settings_path,
        'ALLOWED_HOSTS =.+$',
        'ALLOWED_HOSTS = ["%s"]' % (site_name,)
    )
    secret_key_file = source_folder + '/superlists/secret_key.py'
    if not exists(secret_key_file):
        # Generate the key once; later deploys reuse the existing file.
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
        new_key = ''.join(random.SystemRandom().choice(chars) for _ in range(50))
        append(secret_key_file, "SECRET_KEY = '%s'" % (new_key,))
    append(settings_path, '\nfrom .secret_key import SECRET_KEY')
def _update_virtualenv(source_folder):
    """Create the site's virtualenv if missing and install requirements."""
    venv_folder = source_folder + '/../virtualenv'
    if not exists(venv_folder + '/bin/pip'):
        run('virtualenv --python=python3 %s' % (venv_folder,))
    run('%s/bin/pip install -r %s/requirements.txt' % (venv_folder, source_folder))
def _update_static_files(source_folder):
    """Collect static files using the site's virtualenv python."""
    command = 'cd %s && ../virtualenv/bin/python3 manage.py collectstatic --noinput'
    run(command % (source_folder,))
def _update_database(source_folder):
    """Apply pending migrations using the site's virtualenv python."""
    command = 'cd %s && ../virtualenv/bin/python3 manage.py migrate --noinput'
    run(command % (source_folder,))
``` |
[
{
"content": "Here is the script:\n```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2011 OpenStack LLC.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n#... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2011 OpenStack LLC.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. Y... | ```python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import math
import time
from xml.dom import minidom
from xml.parsers import expat
from lxml import etree
import webob
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import wsgi
# XML namespaces used by the v1.0/v1.1 serializers and Atom links.
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'

LOG = logging.getLogger(__name__)

# The vendor content types should serialize identically to the non-vendor
# content types. So to avoid littering the code with both options, we
# map the vendor to the other when looking up the type
_CONTENT_TYPE_MAP = {
    'application/vnd.openstack.compute+json': 'application/json',
    'application/vnd.openstack.compute+xml': 'application/xml',
}

# Content types accepted in requests (vendor and plain forms).
SUPPORTED_CONTENT_TYPES = (
    'application/json',
    'application/vnd.openstack.compute+json',
    'application/xml',
    'application/vnd.openstack.compute+xml',
)

# Maps a content type to its serialization format name ('json'/'xml'/'atom').
_MEDIA_TYPE_MAP = {
    'application/vnd.openstack.compute+json': 'json',
    'application/json': 'json',
    'application/vnd.openstack.compute+xml': 'xml',
    'application/xml': 'xml',
    'application/atom+xml': 'atom',
}
class Request(webob.Request):
    """Add some OpenStack API-specific logic to the base webob.Request."""

    def __init__(self, *args, **kwargs):
        super(Request, self).__init__(*args, **kwargs)
        # Per-request scratch space for API extensions; lives only as long
        # as this request object.
        self._extension_data = {'db_instances': {}}

    def cache_db_instances(self, instances):
        """
        Allow API methods to store instances from a DB query to be
        used by API extensions within the same API request.

        An instance of this class only lives for the lifetime of a
        single API request, so there's no need to implement full
        cache management.
        """
        db_instances = self._extension_data['db_instances']
        for instance in instances:
            # Instances are keyed by their uuid.
            db_instances[instance['uuid']] = instance

    def cache_db_instance(self, instance):
        """
        Allow API methods to store an instance from a DB query to be
        used by API extensions within the same API request.

        An instance of this class only lives for the lifetime of a
        single API request, so there's no need to implement full
        cache management.
        """
        self.cache_db_instances([instance])

    def get_db_instances(self):
        """
        Allow an API extension to get previously stored instances within
        the same API request.

        Note that the instance data will be slightly stale.
        """
        return self._extension_data['db_instances']

    def get_db_instance(self, instance_uuid):
        """
        Allow an API extension to get a previously stored instance
        within the same API request.

        Note that the instance data will be slightly stale.
        """
        return self._extension_data['db_instances'].get(instance_uuid)

    def best_match_content_type(self):
        """Determine the requested response content-type."""
        # Result is cached in the WSGI environ for repeated calls.
        if 'nova.best_content_type' not in self.environ:
            # Calculate the best MIME type
            content_type = None

            # Check URL path suffix
            parts = self.path.rsplit('.', 1)
            if len(parts) > 1:
                possible_type = 'application/' + parts[1]
                if possible_type in SUPPORTED_CONTENT_TYPES:
                    content_type = possible_type

            if not content_type:
                # Fall back to HTTP content negotiation on the Accept header.
                content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)

            self.environ['nova.best_content_type'] = (content_type or
                                                      'application/json')

        return self.environ['nova.best_content_type']

    def get_content_type(self):
        """Determine content type of the request body.

        Does not do any body introspection, only checks header
        """
        if not "Content-Type" in self.headers:
            return None

        content_type = self.content_type

        # NOTE(markmc): text/plain is the default for eventlet and
        # other webservers which use mimetools.Message.gettype()
        # whereas twisted defaults to ''.
        if not content_type or content_type == 'text/plain':
            return None

        if content_type not in SUPPORTED_CONTENT_TYPES:
            raise exception.InvalidContentType(content_type=content_type)

        return content_type
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization"""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
def __init__(self, metadata=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
"""
super(XMLDeserializer, self).__init__()
self.metadata = metadata or {}
def _from_xml(self, datastring):
plurals = set(self.metadata.get('plurals', {}))
try:
node = minidom.parseString(datastring).childNodes[0]
return {node.nodeName: self._from_xml_node(node, plurals)}
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.
:param listnames: list of XML node names whose subnodes should
be considered list items.
"""
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
return node.childNodes[0].nodeValue
elif node.nodeName in listnames:
return [self._from_xml_node(n, listnames) for n in node.childNodes]
else:
result = dict()
for attr in node.attributes.keys():
result[attr] = node.attributes[attr].nodeValue
for child in node.childNodes:
if child.nodeType != node.TEXT_NODE:
result[child.nodeName] = self._from_xml_node(child,
listnames)
return result
def find_first_child_named(self, parent, name):
"""Search a nodes children for the first child with a given name"""
for node in parent.childNodes:
if node.nodeName == name:
return node
return None
def find_children_named(self, parent, name):
"""Return all of a nodes children who have the given name"""
for node in parent.childNodes:
if node.nodeName == name:
yield node
def extract_text(self, node):
"""Get the text field contained by the given node"""
if len(node.childNodes) == 1:
child = node.childNodes[0]
if child.nodeType == child.TEXT_NODE:
return child.nodeValue
return ""
def find_attribute_or_element(self, parent, name):
"""Get an attribute value; fallback to an element if not found"""
if parent.hasAttribute(name):
return parent.getAttribute(name)
node = self.find_first_child_named(parent, name)
if node:
return self.extract_text(node)
return None
def default(self, datastring):
return {'body': self._from_xml(datastring)}
class MetadataXMLDeserializer(XMLDeserializer):
def extract_metadata(self, metadata_node):
"""Marshal the metadata attribute of a parsed request"""
metadata = {}
if metadata_node is not None:
for meta_node in self.find_children_named(metadata_node, "meta"):
key = meta_node.getAttribute("key")
metadata[key] = self.extract_text(meta_node)
return metadata
class DictSerializer(ActionDispatcher):
"""Default request body serialization"""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization"""
def default(self, data):
return jsonutils.dumps(data)
class XMLDictSerializer(DictSerializer):
def __init__(self, metadata=None, xmlns=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
:param xmlns: XML namespace to include with serialized xml
"""
super(XMLDictSerializer, self).__init__()
self.metadata = metadata or {}
self.xmlns = xmlns
def default(self, data):
# We expect data to contain a single key which is the XML root.
root_key = data.keys()[0]
doc = minidom.Document()
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
return self.to_xml_string(node)
def to_xml_string(self, node, has_atom=False):
self._add_xmlns(node, has_atom)
return node.toxml('UTF-8')
#NOTE (ameade): the has_atom should be removed after all of the
# xml serializers and view builders have been updated to the current
# spec that required all responses include the xmlns:atom, the has_atom
# flag is to prevent current tests from breaking
def _add_xmlns(self, node, has_atom=False):
if self.xmlns is not None:
node.setAttribute('xmlns', self.xmlns)
if has_atom:
node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)
# Set the xml namespace if one is specified
# TODO(justinsb): We could also use prefixes on the keys
xmlns = metadata.get('xmlns', None)
if xmlns:
result.setAttribute('xmlns', xmlns)
#TODO(bcwaldon): accomplish this without a type-check
if isinstance(data, list):
collections = metadata.get('list_collections', {})
if nodename in collections:
metadata = collections[nodename]
for item in data:
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(item))
result.appendChild(node)
return result
singular = metadata.get('plurals', {}).get(nodename, None)
if singular is None:
if nodename.endswith('s'):
singular = nodename[:-1]
else:
singular = 'item'
for item in data:
node = self._to_xml_node(doc, metadata, singular, item)
result.appendChild(node)
#TODO(bcwaldon): accomplish this without a type-check
elif isinstance(data, dict):
collections = metadata.get('dict_collections', {})
if nodename in collections:
metadata = collections[nodename]
for k, v in data.items():
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(k))
text = doc.createTextNode(str(v))
node.appendChild(text)
result.appendChild(node)
return result
attrs = metadata.get('attributes', {}).get(nodename, {})
for k, v in data.items():
if k in attrs:
result.setAttribute(k, str(v))
else:
node = self._to_xml_node(doc, metadata, k, v)
result.appendChild(node)
else:
# Type is atom
node = doc.createTextNode(str(data))
result.appendChild(node)
return result
def _create_link_nodes(self, xml_doc, links):
link_nodes = []
for link in links:
link_node = xml_doc.createElement('atom:link')
link_node.setAttribute('rel', link['rel'])
link_node.setAttribute('href', link['href'])
if 'type' in link:
link_node.setAttribute('type', link['type'])
link_nodes.append(link_node)
return link_nodes
def _to_xml(self, root):
"""Convert the xml object to an xml string."""
return etree.tostring(root, encoding='UTF-8', xml_declaration=True)
def serializers(**serializers):
"""Attaches serializers to a method.
This decorator associates a dictionary of serializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_serializers'):
func.wsgi_serializers = {}
func.wsgi_serializers.update(serializers)
return func
return decorator
def deserializers(**deserializers):
"""Attaches deserializers to a method.
This decorator associates a dictionary of deserializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_deserializers'):
func.wsgi_deserializers = {}
func.wsgi_deserializers.update(deserializers)
return func
return decorator
def response(code):
"""Attaches response code to a method.
This decorator associates a response code with a method. Note
that the function attributes are directly manipulated; the method
is not wrapped.
"""
def decorator(func):
func.wsgi_code = code
return func
return decorator
class ResponseObject(object):
"""Bundles a response object with appropriate serializers.
Object that app methods may return in order to bind alternate
serializers with a response object to be serialized. Its use is
optional.
"""
def __init__(self, obj, code=None, headers=None, **serializers):
"""Binds serializers with an object.
Takes keyword arguments akin to the @serializer() decorator
for specifying serializers. Serializers specified will be
given preference over default serializers or method-specific
serializers on return.
"""
self.obj = obj
self.serializers = serializers
self._default_code = 200
self._code = code
self._headers = headers or {}
self.serializer = None
self.media_type = None
def __getitem__(self, key):
"""Retrieves a header with the given name."""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""Sets a header with the given name to the given value."""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""Deletes the header with the given name."""
del self._headers[key.lower()]
def _bind_method_serializers(self, meth_serializers):
"""Binds method serializers with the response object.
Binds the method serializers with the response object.
Serializers specified to the constructor will take precedence
over serializers specified to this method.
:param meth_serializers: A dictionary with keys mapping to
response types and values containing
serializer objects.
"""
# We can't use update because that would be the wrong
# precedence
for mtype, serializer in meth_serializers.items():
self.serializers.setdefault(mtype, serializer)
def get_serializer(self, content_type, default_serializers=None):
"""Returns the serializer for the wrapped object.
Returns the serializer for the wrapped object subject to the
indicated content type. If no serializer matching the content
type is attached, an appropriate serializer drawn from the
default serializers will be used. If no appropriate
serializer is available, raises InvalidContentType.
"""
default_serializers = default_serializers or {}
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in self.serializers:
return mtype, self.serializers[mtype]
else:
return mtype, default_serializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
def preserialize(self, content_type, default_serializers=None):
"""Prepares the serializer that will be used to serialize.
Determines the serializer that will be used and prepares an
instance of it for later call. This allows the serializer to
be accessed by extensions for, e.g., template extension.
"""
mtype, serializer = self.get_serializer(content_type,
default_serializers)
self.media_type = mtype
self.serializer = serializer()
def attach(self, **kwargs):
"""Attach slave templates to serializers."""
if self.media_type in kwargs:
self.serializer.attach(kwargs[self.media_type])
def serialize(self, request, content_type, default_serializers=None):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
if self.serializer:
serializer = self.serializer
else:
_mtype, _serializer = self.get_serializer(content_type,
default_serializers)
serializer = _serializer()
response = webob.Response()
response.status_int = self.code
for hdr, value in self._headers.items():
response.headers[hdr] = value
response.headers['Content-Type'] = content_type
if self.obj is not None:
response.body = serializer.serialize(self.obj)
return response
@property
def code(self):
"""Retrieve the response status."""
return self._code or self._default_code
@property
def headers(self):
"""Retrieve the headers."""
return self._headers.copy()
def action_peek_json(body):
"""Determine action to invoke."""
try:
decoded = jsonutils.loads(body)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
# Make sure there's exactly one key...
if len(decoded) != 1:
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
# Return the action and the decoded body...
return decoded.keys()[0]
def action_peek_xml(body):
"""Determine action to invoke."""
dom = minidom.parseString(body)
action_node = dom.childNodes[0]
return action_node.tagName
class ResourceExceptionHandler(object):
"""Context manager to handle Resource exceptions.
Used when processing exceptions generated by API implementation
methods (or their extensions). Converts most exceptions to Fault
exceptions, with the appropriate logging.
"""
def __enter__(self):
return None
def __exit__(self, ex_type, ex_value, ex_traceback):
if not ex_value:
return True
if isinstance(ex_value, exception.NotAuthorized):
msg = unicode(ex_value)
raise Fault(webob.exc.HTTPForbidden(explanation=msg))
elif isinstance(ex_value, exception.Invalid):
raise Fault(exception.ConvertedException(
code=ex_value.code, explanation=unicode(ex_value)))
# Under python 2.6, TypeError's exception value is actually a string,
# so test # here via ex_type instead:
# http://bugs.python.org/issue7853
elif issubclass(ex_type, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_('Exception handling resource: %s') % ex_value,
exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_("Fault thrown: %s"), unicode(ex_value))
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value))
raise Fault(ex_value)
# We didn't handle the exception
return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
def __init__(self, controller, action_peek=None, inherits=None,
**deserializers):
"""
:param controller: object that implement methods created by routes lib
:param action_peek: dictionary of routines for peeking into an action
request body to determine the desired action
:param inherits: another resource object that this resource should
inherit extensions from. Any action extensions that
are applied to the parent resource will also apply
to this resource.
"""
self.controller = controller
default_deserializers = dict(xml=XMLDeserializer,
json=JSONDeserializer)
default_deserializers.update(deserializers)
self.default_deserializers = default_deserializers
self.default_serializers = dict(xml=XMLDictSerializer,
json=JSONDictSerializer)
self.action_peek = dict(xml=action_peek_xml,
json=action_peek_json)
self.action_peek.update(action_peek or {})
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
self.register_actions(controller)
# Save a mapping of extensions
self.wsgi_extensions = {}
self.wsgi_action_extensions = {}
self.inherits = inherits
def register_actions(self, controller):
"""Registers controller actions with this resource."""
actions = getattr(controller, 'wsgi_actions', {})
for key, method_name in actions.items():
self.wsgi_actions[key] = getattr(controller, method_name)
def register_extensions(self, controller):
"""Registers controller extensions with this resource."""
extensions = getattr(controller, 'wsgi_extensions', [])
for method_name, action_name in extensions:
# Look up the extending method
extension = getattr(controller, method_name)
if action_name:
# Extending an action...
if action_name not in self.wsgi_action_extensions:
self.wsgi_action_extensions[action_name] = []
self.wsgi_action_extensions[action_name].append(extension)
else:
# Extending a regular method
if method_name not in self.wsgi_extensions:
self.wsgi_extensions[method_name] = []
self.wsgi_extensions[method_name].append(extension)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
# NOTE(Vek): Check for get_action_args() override in the
# controller
if hasattr(self.controller, 'get_action_args'):
return self.controller.get_action_args(request_environment)
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except (KeyError, IndexError, AttributeError):
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
def get_body(self, request):
try:
content_type = request.get_content_type()
except exception.InvalidContentType:
LOG.debug(_("Unrecognized Content-Type provided in request"))
return None, ''
if not content_type:
LOG.debug(_("No Content-Type provided in request"))
return None, ''
if len(request.body) <= 0:
LOG.debug(_("Empty body provided in request"))
return None, ''
return content_type, request.body
def deserialize(self, meth, content_type, body):
meth_deserializers = getattr(meth, 'wsgi_deserializers', {})
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in meth_deserializers:
deserializer = meth_deserializers[mtype]
else:
deserializer = self.default_deserializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
return deserializer().deserialize(body)
def pre_process_extensions(self, extensions, request, action_args):
# List of callables for post-processing extensions
post = []
for ext in extensions:
if inspect.isgeneratorfunction(ext):
response = None
# If it's a generator function, the part before the
# yield is the preprocessing stage
try:
with ResourceExceptionHandler():
gen = ext(req=request, **action_args)
response = gen.next()
except Fault as ex:
response = ex
# We had a response...
if response:
return response, []
# No response, queue up generator for post-processing
post.append(gen)
else:
# Regular functions only perform post-processing
post.append(ext)
# Run post-processing in the reverse order
return None, reversed(post)
def post_process_extensions(self, extensions, resp_obj, request,
action_args):
for ext in extensions:
response = None
if inspect.isgenerator(ext):
# If it's a generator, run the second half of
# processing
try:
with ResourceExceptionHandler():
response = ext.send(resp_obj)
except StopIteration:
# Normal exit of generator
continue
except Fault as ex:
response = ex
else:
# Regular functions get post-processing...
try:
with ResourceExceptionHandler():
response = ext(req=request, resp_obj=resp_obj,
**action_args)
except Fault as ex:
response = ex
# We had a response...
if response:
return response
return None
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.info("%(method)s %(url)s" % {"method": request.method,
"url": request.url})
# Identify the action, its arguments, and the requested
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
content_type, body = self.get_body(request)
accept = request.best_match_content_type()
# NOTE(Vek): Splitting the function up this way allows for
# auditing by external tools that wrap the existing
# function. If we try to audit __call__(), we can
# run into troubles due to the @webob.dec.wsgify()
# decorator.
return self._process_stack(request, action, action_args,
content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
"""Implement the processing stack."""
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Now, deserialize the request body...
try:
if content_type:
contents = self.deserialize(meth, content_type, body)
else:
contents = {}
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
context = request.environ.get('nova.context')
if (context and project_id and (project_id != context.project_id)):
msg = _("Malformed request url")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Run pre-processing extensions
response, post = self.pre_process_extensions(extensions,
request, action_args)
if not response:
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if type(action_result) is dict or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
_set_request_id_header(request, resp_obj)
# Do a preserialize to set up the response object
serializers = getattr(meth, 'wsgi_serializers', {})
resp_obj._bind_method_serializers(serializers)
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
resp_obj.preserialize(accept, self.default_serializers)
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept,
self.default_serializers)
try:
msg_dict = dict(url=request.url, status=response.status_int)
msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
except AttributeError, e:
msg_dict = dict(url=request.url, e=e)
msg = _("%(url)s returned a fault: %(e)s") % msg_dict
LOG.info(msg)
return response
def get_method(self, request, action, content_type, body):
meth, extensions = self._get_method(request,
action,
content_type,
body)
if self.inherits:
_meth, parent_ext = self.inherits.get_method(request,
action,
content_type,
body)
extensions.extend(parent_ext)
return meth, extensions
def _get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
# Look up the method
try:
if not self.controller:
meth = getattr(self, action)
else:
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
action not in ['action', 'create', 'delete']):
# Propagate the error
raise
else:
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
# OK, it's an action; figure out which action...
mtype = _MEDIA_TYPE_MAP.get(content_type)
action_name = self.action_peek[mtype](body)
else:
action_name = action
# Look up the action method
return (self.wsgi_actions[action_name],
self.wsgi_action_extensions.get(action_name, []))
def dispatch(self, method, request, action_args):
"""Dispatch a call to the action-specific method."""
return method(req=request, **action_args)
def action(name):
"""Mark a function as an action.
The given name will be taken as the action key in the body.
This is also overloaded to allow extensions to provide
non-extending definitions of create and delete operations.
"""
def decorator(func):
func.wsgi_action = name
return func
return decorator
def extends(*args, **kwargs):
"""Indicate a function extends an operation.
Can be used as either::
@extends
def index(...):
pass
or as::
@extends(action='resize')
def _action_resize(...):
pass
"""
def decorator(func):
# Store enough information to find what we're extending
func.wsgi_extends = (func.__name__, kwargs.get('action'))
return func
# If we have positional arguments, call the decorator
if args:
return decorator(*args)
# OK, return the decorator instead
return decorator
class ControllerMetaclass(type):
"""Controller metaclass.
This metaclass automates the task of assembling a dictionary
mapping action keys to method names.
"""
def __new__(mcs, name, bases, cls_dict):
"""Adds the wsgi_actions dictionary to the class."""
# Find all actions
actions = {}
extensions = []
for key, value in cls_dict.items():
if not callable(value):
continue
if getattr(value, 'wsgi_action', None):
actions[value.wsgi_action] = key
elif getattr(value, 'wsgi_extends', None):
extensions.append(value.wsgi_extends)
# Add the actions and extensions to the class dict
cls_dict['wsgi_actions'] = actions
cls_dict['wsgi_extensions'] = extensions
return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
cls_dict)
class Controller(object):
"""Default controller."""
__metaclass__ = ControllerMetaclass
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
self.status_int = exception.status_int
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
fault_data = {
fault_name: {
'code': code,
'message': self.wrapped_exc.explanation}}
if code == 413:
retry = self.wrapped_exc.headers['Retry-After']
fault_data[fault_name]['retryAfter'] = retry
# 'code' is an attribute on the fault tag itself
metadata = {'attributes': {fault_name: 'code'}}
xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)
content_type = req.best_match_content_type()
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
self.wrapped_exc.body = serializer.serialize(fault_data)
self.wrapped_exc.content_type = content_type
_set_request_id_header(req, self.wrapped_exc.headers)
return self.wrapped_exc
def __str__(self):
return self.wrapped_exc.__str__()
class OverLimitFault(webob.exc.HTTPException):
"""
Rate-limited request response.
"""
def __init__(self, message, details, retry_time):
"""
Initialize new `OverLimitFault` with relevant information.
"""
hdrs = OverLimitFault._retry_after(retry_time)
self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs)
self.content = {
"overLimitFault": {
"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
},
}
@staticmethod
def _retry_after(retry_time):
delay = int(math.ceil(retry_time - time.time()))
retry_after = delay if delay > 0 else 0
headers = {'Retry-After': '%d' % retry_after}
return headers
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""
Return the wrapped exception with a serialized body conforming to our
error format.
"""
content_type = request.best_match_content_type()
metadata = {"attributes": {"overLimitFault": "code"}}
xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
content = serializer.serialize(self.content)
self.wrapped_exc.body = content
return self.wrapped_exc
def _set_request_id_header(req, headers):
context = req.environ.get('nova.context')
if context:
headers['x-compute-request-id'] = context.request_id
``` |
[
{
"content": "Replicate the source code:\n```python\n\"\"\"Unit tests for FrameGetProtocolVersionRequest.\"\"\"\nimport unittest\n\nfrom pyvlx.api.frame_creation import frame_from_raw\nfrom pyvlx.api.frames import FrameGetProtocolVersionRequest\n\n\nclass TestFrameGetProtocolVersionRequest(unittest.TestCase):\n... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n\"\"\"Unit tests for FrameGetProtocolVersionRequest.\"\"\"\nimport unittest\n\nfrom pyvlx.api.frame_creation import frame_from_raw\nfrom pyvlx.api.frames import FrameGetProtocolVersionRequest\n\n\nclass TestFrameGetProtocolVersionRequest(unitt... | ```python
"""Unit tests for FrameGetProtocolVersionRequest."""
import unittest
from pyvlx.api.frame_creation import frame_from_raw
from pyvlx.api.frames import FrameGetProtocolVersionRequest
class TestFrameGetProtocolVersionRequest(unittest.TestCase):
"""Test class FrameGetProtocolVersionRequest."""
# pylint: disable=too-many-public-methods,invalid-name
EXAMPLE_FRAME = b"\x00\x03\x00\n\t"
def test_bytes(self):
"""Test FrameGetProtocolVersionRequest with NO_TYPE."""
frame = FrameGetProtocolVersionRequest()
self.assertEqual(bytes(frame), self.EXAMPLE_FRAME)
def test_frame_from_raw(self):
"""Test parse FrameGetProtocolVersionRequest from raw."""
frame = frame_from_raw(self.EXAMPLE_FRAME)
self.assertTrue(isinstance(frame, FrameGetProtocolVersionRequest))
def test_str(self):
"""Test string representation of FrameGetProtocolVersionRequest."""
frame = FrameGetProtocolVersionRequest()
self.assertEqual(str(frame), "<FrameGetProtocolVersionRequest/>")
``` |
[
{
"content": "Here is a code snippet:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"psutil is a cross-platform library fo... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"psutil is a cross-pla... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""psutil is a cross-platform library for retrieving information on
running processes and system utilization (CPU, memory, disks, network)
in Python.
"""
from __future__ import division
import collections
import errno
import functools
import os
import signal
import subprocess
import sys
import time
try:
import pwd
except ImportError:
pwd = None
from . import _common
from ._common import memoize
from ._compat import callable, long
from ._compat import PY3 as _PY3
from ._common import (STATUS_RUNNING, # NOQA
STATUS_SLEEPING,
STATUS_DISK_SLEEP,
STATUS_STOPPED,
STATUS_TRACING_STOP,
STATUS_ZOMBIE,
STATUS_DEAD,
STATUS_WAKING,
STATUS_LOCKED,
STATUS_IDLE, # bsd
STATUS_WAITING) # bsd
from ._common import (CONN_ESTABLISHED,
CONN_SYN_SENT,
CONN_SYN_RECV,
CONN_FIN_WAIT1,
CONN_FIN_WAIT2,
CONN_TIME_WAIT,
CONN_CLOSE,
CONN_CLOSE_WAIT,
CONN_LAST_ACK,
CONN_LISTEN,
CONN_CLOSING,
CONN_NONE)
from ._common import (NIC_DUPLEX_FULL, # NOQA
NIC_DUPLEX_HALF,
NIC_DUPLEX_UNKNOWN)
if sys.platform.startswith("linux"):
from . import _pslinux as _psplatform
from ._pslinux import (IOPRIO_CLASS_NONE, # NOQA
IOPRIO_CLASS_RT,
IOPRIO_CLASS_BE,
IOPRIO_CLASS_IDLE)
# Linux >= 2.6.36
if _psplatform.HAS_PRLIMIT:
from ._psutil_linux import (RLIM_INFINITY, # NOQA
RLIMIT_AS,
RLIMIT_CORE,
RLIMIT_CPU,
RLIMIT_DATA,
RLIMIT_FSIZE,
RLIMIT_LOCKS,
RLIMIT_MEMLOCK,
RLIMIT_NOFILE,
RLIMIT_NPROC,
RLIMIT_RSS,
RLIMIT_STACK)
# Kinda ugly but considerably faster than using hasattr() and
# setattr() against the module object (we are at import time:
# speed matters).
from . import _psutil_linux
try:
RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE
except AttributeError:
pass
try:
RLIMIT_NICE = _psutil_linux.RLIMIT_NICE
except AttributeError:
pass
try:
RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO
except AttributeError:
pass
try:
RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME
except AttributeError:
pass
try:
RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING
except AttributeError:
pass
del _psutil_linux
elif sys.platform.startswith("win32"):
from . import _pswindows as _psplatform
from ._psutil_windows import (ABOVE_NORMAL_PRIORITY_CLASS, # NOQA
BELOW_NORMAL_PRIORITY_CLASS,
HIGH_PRIORITY_CLASS,
IDLE_PRIORITY_CLASS,
NORMAL_PRIORITY_CLASS,
REALTIME_PRIORITY_CLASS)
from ._pswindows import CONN_DELETE_TCB # NOQA
elif sys.platform.startswith("darwin"):
from . import _psosx as _psplatform
elif sys.platform.startswith("freebsd"):
from . import _psbsd as _psplatform
elif sys.platform.startswith("sunos"):
from . import _pssunos as _psplatform
from ._pssunos import (CONN_IDLE, # NOQA
CONN_BOUND)
else:
raise NotImplementedError('platform %s is not supported' % sys.platform)
__all__ = [
# exceptions
"Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
"TimeoutExpired",
# constants
"version_info", "__version__",
"STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
"STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
"STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
"CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
"CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
"CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
"AF_LINK",
"NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
# classes
"Process", "Popen",
# functions
"pid_exists", "pids", "process_iter", "wait_procs", # proc
"virtual_memory", "swap_memory", # memory
"cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
"net_io_counters", "net_connections", "net_if_addrs", # network
"net_if_stats",
"disk_io_counters", "disk_partitions", "disk_usage", # disk
"users", "boot_time", # others
]
__all__.extend(_psplatform.__extra__all__)
__author__ = "Giampaolo Rodola'"
__version__ = "3.0.1"
version_info = tuple([int(num) for num in __version__.split('.')])
AF_LINK = _psplatform.AF_LINK
_TOTAL_PHYMEM = None
_POSIX = os.name == 'posix'
_WINDOWS = os.name == 'nt'
_timer = getattr(time, 'monotonic', time.time)
# Sanity check in case the user messed up with psutil installation
# or did something weird with sys.path. In this case we might end
# up importing a python module using a C extension module which
# was compiled for a different version of psutil.
# We want to prevent that by failing sooner rather than later.
# See: https://github.com/giampaolo/psutil/issues/564
# The C extension exposes a 'version' integer (e.g. 301 for "3.0.1");
# getattr() defaults to None so very old extensions (without the
# attribute) also fail the comparison and trigger the ImportError.
if (int(__version__.replace('.', '')) !=
        getattr(_psplatform.cext, 'version', None)):
    msg = "version conflict: %r C extension module was built for another " \
          "version of psutil (different than %s)" % (_psplatform.cext.__file__,
                                                     __version__)
    raise ImportError(msg)
# =====================================================================
# --- exceptions
# =====================================================================
class Error(Exception):
    """Root of the psutil exception hierarchy.
    Every other exception defined by psutil derives from this class,
    so callers may catch psutil.Error to handle them all at once.
    """
    def __init__(self, msg=""):
        # Store the message as an attribute; subclasses overwrite it
        # after calling this initializer when they build a default one.
        self.msg = msg
    def __str__(self):
        return self.msg
class NoSuchProcess(Error):
    """Raised when no process with the given PID can be found, either
    because it never existed or because it has since terminated.
    """
    def __init__(self, pid, name=None, msg=None):
        Error.__init__(self, msg)
        self.pid = pid
        self.name = name
        self.msg = msg
        if msg is None:
            # No explicit message: compose one from whatever
            # identifying information we were given.
            details = ("(pid=%s, name=%s)" % (self.pid, repr(self.name))
                       if name else "(pid=%s)" % self.pid)
            self.msg = "process no longer exists " + details
class ZombieProcess(NoSuchProcess):
    """Raised when a zombie process is queried. Only OSX, BSD and
    Solaris ever raise this, and even there not always: depending on
    the query the OS may be able to succeed anyway.
    On Linux all zombie processes are queryable (hence this is never
    raised) and Windows has no notion of zombie processes.
    """
    def __init__(self, pid, name=None, ppid=None, msg=None):
        Error.__init__(self, msg)
        self.pid = pid
        self.ppid = ppid
        self.name = name
        self.msg = msg
        if msg is not None:
            return
        # Compose a default message out of the identifying info we have.
        if name and ppid:
            details = "(pid=%s, name=%s, ppid=%s)" % (
                self.pid, repr(self.name), self.ppid)
        elif name:
            details = "(pid=%s, name=%s)" % (self.pid, repr(self.name))
        else:
            details = "(pid=%s)" % self.pid
        self.msg = "process still exists but it's a zombie " + details
class AccessDenied(Error):
    """Exception raised when permission to perform an action is denied."""
    def __init__(self, pid=None, name=None, msg=None):
        Error.__init__(self, msg)
        self.pid = pid
        self.name = name
        self.msg = msg
        if msg is not None:
            return
        # No explicit message: derive one from pid/name when available.
        if pid is not None and name is not None:
            self.msg = "(pid=%s, name=%s)" % (pid, repr(name))
        elif pid is not None:
            self.msg = "(pid=%s)" % self.pid
        else:
            self.msg = ""
class TimeoutExpired(Error):
    """Raised by Process.wait(timeout) when the timeout expires while
    the process is still running.
    """
    def __init__(self, seconds, pid=None, name=None):
        Error.__init__(self, "timeout after %s seconds" % seconds)
        self.seconds = seconds
        self.pid = pid
        self.name = name
        # Append whatever identifying details are available.
        if pid is not None and name is not None:
            self.msg += " (pid=%s, name=%s)" % (pid, repr(name))
        elif pid is not None:
            self.msg += " (pid=%s)" % self.pid
# Push the exception classes into the platform specific module
# namespace — presumably so the low-level _ps* implementation modules
# can raise them without importing this package (which would be
# circular); TODO confirm against the _ps* modules.
_psplatform.NoSuchProcess = NoSuchProcess
_psplatform.ZombieProcess = ZombieProcess
_psplatform.AccessDenied = AccessDenied
_psplatform.TimeoutExpired = TimeoutExpired
# =====================================================================
# --- Process class
# =====================================================================
def _assert_pid_not_reused(fun):
"""Decorator which raises NoSuchProcess in case a process is no
longer running or its PID has been reused.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
if not self.is_running():
raise NoSuchProcess(self.pid, self._name)
return fun(self, *args, **kwargs)
return wrapper
class Process(object):
    """Represents an OS process with the given PID.
    If PID is omitted current process PID (os.getpid()) is used.
    Raise NoSuchProcess if PID does not exist.
    Note that most of the methods of this class do not make sure
    the PID of the process being queried has been reused over time.
    That means you might end up retrieving an information referring
    to another process in case the original one this instance
    refers to is gone in the meantime.
    The only exceptions for which process identity is pre-emptively
    checked and guaranteed are:
    - parent()
    - children()
    - nice() (set)
    - ionice() (set)
    - rlimit() (set)
    - cpu_affinity (set)
    - suspend()
    - resume()
    - send_signal()
    - terminate()
    - kill()
    To prevent this problem for all other methods you can:
    - use is_running() before querying the process
    - if you're continuously iterating over a set of Process
    instances use process_iter() which pre-emptively checks
    process identity for every yielded instance
    """
    def __init__(self, pid=None):
        self._init(pid)
    def _init(self, pid, _ignore_nsp=False):
        if pid is None:
            pid = os.getpid()
        else:
            if not _PY3 and not isinstance(pid, (int, long)):
                raise TypeError('pid must be an integer (got %r)' % pid)
            if pid < 0:
                raise ValueError('pid must be a positive integer (got %s)'
                                 % pid)
        self._pid = pid
        self._name = None
        self._exe = None
        self._create_time = None
        self._gone = False
        self._hash = None
        # used for caching on Windows only (on POSIX ppid may change)
        self._ppid = None
        # platform-specific modules define an _psplatform.Process
        # implementation class
        self._proc = _psplatform.Process(pid)
        self._last_sys_cpu_times = None
        self._last_proc_cpu_times = None
        # cache creation time for later use in is_running() method
        try:
            self.create_time()
        except AccessDenied:
            # we should never get here as AFAIK we're able to get
            # process creation time on all platforms even as a
            # limited user
            pass
        except ZombieProcess:
            # Let's consider a zombie process as legitimate as
            # technically it's still alive (it can be queried,
            # although not always, and it's returned by pids()).
            pass
        except NoSuchProcess:
            if not _ignore_nsp:
                msg = 'no process found with pid %s' % pid
                raise NoSuchProcess(pid, None, msg)
            else:
                self._gone = True
        # This pair is supposed to identify a Process instance
        # univocally over time (the PID alone is not enough as
        # it might refer to a process whose PID has been reused).
        # This will be used later in __eq__() and is_running().
        self._ident = (self.pid, self._create_time)
    def __str__(self):
        try:
            pid = self.pid
            name = repr(self.name())
        except ZombieProcess:
            details = "(pid=%s (zombie))" % self.pid
        except NoSuchProcess:
            details = "(pid=%s (terminated))" % self.pid
        except AccessDenied:
            details = "(pid=%s)" % (self.pid)
        else:
            details = "(pid=%s, name=%s)" % (pid, name)
        return "%s.%s%s" % (self.__class__.__module__,
                            self.__class__.__name__, details)
    def __repr__(self):
        return "<%s at %s>" % (self.__str__(), id(self))
    def __eq__(self, other):
        # Test for equality with another Process object based
        # on PID and creation time.
        if not isinstance(other, Process):
            return NotImplemented
        return self._ident == other._ident
    def __ne__(self, other):
        # Needed on Python 2 only; Python 3 derives __ne__ from __eq__.
        return not self == other
    def __hash__(self):
        # Lazily computed and cached; consistent with __eq__ as both
        # are based on the (pid, create_time) identity pair.
        if self._hash is None:
            self._hash = hash(self._ident)
        return self._hash
    # --- utility methods
    def as_dict(self, attrs=None, ad_value=None):
        """Utility method returning process information as a
        hashable dictionary.
        If 'attrs' is specified it must be a list of strings
        reflecting available Process class' attribute names
        (e.g. ['cpu_times', 'name']) else all public (read
        only) attributes are assumed.
        'ad_value' is the value which gets assigned in case
        AccessDenied or ZombieProcess exception is raised when
        retrieving that particular process information.
        """
        excluded_names = set(
            ['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
             'is_running', 'as_dict', 'parent', 'children', 'rlimit'])
        retdict = dict()
        ls = set(attrs or [x for x in dir(self)])
        for name in ls:
            if name.startswith('_'):
                continue
            if name in excluded_names:
                continue
            try:
                attr = getattr(self, name)
                if callable(attr):
                    ret = attr()
                else:
                    ret = attr
            except (AccessDenied, ZombieProcess):
                ret = ad_value
            except NotImplementedError:
                # in case of not implemented functionality (may happen
                # on old or exotic systems) we want to crash only if
                # the user explicitly asked for that particular attr
                if attrs:
                    raise
                continue
            retdict[name] = ret
        return retdict
    def parent(self):
        """Return the parent process as a Process object pre-emptively
        checking whether PID has been reused.
        If no parent is known return None.
        """
        ppid = self.ppid()
        if ppid is not None:
            ctime = self.create_time()
            try:
                parent = Process(ppid)
                if parent.create_time() <= ctime:
                    return parent
                # ...else ppid has been reused by another process
            except NoSuchProcess:
                pass
    def is_running(self):
        """Return whether this process is running.
        It also checks if PID has been reused by another process in
        which case return False.
        """
        if self._gone:
            return False
        try:
            # Checking if PID is alive is not enough as the PID might
            # have been reused by another process: we also want to
            # check process identity.
            # Process identity / uniqueness over time is granted by
            # (PID + creation time) and that is verified in __eq__.
            return self == Process(self.pid)
        except NoSuchProcess:
            self._gone = True
            return False
    # --- actual API
    @property
    def pid(self):
        """The process PID."""
        return self._pid
    def ppid(self):
        """The process parent PID.
        On Windows the return value is cached after first call.
        """
        # On POSIX we don't want to cache the ppid as it may unexpectedly
        # change to 1 (init) in case this process turns into a zombie:
        # https://github.com/giampaolo/psutil/issues/321
        # http://stackoverflow.com/questions/356722/
        # XXX should we check creation time here rather than in
        # Process.parent()?
        if _POSIX:
            return self._proc.ppid()
        else:
            self._ppid = self._ppid or self._proc.ppid()
            return self._ppid
    def name(self):
        """The process name. The return value is cached after first call."""
        if self._name is None:
            name = self._proc.name()
            if _POSIX and len(name) >= 15:
                # On UNIX the name gets truncated to the first 15 characters.
                # If it matches the first part of the cmdline we return that
                # one instead because it's usually more explicative.
                # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
                try:
                    cmdline = self.cmdline()
                except AccessDenied:
                    pass
                else:
                    if cmdline:
                        extended_name = os.path.basename(cmdline[0])
                        if extended_name.startswith(name):
                            name = extended_name
            self._proc._name = name
            self._name = name
        return self._name
    def exe(self):
        """The process executable as an absolute path.
        May also be an empty string.
        The return value is cached after first call.
        """
        def guess_it(fallback):
            # try to guess exe from cmdline[0] in absence of a native
            # exe representation
            cmdline = self.cmdline()
            if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
                exe = cmdline[0]  # the possible exe
                # Attempt to guess only in case of an absolute path.
                # It is not safe otherwise as the process might have
                # changed cwd.
                if (os.path.isabs(exe) and
                        os.path.isfile(exe) and
                        os.access(exe, os.X_OK)):
                    return exe
            if isinstance(fallback, AccessDenied):
                raise fallback
            return fallback
        if self._exe is None:
            try:
                exe = self._proc.exe()
            except AccessDenied as err:
                return guess_it(fallback=err)
            else:
                if not exe:
                    # underlying implementation can legitimately return an
                    # empty string; if that's the case we don't want to
                    # raise AD while guessing from the cmdline
                    try:
                        exe = guess_it(fallback=exe)
                    except AccessDenied:
                        pass
                self._exe = exe
        return self._exe
    def cmdline(self):
        """The command line this process has been called with."""
        return self._proc.cmdline()
    def status(self):
        """The process current status as a STATUS_* constant."""
        try:
            return self._proc.status()
        except ZombieProcess:
            return STATUS_ZOMBIE
    def username(self):
        """The name of the user that owns the process.
        On UNIX this is calculated by using *real* process uid.
        """
        if _POSIX:
            if pwd is None:
                # might happen if python was installed from sources
                raise ImportError(
                    "requires pwd module shipped with standard python")
            real_uid = self.uids().real
            try:
                return pwd.getpwuid(real_uid).pw_name
            except KeyError:
                # the uid can't be resolved by the system
                return str(real_uid)
        else:
            return self._proc.username()
    def create_time(self):
        """The process creation time as a floating point number
        expressed in seconds since the epoch, in UTC.
        The return value is cached after first call.
        """
        if self._create_time is None:
            self._create_time = self._proc.create_time()
        return self._create_time
    def cwd(self):
        """Process current working directory as an absolute path."""
        return self._proc.cwd()
    def nice(self, value=None):
        """Get or set process niceness (priority)."""
        if value is None:
            return self._proc.nice_get()
        else:
            if not self.is_running():
                raise NoSuchProcess(self.pid, self._name)
            self._proc.nice_set(value)
    if _POSIX:
        def uids(self):
            """Return process UIDs as a (real, effective, saved)
            namedtuple.
            """
            return self._proc.uids()
        def gids(self):
            """Return process GIDs as a (real, effective, saved)
            namedtuple.
            """
            return self._proc.gids()
        def terminal(self):
            """The terminal associated with this process, if any,
            else None.
            """
            return self._proc.terminal()
        def num_fds(self):
            """Return the number of file descriptors opened by this
            process (POSIX only).
            """
            return self._proc.num_fds()
    # Linux, BSD and Windows only
    if hasattr(_psplatform.Process, "io_counters"):
        def io_counters(self):
            """Return process I/O statistics as a
            (read_count, write_count, read_bytes, write_bytes)
            namedtuple.
            Those are the number of read/write calls performed and the
            amount of bytes read and written by the process.
            """
            return self._proc.io_counters()
    # Linux and Windows >= Vista only
    if hasattr(_psplatform.Process, "ionice_get"):
        def ionice(self, ioclass=None, value=None):
            """Get or set process I/O niceness (priority).
            On Linux 'ioclass' is one of the IOPRIO_CLASS_* constants.
            'value' is a number which goes from 0 to 7. The higher the
            value, the lower the I/O priority of the process.
            On Windows only 'ioclass' is used and it can be set to 2
            (normal), 1 (low) or 0 (very low).
            Available on Linux and Windows > Vista only.
            """
            if ioclass is None:
                if value is not None:
                    raise ValueError("'ioclass' must be specified")
                return self._proc.ionice_get()
            else:
                return self._proc.ionice_set(ioclass, value)
    # Linux only
    if hasattr(_psplatform.Process, "rlimit"):
        def rlimit(self, resource, limits=None):
            """Get or set process resource limits as a (soft, hard)
            tuple.
            'resource' is one of the RLIMIT_* constants.
            'limits' is supposed to be a (soft, hard) tuple.
            See "man prlimit" for further info.
            Available on Linux only.
            """
            if limits is None:
                return self._proc.rlimit(resource)
            else:
                return self._proc.rlimit(resource, limits)
    # Windows, Linux and BSD only
    if hasattr(_psplatform.Process, "cpu_affinity_get"):
        def cpu_affinity(self, cpus=None):
            """Get or set process CPU affinity.
            If specified 'cpus' must be a list of CPUs for which you
            want to set the affinity (e.g. [0, 1]).
            (Windows, Linux and BSD only).
            """
            # Automatically remove duplicates both on get and
            # set (for get it's not really necessary, it's
            # just for extra safety).
            if cpus is None:
                return list(set(self._proc.cpu_affinity_get()))
            else:
                self._proc.cpu_affinity_set(list(set(cpus)))
    if _WINDOWS:
        def num_handles(self):
            """Return the number of handles opened by this process
            (Windows only).
            """
            return self._proc.num_handles()
    def num_ctx_switches(self):
        """Return the number of voluntary and involuntary context
        switches performed by this process.
        """
        return self._proc.num_ctx_switches()
    def num_threads(self):
        """Return the number of threads used by this process."""
        return self._proc.num_threads()
    def threads(self):
        """Return threads opened by process as a list of
        (id, user_time, system_time) namedtuples representing
        thread id and thread CPU times (user/system).
        """
        return self._proc.threads()
    @_assert_pid_not_reused
    def children(self, recursive=False):
        """Return the children of this process as a list of Process
        instances, pre-emptively checking whether PID has been reused.
        If recursive is True return all the parent descendants.
        Example (A == this process):
        A ─┐
           │
           ├─ B (child) ─┐
           │             └─ X (grandchild) ─┐
           │                                └─ Y (great grandchild)
           ├─ C (child)
           └─ D (child)
        >>> import psutil
        >>> p = psutil.Process()
        >>> p.children()
        B, C, D
        >>> p.children(recursive=True)
        B, X, Y, C, D
        Note that in the example above if process X disappears
        process Y won't be listed as the reference to process A
        is lost.
        """
        if hasattr(_psplatform, 'ppid_map'):
            # Windows only: obtain a {pid:ppid, ...} dict for all running
            # processes in one shot (faster).
            ppid_map = _psplatform.ppid_map()
        else:
            ppid_map = None
        ret = []
        if not recursive:
            if ppid_map is None:
                # 'slow' version, common to all platforms except Windows
                for p in process_iter():
                    try:
                        if p.ppid() == self.pid:
                            # if child happens to be older than its parent
                            # (self) it means child's PID has been reused
                            if self.create_time() <= p.create_time():
                                ret.append(p)
                    except (NoSuchProcess, ZombieProcess):
                        pass
            else:
                # Windows only (faster)
                for pid, ppid in ppid_map.items():
                    if ppid == self.pid:
                        try:
                            child = Process(pid)
                            # if child happens to be older than its parent
                            # (self) it means child's PID has been reused
                            if self.create_time() <= child.create_time():
                                ret.append(child)
                        except (NoSuchProcess, ZombieProcess):
                            pass
        else:
            # construct a dict where 'values' are all the processes
            # having 'key' as their parent
            table = collections.defaultdict(list)
            if ppid_map is None:
                for p in process_iter():
                    try:
                        table[p.ppid()].append(p)
                    except (NoSuchProcess, ZombieProcess):
                        pass
            else:
                for pid, ppid in ppid_map.items():
                    try:
                        p = Process(pid)
                        table[ppid].append(p)
                    except (NoSuchProcess, ZombieProcess):
                        pass
            # At this point we have a mapping table where table[self.pid]
            # are the current process' children.
            # Below, we look for all descendants recursively, similarly
            # to a recursive function call.
            checkpids = [self.pid]
            for pid in checkpids:
                for child in table[pid]:
                    try:
                        # if child happens to be older than its parent
                        # (self) it means child's PID has been reused
                        intime = self.create_time() <= child.create_time()
                    except (NoSuchProcess, ZombieProcess):
                        pass
                    else:
                        if intime:
                            ret.append(child)
                            if child.pid not in checkpids:
                                checkpids.append(child.pid)
        return ret
    def cpu_percent(self, interval=None):
        """Return a float representing the current process CPU
        utilization as a percentage.
        When interval is 0.0 or None (default) compares process times
        to system CPU times elapsed since last call, returning
        immediately (non-blocking). That means that the first time
        this is called it will return a meaningful 0.0 value.
        When interval is > 0.0 compares process times to system CPU
        times elapsed before and after the interval (blocking).
        In this case is recommended for accuracy that this function
        be called with at least 0.1 seconds between calls.
        Examples:
        >>> import psutil
        >>> p = psutil.Process(os.getpid())
        >>> # blocking
        >>> p.cpu_percent(interval=1)
        2.0
        >>> # non-blocking (percentage since last call)
        >>> p.cpu_percent(interval=None)
        2.9
        >>>
        """
        blocking = interval is not None and interval > 0.0
        num_cpus = cpu_count()
        if _POSIX:
            def timer():
                return _timer() * num_cpus
        else:
            def timer():
                return sum(cpu_times())
        if blocking:
            st1 = timer()
            pt1 = self._proc.cpu_times()
            time.sleep(interval)
            st2 = timer()
            pt2 = self._proc.cpu_times()
        else:
            st1 = self._last_sys_cpu_times
            pt1 = self._last_proc_cpu_times
            st2 = timer()
            pt2 = self._proc.cpu_times()
            if st1 is None or pt1 is None:
                self._last_sys_cpu_times = st2
                self._last_proc_cpu_times = pt2
                return 0.0
        delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
        delta_time = st2 - st1
        # reset values for next call in case of interval == None
        self._last_sys_cpu_times = st2
        self._last_proc_cpu_times = pt2
        try:
            # The utilization split between all CPUs.
            # Note: a percentage > 100 is legitimate as it can result
            # from a process with multiple threads running on different
            # CPU cores, see:
            # http://stackoverflow.com/questions/1032357
            # https://github.com/giampaolo/psutil/issues/474
            overall_percent = ((delta_proc / delta_time) * 100) * num_cpus
        except ZeroDivisionError:
            # interval was too low
            return 0.0
        else:
            return round(overall_percent, 1)
    def cpu_times(self):
        """Return a (user, system) namedtuple representing the
        accumulated process time, in seconds.
        This is the same as os.times() but per-process.
        """
        return self._proc.cpu_times()
    def memory_info(self):
        """Return a tuple representing RSS (Resident Set Size) and VMS
        (Virtual Memory Size) in bytes.
        On UNIX RSS and VMS are the same values shown by 'ps'.
        On Windows RSS and VMS refer to "Mem Usage" and "VM Size"
        columns of taskmgr.exe.
        """
        return self._proc.memory_info()
    def memory_info_ex(self):
        """Return a namedtuple with variable fields depending on the
        platform representing extended memory information about
        this process. All numbers are expressed in bytes.
        """
        return self._proc.memory_info_ex()
    def memory_percent(self):
        """Compare physical system memory to process resident memory
        (RSS) and calculate process memory utilization as a percentage.
        """
        rss = self._proc.memory_info()[0]
        # use cached value if available
        total_phymem = _TOTAL_PHYMEM or virtual_memory().total
        try:
            return (rss / float(total_phymem)) * 100
        except ZeroDivisionError:
            return 0.0
    def memory_maps(self, grouped=True):
        """Return process' mapped memory regions as a list of namedtuples
        whose fields are variable depending on the platform.
        If 'grouped' is True the mapped regions with the same 'path'
        are grouped together and the different memory fields are summed.
        If 'grouped' is False every mapped region is shown as a single
        entity and the namedtuple will also include the mapped region's
        address space ('addr') and permission set ('perms').
        """
        it = self._proc.memory_maps()
        if grouped:
            d = {}
            for tupl in it:
                path = tupl[2]
                nums = tupl[3:]
                try:
                    # NOTE: on Python 3 map() returns a lazy iterator,
                    # so repeated sums are chained and only materialized
                    # by the final nt(path, *d[path]) unpacking below.
                    d[path] = map(lambda x, y: x + y, d[path], nums)
                except KeyError:
                    d[path] = nums
            nt = _psplatform.pmmap_grouped
            return [nt(path, *d[path]) for path in d]  # NOQA
        else:
            nt = _psplatform.pmmap_ext
            return [nt(*x) for x in it]
    def open_files(self):
        """Return files opened by process as a list of
        (path, fd) namedtuples including the absolute file name
        and file descriptor number.
        """
        return self._proc.open_files()
    def connections(self, kind='inet'):
        """Return connections opened by process as a list of
        (fd, family, type, laddr, raddr, status) namedtuples.
        The 'kind' parameter filters for connections that match the
        following criteria:
        Kind Value Connections using
        inet IPv4 and IPv6
        inet4 IPv4
        inet6 IPv6
        tcp TCP
        tcp4 TCP over IPv4
        tcp6 TCP over IPv6
        udp UDP
        udp4 UDP over IPv4
        udp6 UDP over IPv6
        unix UNIX socket (both UDP and TCP protocols)
        all the sum of all the possible families and protocols
        """
        return self._proc.connections(kind)
    if _POSIX:
        def _send_signal(self, sig):
            # XXX: according to "man 2 kill" PID 0 has a special
            # meaning as it refers to <<every process in the process
            # group of the calling process>>, so should we prevent
            # it here?
            try:
                os.kill(self.pid, sig)
            except OSError as err:
                if err.errno == errno.ESRCH:
                    self._gone = True
                    raise NoSuchProcess(self.pid, self._name)
                if err.errno == errno.EPERM:
                    raise AccessDenied(self.pid, self._name)
                raise
    @_assert_pid_not_reused
    def send_signal(self, sig):
        """Send a signal to process pre-emptively checking whether
        PID has been reused (see signal module constants).
        On Windows only SIGTERM is valid and is treated as an alias
        for kill().
        """
        if _POSIX:
            self._send_signal(sig)
        else:
            if sig == signal.SIGTERM:
                self._proc.kill()
            else:
                raise ValueError("only SIGTERM is supported on Windows")
    @_assert_pid_not_reused
    def suspend(self):
        """Suspend process execution with SIGSTOP pre-emptively checking
        whether PID has been reused.
        On Windows this has the effect of suspending all process threads.
        """
        if _POSIX:
            self._send_signal(signal.SIGSTOP)
        else:
            self._proc.suspend()
    @_assert_pid_not_reused
    def resume(self):
        """Resume process execution with SIGCONT pre-emptively checking
        whether PID has been reused.
        On Windows this has the effect of resuming all process threads.
        """
        if _POSIX:
            self._send_signal(signal.SIGCONT)
        else:
            self._proc.resume()
    @_assert_pid_not_reused
    def terminate(self):
        """Terminate the process with SIGTERM pre-emptively checking
        whether PID has been reused.
        On Windows this is an alias for kill().
        """
        if _POSIX:
            self._send_signal(signal.SIGTERM)
        else:
            self._proc.kill()
    @_assert_pid_not_reused
    def kill(self):
        """Kill the current process with SIGKILL pre-emptively checking
        whether PID has been reused.
        """
        if _POSIX:
            self._send_signal(signal.SIGKILL)
        else:
            self._proc.kill()
    def wait(self, timeout=None):
        """Wait for process to terminate and, if process is a children
        of os.getpid(), also return its exit code, else None.
        If the process is already terminated immediately return None
        instead of raising NoSuchProcess.
        If timeout (in seconds) is specified and process is still alive
        raise TimeoutExpired.
        To wait for multiple Process(es) use psutil.wait_procs().
        """
        if timeout is not None and not timeout >= 0:
            raise ValueError("timeout must be a positive integer")
        return self._proc.wait(timeout)
# =====================================================================
# --- Popen class
# =====================================================================
class Popen(Process):
    """A more convenient interface to stdlib subprocess module.
    It starts a sub process and deals with it exactly as when using
    subprocess.Popen class but in addition also provides all the
    properties and methods of psutil.Process class as a unified
    interface:
    >>> import psutil
    >>> from subprocess import PIPE
    >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
    >>> p.name()
    'python'
    >>> p.uids()
    user(real=1000, effective=1000, saved=1000)
    >>> p.username()
    'giampaolo'
    >>> p.communicate()
    ('hi\n', None)
    >>> p.terminate()
    >>> p.wait(timeout=2)
    0
    >>>
    For method names common to both classes such as kill(), terminate()
    and wait(), psutil.Process implementation takes precedence.
    Unlike subprocess.Popen this class pre-emptively checks whether PID
    has been reused on send_signal(), terminate() and kill() so that
    you don't accidentally terminate another process, fixing
    http://bugs.python.org/issue6973.
    For a complete documentation refer to:
    http://docs.python.org/library/subprocess.html
    """
    def __init__(self, *args, **kwargs):
        # Explicitly avoid to raise NoSuchProcess in case the process
        # spawned by subprocess.Popen terminates too quickly, see:
        # https://github.com/giampaolo/psutil/issues/193
        self.__subproc = subprocess.Popen(*args, **kwargs)
        self._init(self.__subproc.pid, _ignore_nsp=True)
    def __dir__(self):
        # Advertise the attributes of both this class and the wrapped
        # subprocess.Popen so that introspection (and tab completion)
        # covers the unified interface.
        return sorted(set(dir(Popen) + dir(subprocess.Popen)))
    def __getattribute__(self, name):
        # Look the attribute up on this (Process-derived) instance
        # first; fall back to the wrapped subprocess.Popen, so
        # Process methods take precedence on name clashes.
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            try:
                return object.__getattribute__(self.__subproc, name)
            except AttributeError:
                raise AttributeError("%s instance has no attribute '%s'"
                                     % (self.__class__.__name__, name))
    def wait(self, timeout=None):
        # Keep subprocess.Popen's returncode attribute in sync with
        # the exit status observed by psutil.Process.wait().
        if self.__subproc.returncode is not None:
            return self.__subproc.returncode
        ret = super(Popen, self).wait(timeout)
        self.__subproc.returncode = ret
        return ret
# =====================================================================
# --- system processes related functions
# =====================================================================
def pids():
    """Return the PIDs of all processes currently running, as a list
    of integers.
    """
    return _psplatform.pids()
def pid_exists(pid):
    """Return True if given PID exists in the current process list.
    This is faster than doing "pid in psutil.pids()" and
    should be preferred.
    """
    if pid < 0:
        # Negative PIDs can never exist.
        return False
    if pid == 0 and _POSIX:
        # On POSIX existence is normally determined via os.kill(),
        # but per "man 2 kill" PID 0 is special: it refers to
        # <<every process in the process group of the calling
        # process>>, which is not what we want here, so fall back
        # to scanning the full PID list.
        return pid in pids()
    return _psplatform.pid_exists(pid)
# Cache of Process instances keyed by PID, shared across calls of
# process_iter() below.
_pmap = {}
def process_iter():
    """Return a generator yielding a Process instance for all
    running processes.
    Every new Process instance is only created once and then cached
    into an internal table which is updated every time this is used.
    Cached Process instances are checked for identity so that you're
    safe in case a PID has been reused by another process, in which
    case the cached instance is updated.
    The sorting order in which processes are yielded is based on
    their PIDs.
    """
    def add(pid):
        # Instantiate, cache and return a Process for 'pid'.
        proc = Process(pid)
        _pmap[proc.pid] = proc
        return proc
    def remove(pid):
        # Drop 'pid' from the cache; tolerate it being absent.
        _pmap.pop(pid, None)
    a = set(pids())
    b = set(_pmap.keys())
    new_pids = a - b
    gone_pids = b - a
    for pid in gone_pids:
        remove(pid)
    # dict.fromkeys() maps every new PID to None, so in the loop a
    # None 'proc' marks a PID we have not built a Process for yet.
    # The two PID sets are disjoint, so sorting the combined (pid,
    # proc) tuples never compares a Process against None.
    for pid, proc in sorted(list(_pmap.items()) +
                            list(dict.fromkeys(new_pids).items())):
        try:
            if proc is None:  # new process
                yield add(pid)
            else:
                # use is_running() to check whether PID has been reused by
                # another process in which case yield a new Process instance
                if proc.is_running():
                    yield proc
                else:
                    yield add(pid)
        except NoSuchProcess:
            remove(pid)
        except AccessDenied:
            # Process creation time can't be determined hence there's
            # no way to tell whether the pid of the cached process
            # has been reused. Just return the cached version.
            yield proc
def wait_procs(procs, timeout=None, callback=None):
    """Convenience function which waits for a list of processes to
    terminate.
    Return a (gone, alive) tuple indicating which processes
    are gone and which ones are still alive.
    The gone ones will have a new 'returncode' attribute indicating
    process exit status (may be None).
    'callback' is a function which gets called every time a process
    terminates (a Process instance is passed as callback argument).
    Function will return as soon as all processes terminate or when
    timeout occurs.
    Typical use case is:
    - send SIGTERM to a list of processes
    - give them some time to terminate
    - send SIGKILL to those ones which are still alive
    Example:
    >>> def on_terminate(proc):
    ...     print("process {} terminated".format(proc))
    ...
    >>> for p in procs:
    ...    p.terminate()
    ...
    >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
    >>> for p in alive:
    ...     p.kill()
    """
    def check_gone(proc, timeout):
        # Wait on a single process; if it terminated (or its exit
        # status is known) record it as gone and fire the callback.
        try:
            returncode = proc.wait(timeout=timeout)
        except TimeoutExpired:
            pass
        else:
            if returncode is not None or not proc.is_running():
                proc.returncode = returncode
                gone.add(proc)
                if callback is not None:
                    callback(proc)
    if timeout is not None and not timeout >= 0:
        msg = "timeout must be a positive integer, got %s" % timeout
        raise ValueError(msg)
    gone = set()
    alive = set(procs)
    if callback is not None and not callable(callback):
        # BUGFIX: the message used to interpolate the 'callable'
        # builtin instead of the offending 'callback' argument.
        raise TypeError("callback %r is not a callable" % callback)
    if timeout is not None:
        deadline = _timer() + timeout
    while alive:
        if timeout is not None and timeout <= 0:
            break
        for proc in alive:
            # Make sure that every complete iteration (all processes)
            # will last max 1 sec.
            # We do this because we don't want to wait too long on a
            # single process: in case it terminates too late other
            # processes may disappear in the meantime and their PID
            # reused.
            max_timeout = 1.0 / len(alive)
            if timeout is not None:
                timeout = min((deadline - _timer()), max_timeout)
                if timeout <= 0:
                    break
                check_gone(proc, timeout)
            else:
                check_gone(proc, max_timeout)
        alive = alive - gone
    if alive:
        # Last attempt over processes survived so far.
        # timeout == 0 won't make this function wait any further.
        for proc in alive:
            check_gone(proc, 0)
        alive = alive - gone
    return (list(gone), list(alive))
# =====================================================================
# --- CPU related functions
# =====================================================================
@memoize
def cpu_count(logical=True):
"""Return the number of logical CPUs in the system (same as
os.cpu_count() in Python 3.4).
If logical is False return the number of physical cores only
(e.g. hyper thread CPUs are excluded).
Return None if undetermined.
The return value is cached after first call.
If desired cache can be cleared like this:
>>> psutil.cpu_count.cache_clear()
"""
if logical:
return _psplatform.cpu_count_logical()
else:
return _psplatform.cpu_count_physical()
def cpu_times(percpu=False):
"""Return system-wide CPU times as a namedtuple.
Every CPU time represents the seconds the CPU has spent in the given mode.
The namedtuple's fields availability varies depending on the platform:
- user
- system
- idle
- nice (UNIX)
- iowait (Linux)
- irq (Linux, FreeBSD)
- softirq (Linux)
- steal (Linux >= 2.6.11)
- guest (Linux >= 2.6.24)
- guest_nice (Linux >= 3.2.0)
When percpu is True return a list of namedtuples for each CPU.
First element of the list refers to first CPU, second element
to second CPU and so on.
The order of the list is consistent across calls.
"""
if not percpu:
return _psplatform.cpu_times()
else:
return _psplatform.per_cpu_times()
_last_cpu_times = cpu_times()
_last_per_cpu_times = cpu_times(percpu=True)
def cpu_percent(interval=None, percpu=False):
"""Return a float representing the current system-wide CPU
utilization as a percentage.
When interval is > 0.0 compares system CPU times elapsed before
and after the interval (blocking).
When interval is 0.0 or None compares system CPU times elapsed
since last call or module import, returning immediately (non
blocking). That means the first time this is called it will
return a meaningless 0.0 value which you should ignore.
In this case is recommended for accuracy that this function be
called with at least 0.1 seconds between calls.
When percpu is True returns a list of floats representing the
utilization as a percentage for each CPU.
First element of the list refers to first CPU, second element
to second CPU and so on.
The order of the list is consistent across calls.
Examples:
>>> # blocking, system-wide
>>> psutil.cpu_percent(interval=1)
2.0
>>>
>>> # blocking, per-cpu
>>> psutil.cpu_percent(interval=1, percpu=True)
[2.0, 1.0]
>>>
>>> # non-blocking (percentage since last call)
>>> psutil.cpu_percent(interval=None)
2.9
>>>
"""
global _last_cpu_times
global _last_per_cpu_times
blocking = interval is not None and interval > 0.0
def calculate(t1, t2):
t1_all = sum(t1)
t1_busy = t1_all - t1.idle
t2_all = sum(t2)
t2_busy = t2_all - t2.idle
# this usually indicates a float precision issue
if t2_busy <= t1_busy:
return 0.0
busy_delta = t2_busy - t1_busy
all_delta = t2_all - t1_all
busy_perc = (busy_delta / all_delta) * 100
return round(busy_perc, 1)
# system-wide usage
if not percpu:
if blocking:
t1 = cpu_times()
time.sleep(interval)
else:
t1 = _last_cpu_times
_last_cpu_times = cpu_times()
return calculate(t1, _last_cpu_times)
# per-cpu usage
else:
ret = []
if blocking:
tot1 = cpu_times(percpu=True)
time.sleep(interval)
else:
tot1 = _last_per_cpu_times
_last_per_cpu_times = cpu_times(percpu=True)
for t1, t2 in zip(tot1, _last_per_cpu_times):
ret.append(calculate(t1, t2))
return ret
# Use separate global vars for cpu_times_percent() so that it's
# independent from cpu_percent() and they can both be used within
# the same program.
_last_cpu_times_2 = _last_cpu_times
_last_per_cpu_times_2 = _last_per_cpu_times
def cpu_times_percent(interval=None, percpu=False):
"""Same as cpu_percent() but provides utilization percentages
for each specific CPU time as is returned by cpu_times().
For instance, on Linux we'll get:
>>> cpu_times_percent()
cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
>>>
interval and percpu arguments have the same meaning as in
cpu_percent().
"""
global _last_cpu_times_2
global _last_per_cpu_times_2
blocking = interval is not None and interval > 0.0
def calculate(t1, t2):
nums = []
all_delta = sum(t2) - sum(t1)
for field in t1._fields:
field_delta = getattr(t2, field) - getattr(t1, field)
try:
field_perc = (100 * field_delta) / all_delta
except ZeroDivisionError:
field_perc = 0.0
field_perc = round(field_perc, 1)
if _WINDOWS:
# XXX
# Work around:
# https://github.com/giampaolo/psutil/issues/392
# CPU times are always supposed to increase over time
# or at least remain the same and that's because time
# cannot go backwards.
# Surprisingly sometimes this might not be the case on
# Windows where 'system' CPU time can be smaller
# compared to the previous call, resulting in corrupted
# percentages (< 0 or > 100).
# I really don't know what to do about that except
# forcing the value to 0 or 100.
if field_perc > 100.0:
field_perc = 100.0
elif field_perc < 0.0:
field_perc = 0.0
nums.append(field_perc)
return _psplatform.scputimes(*nums)
# system-wide usage
if not percpu:
if blocking:
t1 = cpu_times()
time.sleep(interval)
else:
t1 = _last_cpu_times_2
_last_cpu_times_2 = cpu_times()
return calculate(t1, _last_cpu_times_2)
# per-cpu usage
else:
ret = []
if blocking:
tot1 = cpu_times(percpu=True)
time.sleep(interval)
else:
tot1 = _last_per_cpu_times_2
_last_per_cpu_times_2 = cpu_times(percpu=True)
for t1, t2 in zip(tot1, _last_per_cpu_times_2):
ret.append(calculate(t1, t2))
return ret
# =====================================================================
# --- system memory related functions
# =====================================================================
def virtual_memory():
"""Return statistics about system memory usage as a namedtuple
including the following fields, expressed in bytes:
- total:
total physical memory available.
- available:
the actual amount of available memory that can be given
instantly to processes that request more memory in bytes; this
is calculated by summing different memory values depending on
the platform (e.g. free + buffers + cached on Linux) and it is
supposed to be used to monitor actual memory usage in a cross
platform fashion.
- percent:
the percentage usage calculated as (total - available) / total * 100
- used:
memory used, calculated differently depending on the platform and
designed for informational purposes only:
OSX: active + inactive + wired
BSD: active + wired + cached
LINUX: total - free
- free:
memory not being used at all (zeroed) that is readily available;
note that this doesn't reflect the actual memory available
(use 'available' instead)
Platform-specific fields:
- active (UNIX):
memory currently in use or very recently used, and so it is in RAM.
- inactive (UNIX):
memory that is marked as not used.
- buffers (BSD, Linux):
cache for things like file system metadata.
- cached (BSD, OSX):
cache for various things.
- wired (OSX, BSD):
memory that is marked to always stay in RAM. It is never moved to disk.
- shared (BSD):
memory that may be simultaneously accessed by multiple processes.
The sum of 'used' and 'available' does not necessarily equal total.
On Windows 'available' and 'free' are the same.
"""
global _TOTAL_PHYMEM
ret = _psplatform.virtual_memory()
# cached for later use in Process.memory_percent()
_TOTAL_PHYMEM = ret.total
return ret
def swap_memory():
"""Return system swap memory statistics as a namedtuple including
the following fields:
- total: total swap memory in bytes
- used: used swap memory in bytes
- free: free swap memory in bytes
- percent: the percentage usage
- sin: no. of bytes the system has swapped in from disk (cumulative)
- sout: no. of bytes the system has swapped out from disk (cumulative)
'sin' and 'sout' on Windows are meaningless and always set to 0.
"""
return _psplatform.swap_memory()
# =====================================================================
# --- disks/paritions related functions
# =====================================================================
def disk_usage(path):
"""Return disk usage statistics about the given path as a namedtuple
including total, used and free space expressed in bytes plus the
percentage usage.
"""
return _psplatform.disk_usage(path)
def disk_partitions(all=False):
"""Return mounted partitions as a list of
(device, mountpoint, fstype, opts) namedtuple.
'opts' field is a raw string separated by commas indicating mount
options which may vary depending on the platform.
If "all" parameter is False return physical devices only and ignore
all others.
"""
return _psplatform.disk_partitions(all)
def disk_io_counters(perdisk=False):
"""Return system disk I/O statistics as a namedtuple including
the following fields:
- read_count: number of reads
- write_count: number of writes
- read_bytes: number of bytes read
- write_bytes: number of bytes written
- read_time: time spent reading from disk (in milliseconds)
- write_time: time spent writing to disk (in milliseconds)
If perdisk is True return the same information for every
physical disk installed on the system as a dictionary
with partition names as the keys and the namedtuple
described above as the values.
On recent Windows versions 'diskperf -y' command may need to be
executed first otherwise this function won't find any disk.
"""
rawdict = _psplatform.disk_io_counters()
if not rawdict:
raise RuntimeError("couldn't find any physical disk")
if perdisk:
for disk, fields in rawdict.items():
rawdict[disk] = _common.sdiskio(*fields)
return rawdict
else:
return _common.sdiskio(*[sum(x) for x in zip(*rawdict.values())])
# =====================================================================
# --- network related functions
# =====================================================================
def net_io_counters(pernic=False):
"""Return network I/O statistics as a namedtuple including
the following fields:
- bytes_sent: number of bytes sent
- bytes_recv: number of bytes received
- packets_sent: number of packets sent
- packets_recv: number of packets received
- errin: total number of errors while receiving
- errout: total number of errors while sending
- dropin: total number of incoming packets which were dropped
- dropout: total number of outgoing packets which were dropped
(always 0 on OSX and BSD)
If pernic is True return the same information for every
network interface installed on the system as a dictionary
with network interface names as the keys and the namedtuple
described above as the values.
"""
rawdict = _psplatform.net_io_counters()
if not rawdict:
raise RuntimeError("couldn't find any network interface")
if pernic:
for nic, fields in rawdict.items():
rawdict[nic] = _common.snetio(*fields)
return rawdict
else:
return _common.snetio(*[sum(x) for x in zip(*rawdict.values())])
def net_connections(kind='inet'):
"""Return system-wide connections as a list of
(fd, family, type, laddr, raddr, status, pid) namedtuples.
In case of limited privileges 'fd' and 'pid' may be set to -1
and None respectively.
The 'kind' parameter filters for connections that fit the
following criteria:
Kind Value Connections using
inet IPv4 and IPv6
inet4 IPv4
inet6 IPv6
tcp TCP
tcp4 TCP over IPv4
tcp6 TCP over IPv6
udp UDP
udp4 UDP over IPv4
udp6 UDP over IPv6
unix UNIX socket (both UDP and TCP protocols)
all the sum of all the possible families and protocols
On OSX this function requires root privileges.
"""
return _psplatform.net_connections(kind)
def net_if_addrs():
"""Return the addresses associated to each NIC (network interface
card) installed on the system as a dictionary whose keys are the
NIC names and value is a list of namedtuples for each address
assigned to the NIC. Each namedtuple includes 4 fields:
- family
- address
- netmask
- broadcast
'family' can be either socket.AF_INET, socket.AF_INET6 or
psutil.AF_LINK, which refers to a MAC address.
'address' is the primary address, 'netmask' and 'broadcast'
may be None.
Note: you can have more than one address of the same family
associated with each interface.
"""
has_enums = sys.version_info >= (3, 4)
if has_enums:
import socket
rawlist = _psplatform.net_if_addrs()
rawlist.sort(key=lambda x: x[1]) # sort by family
ret = collections.defaultdict(list)
for name, fam, addr, mask, broadcast in rawlist:
if has_enums:
try:
fam = socket.AddressFamily(fam)
except ValueError:
if os.name == 'nt' and fam == -1:
fam = _psplatform.AF_LINK
elif (hasattr(_psplatform, "AF_LINK") and
_psplatform.AF_LINK == fam):
# Linux defines AF_LINK as an alias for AF_PACKET.
# We re-set the family here so that repr(family)
# will show AF_LINK rather than AF_PACKET
fam = _psplatform.AF_LINK
ret[name].append(_common.snic(fam, addr, mask, broadcast))
return dict(ret)
def net_if_stats():
"""Return information about each NIC (network interface card)
installed on the system as a dictionary whose keys are the
NIC names and value is a namedtuple with the following fields:
- isup: whether the interface is up (bool)
- duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
NIC_DUPLEX_UNKNOWN
- speed: the NIC speed expressed in mega bits (MB); if it can't
be determined (e.g. 'localhost') it will be set to 0.
- mtu: the maximum transmission unit expressed in bytes.
"""
return _psplatform.net_if_stats()
# =====================================================================
# --- other system related functions
# =====================================================================
def boot_time():
"""Return the system boot time expressed in seconds since the epoch."""
# Note: we are not caching this because it is subject to
# system clock updates.
return _psplatform.boot_time()
def users():
"""Return users currently connected on the system as a list of
namedtuples including the following fields.
- user: the name of the user
- terminal: the tty or pseudo-tty associated with the user, if any.
- host: the host name associated with the entry, if any.
- started: the creation time as a floating point number expressed in
seconds since the epoch.
"""
return _psplatform.users()
def test():
"""List info of all currently running processes emulating ps aux
output.
"""
import datetime
today_day = datetime.date.today()
templ = "%-10s %5s %4s %4s %7s %7s %-13s %5s %7s %s"
attrs = ['pid', 'cpu_percent', 'memory_percent', 'name', 'cpu_times',
'create_time', 'memory_info']
if _POSIX:
attrs.append('uids')
attrs.append('terminal')
print(templ % ("USER", "PID", "%CPU", "%MEM", "VSZ", "RSS", "TTY",
"START", "TIME", "COMMAND"))
for p in process_iter():
try:
pinfo = p.as_dict(attrs, ad_value='')
except NoSuchProcess:
pass
else:
if pinfo['create_time']:
ctime = datetime.datetime.fromtimestamp(pinfo['create_time'])
if ctime.date() == today_day:
ctime = ctime.strftime("%H:%M")
else:
ctime = ctime.strftime("%b%d")
else:
ctime = ''
cputime = time.strftime("%M:%S",
time.localtime(sum(pinfo['cpu_times'])))
try:
user = p.username()
except KeyError:
if _POSIX:
if pinfo['uids']:
user = str(pinfo['uids'].real)
else:
user = ''
else:
raise
except Error:
user = ''
if _WINDOWS and '\\' in user:
user = user.split('\\')[1]
vms = pinfo['memory_info'] and \
int(pinfo['memory_info'].vms / 1024) or '?'
rss = pinfo['memory_info'] and \
int(pinfo['memory_info'].rss / 1024) or '?'
memp = pinfo['memory_percent'] and \
round(pinfo['memory_percent'], 1) or '?'
print(templ % (
user[:10],
pinfo['pid'],
pinfo['cpu_percent'],
memp,
vms,
rss,
pinfo.get('terminal', '') or '?',
ctime,
cputime,
pinfo['name'].strip() or '?'))
del memoize, division
if sys.version_info < (3, 0):
del num
if __name__ == "__main__":
test()
``` |
[
{
"content": "Write the code verbatim:\n```python\n\"\"\"This file contains code used in \"Think Stats\",\nby Allen B. Downey, available from greenteapress.com\n\nCopyright 2014 Allen B. Downey\nLicense: GNU GPLv3 http://www.gnu.org/licenses/gpl.html\n\"\"\"\n\nfrom __future__ import print_function\n\nimport ma... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\n\"\"\"This file contains code used in \"Think Stats\",\nby Allen B. Downey, available from greenteapress.com\n\nCopyright 2014 Allen B. Downey\nLicense: GNU GPLv3 http://www.gnu.org/licenses/gpl.html\n\"\"\"\n\nfrom __future__ import print_funct... | ```python
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import random
import scipy.stats
import brfss
import first
import thinkstats2
import thinkplot
def Summarize(data):
mean = data.mean()
std = data.std()
median = thinkstats2.Median(data)
print('mean', mean)
print('std', std)
print('median', median)
print('skewness', thinkstats2.Skewness(data))
print('pearson skewness',
thinkstats2.PearsonMedianSkewness(data))
return mean, median
def ComputeSkewnesses():
def VertLine(x, y):
thinkplot.Plot([x, x], [0, y], color='0.6', linewidth=1)
live, firsts, others = first.MakeFrames()
data = live.totalwgt_lb.dropna()
print('Birth weight')
mean, median = Summarize(data)
y = 0.35
VertLine(mean, y)
thinkplot.Text(mean-0.15, 0.1*y, 'mean', horizontalalignment='right')
VertLine(median, y)
thinkplot.Text(median+0.1, 0.1*y, 'median', horizontalalignment='left')
pdf = thinkstats2.EstimatedPdf(data)
thinkplot.Pdf(pdf, label='birth weight')
thinkplot.Save(root='density_totalwgt_kde',
xlabel='lbs',
ylabel='PDF')
df = brfss.ReadBrfss(nrows=None)
data = df.wtkg2.dropna()
print('Adult weight')
mean, median = Summarize(data)
y = 0.02499
VertLine(mean, y)
thinkplot.Text(mean+1, 0.1*y, 'mean', horizontalalignment='left')
VertLine(median, y)
thinkplot.Text(median-1.5, 0.1*y, 'median', horizontalalignment='right')
pdf = thinkstats2.EstimatedPdf(data)
thinkplot.Pdf(pdf, label='adult weight')
thinkplot.Save(root='density_wtkg2_kde',
xlabel='kg',
ylabel='PDF',
xlim=[0, 200])
def MakePdfExample():
# mean and var of women's heights in cm, from the BRFSS
mean, var = 163, 52.8
std = math.sqrt(var)
# make a PDF and compute a density, FWIW
pdf = thinkstats2.GaussianPdf(mean, std)
print(pdf.Density(mean + std))
# make a PMF and plot it
thinkplot.PrePlot(2)
thinkplot.Pdf(pdf, label='Gaussian')
# make a sample, make an estimated PDF, and plot it
sample = [random.gauss(mean, std) for i in range(100)]
sample_pdf = thinkstats2.EstimatedPdf(sample)
thinkplot.Pdf(sample_pdf, label='sample KDE')
thinkplot.Save(root='pdf_example',
xlabel='Height (cm)',
ylabel='Density')
def main():
thinkstats2.RandomSeed(17)
MakePdfExample()
ComputeSkewnesses()
if __name__ == '__main__':
main()
``` |
[
{
"content": "Replicate the source code:\n```python\nimport numpy as np\n\nimport torch\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport torch.optim as optim\n\nfrom torch.autograd import Variable, Function\n\nfrom binge.layers import ScaledEmbedding, ZeroEmbedding\nfrom binge.native import a... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\nimport numpy as np\n\nimport torch\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport torch.optim as optim\n\nfrom torch.autograd import Variable, Function\n\nfrom binge.layers import ScaledEmbedding, ZeroEmbedding\nfrom binge... | ```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable, Function
from binge.layers import ScaledEmbedding, ZeroEmbedding
from binge.native import align, get_lib
def _gpu(tensor, gpu=False):
if gpu:
return tensor.cuda()
else:
return tensor
def _cpu(tensor):
if tensor.is_cuda:
return tensor.cpu()
else:
return tensor
def _minibatch(tensor, batch_size):
for i in range(0, len(tensor), batch_size):
yield tensor[i:i + batch_size]
def binarize_array(array):
assert array.shape[1] % 8 == 0
array = (np.sign(array) > 0.0).astype(np.bool)
array = np.packbits(array, axis=1)
return array
class BinaryDot(Function):
def forward(self, x, y):
x_scale = x.abs().mean(1)
y_scale = y.abs().mean(1)
sign_x = x.sign()
sign_y = y.sign()
xnor = sign_x * sign_y
self.save_for_backward(x, y)
return xnor.sum(1) * x_scale * y_scale
def backward(self, grad_output):
x, y = self.saved_tensors
embedding_dim = x.size()[1]
grad_output = grad_output.expand_as(x)
x_scale = x.abs().mean(1).expand_as(x)
y_scale = y.abs().mean(1).expand_as(y)
sign_x = x.sign()
sign_y = y.sign()
dx_dsign = (x.abs() <= 1.0).float()
dy_dsign = (y.abs() <= 1.0).float()
grads = (grad_output * sign_y * y_scale *
(1.0 / embedding_dim + dx_dsign * x_scale),
grad_output * sign_x * x_scale *
(1.0 / embedding_dim + dy_dsign * y_scale))
return grads
def binary_dot(x, y):
return BinaryDot()(x, y)
class BilinearNet(nn.Module):
def __init__(self,
num_users,
num_items,
embedding_dim,
xnor=False,
sparse=False):
super().__init__()
self.xnor = xnor
self.embedding_dim = embedding_dim
self.user_embeddings = ScaledEmbedding(num_users, embedding_dim,
sparse=sparse)
self.item_embeddings = ScaledEmbedding(num_items, embedding_dim,
sparse=sparse)
self.user_biases = ZeroEmbedding(num_users, 1, sparse=sparse)
self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse)
def forward(self, user_ids, item_ids):
user_embedding = self.user_embeddings(user_ids)
item_embedding = self.item_embeddings(item_ids)
user_embedding = user_embedding.view(-1, self.embedding_dim)
item_embedding = item_embedding.view(-1, self.embedding_dim)
user_bias = self.user_biases(user_ids).view(-1, 1)
item_bias = self.item_biases(item_ids).view(-1, 1)
if self.xnor:
dot = binary_dot(user_embedding, item_embedding)
else:
dot = (user_embedding * item_embedding).sum(1)
return dot + user_bias + item_bias
class FactorizationModel(object):
"""
A number of classic factorization models, implemented in PyTorch.
Available loss functions:
- pointwise logistic
- BPR: Rendle's personalized Bayesian ranking
- adaptive: a variant of WARP with adaptive selection of negative samples
- regression: minimizing the regression loss between true and predicted ratings
- truncated_regression: truncated regression model, that jointly models
the likelihood of a rating being given and the value
of the rating itself.
Performance notes: neural network toolkits do not perform well on sparse tasks
like recommendations. To achieve acceptable speed, either use the `sparse` option
on a CPU or use CUDA with very big minibatches (1024+).
"""
def __init__(self,
loss='pointwise',
xnor=False,
embedding_dim=64,
n_iter=3,
batch_size=64,
l2=0.0,
learning_rate=1e-3,
use_cuda=False,
sparse=False,
random_seed=None):
assert loss in ('pointwise',
'bpr',
'adaptive')
self._loss = loss
self._embedding_dim = embedding_dim
self._n_iter = n_iter
self._batch_size = batch_size
self._l2 = l2
self._learning_rate = learning_rate
self._use_cuda = use_cuda
self._sparse = sparse
self._xnor = xnor
self._random_state = np.random.RandomState(random_seed)
self._num_users = None
self._num_items = None
self._net = None
def get_params(self):
return {'loss': self._loss,
'embedding_dim': self._embedding_dim,
'n_iter': self._n_iter,
'batch_size': self._batch_size,
'l2': self._l2,
'learning_rate': self._learning_rate,
'use_cuda': self._use_cuda,
'xnor': self._xnor}
def _pointwise_loss(self, users, items, ratings):
negatives = Variable(
_gpu(
torch.from_numpy(self._random_state.randint(0,
self._num_items,
len(users))),
self._use_cuda)
)
positives_loss = (1.0 - F.sigmoid(self._net(users, items)))
negatives_loss = F.sigmoid(self._net(users, negatives))
return torch.cat([positives_loss, negatives_loss]).mean()
def _bpr_loss(self, users, items, ratings):
negatives = Variable(
_gpu(
torch.from_numpy(self._random_state.randint(0,
self._num_items,
len(users))),
self._use_cuda)
)
return (1.0 - F.sigmoid(self._net(users, items) -
self._net(users, negatives))).mean()
def _adaptive_loss(self, users, items, ratings, n_neg_candidates=5):
negatives = Variable(
_gpu(
torch.from_numpy(
self._random_state.randint(0, self._num_items,
(len(users), n_neg_candidates))),
self._use_cuda)
)
negative_predictions = self._net(
users.repeat(n_neg_candidates, 1).transpose(0,1),
negatives
).view(-1, n_neg_candidates)
best_negative_prediction, _ = negative_predictions.max(1)
positive_prediction = self._net(users, items)
return torch.mean(torch.clamp(best_negative_prediction -
positive_prediction
+ 1.0, 0.0))
def _shuffle(self, interactions):
users = interactions.row
items = interactions.col
ratings = interactions.data
shuffle_indices = np.arange(len(users))
self._random_state.shuffle(shuffle_indices)
return (users[shuffle_indices].astype(np.int64),
items[shuffle_indices].astype(np.int64),
ratings[shuffle_indices].astype(np.float32))
def fit(self, interactions, verbose=False):
"""
Fit the model.
Arguments
---------
interactions: np.float32 coo_matrix of shape [n_users, n_items]
the matrix containing
user-item interactions.
verbose: Bool, optional
Whether to print epoch loss statistics.
"""
self._num_users, self._num_items = interactions.shape
self._net = _gpu(
BilinearNet(self._num_users,
self._num_items,
self._embedding_dim,
xnor=self._xnor,
sparse=self._sparse),
self._use_cuda
)
optimizer = optim.Adam(self._net.parameters(),
lr=self._learning_rate,
weight_decay=self._l2)
if self._loss == 'pointwise':
loss_fnc = self._pointwise_loss
elif self._loss == 'bpr':
loss_fnc = self._bpr_loss
else:
loss_fnc = self._adaptive_loss
for epoch_num in range(self._n_iter):
users, items, ratings = self._shuffle(interactions)
user_ids_tensor = _gpu(torch.from_numpy(users),
self._use_cuda)
item_ids_tensor = _gpu(torch.from_numpy(items),
self._use_cuda)
ratings_tensor = _gpu(torch.from_numpy(ratings),
self._use_cuda)
epoch_loss = 0.0
for (batch_user,
batch_item,
batch_ratings) in zip(_minibatch(user_ids_tensor,
self._batch_size),
_minibatch(item_ids_tensor,
self._batch_size),
_minibatch(ratings_tensor,
self._batch_size)):
user_var = Variable(batch_user)
item_var = Variable(batch_item)
ratings_var = Variable(batch_ratings)
optimizer.zero_grad()
loss = loss_fnc(user_var, item_var, ratings_var)
epoch_loss += loss.data[0]
loss.backward()
# return loss
optimizer.step()
if verbose:
print('Epoch {}: loss {}'.format(epoch_num, epoch_loss))
def predict(self, user_ids, item_ids=None):
"""
Compute the recommendation score for user-item pairs.
Arguments
---------
user_ids: integer or np.int32 array of shape [n_pairs,]
single user id or an array containing the user ids for the user-item pairs for which
a prediction is to be computed
item_ids: optional, np.int32 array of shape [n_pairs,]
an array containing the item ids for the user-item pairs for which
a prediction is to be computed. If not provided, scores for
all items will be computed.
"""
if item_ids is None:
item_ids = np.arange(self._num_items, dtype=np.int64)
if isinstance(user_ids, int):
user_id = user_ids
user_ids = np.empty_like(item_ids)
user_ids.fill(user_id)
user_ids = torch.from_numpy(user_ids.reshape(-1, 1).astype(np.int64))
item_ids = torch.from_numpy(item_ids.reshape(-1, 1).astype(np.int64))
user_var = Variable(_gpu(user_ids, self._use_cuda))
item_var = Variable(_gpu(item_ids, self._use_cuda))
out = self._net(user_var, item_var)
return _cpu(out.data).numpy().flatten()
def get_scorer(self):
get_param = lambda l: _cpu([x for x in l.parameters()][0]).data.numpy().squeeze()
if self._xnor:
return XNORScorer(get_param(self._net.user_embeddings),
get_param(self._net.user_biases),
get_param(self._net.item_embeddings),
get_param(self._net.item_biases))
else:
return Scorer(get_param(self._net.user_embeddings),
get_param(self._net.user_biases),
get_param(self._net.item_embeddings),
get_param(self._net.item_biases))
class Scorer:
def __init__(self,
user_vectors,
user_biases,
item_vectors,
item_biases):
self._user_vectors = align(user_vectors)
self._user_biases = align(user_biases)
self._item_vectors = align(item_vectors)
self._item_biases = align(item_biases)
self._lib = get_lib()
def _parameters(self):
return (self._user_vectors,
self._item_vectors,
self._user_biases,
self._item_biases)
def predict(self, user_id, item_ids=None):
if item_ids is None:
item_ids = slice(0, None, None)
return self._lib.predict_float_256(
align(self._user_vectors[user_id]),
self._item_vectors[item_ids],
self._user_biases[user_id],
self._item_biases[item_ids])
def _predict_bench(self, user_id, out):
return self._lib.predict_float_256(
align(self._user_vectors[user_id]),
self._item_vectors,
self._user_biases[user_id],
self._item_biases,
out)
def memory(self):
get_size = lambda x: x.itemsize * x.size
return sum(get_size(x) for x in self._parameters())
class XNORScorer:
def __init__(self,
user_vectors,
user_biases,
item_vectors,
item_biases):
assert item_vectors.shape[1] >= 32
self._user_norms = align(np.abs(user_vectors).mean(axis=1))
self._item_norms = align(np.abs(item_vectors).mean(axis=1))
self._user_vectors = align(binarize_array(user_vectors))
self._user_biases = align(user_biases)
self._item_vectors = align(binarize_array(item_vectors))
self._item_biases = align(item_biases)
self._lib = get_lib()
def _parameters(self):
return (self._user_norms,
self._item_norms,
self._user_vectors,
self._item_vectors,
self._user_biases,
self._item_biases)
def predict(self, user_id, item_ids=None):
if item_ids is None:
item_ids = slice(0, None, None)
return self._lib.predict_xnor_256(
align(self._user_vectors[user_id]),
self._item_vectors[item_ids],
self._user_biases[user_id],
self._item_biases[item_ids],
self._user_norms[user_id],
self._item_norms[item_ids])
def _predict_bench(self, user_id, out):
return self._lib.predict_xnor_256(
align(self._user_vectors[user_id]),
self._item_vectors,
self._user_biases[user_id],
self._item_biases,
self._user_norms[user_id],
self._item_norms,
out)
def memory(self):
get_size = lambda x: x.itemsize * x.size
return sum(get_size(x) for x in self._parameters())
class PopularityModel:
def __init__(self):
self._popularity = None
def fit(self, interactions):
self._popularity = interactions.getnnz(axis=0).astype(np.float32)
assert len(self._popularity) == interactions.shape[1]
def predict(self, user_ids, item_ids=None):
if item_ids is not None:
return self._popularity[item_ids]
else:
return self._popularity
``` |
[
{
"content": "Repeat the code precisely:\n```python\nfrom pixelpuncher.game.utils.message import add_game_message\nfrom pixelpuncher.game.utils.messages import learned_skill_message\nfrom pixelpuncher.player.models import PlayerSkill, Skill\n\n\ndef add_starting_skills(player):\n skills = Skill.objects.filte... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\nfrom pixelpuncher.game.utils.message import add_game_message\nfrom pixelpuncher.game.utils.messages import learned_skill_message\nfrom pixelpuncher.player.models import PlayerSkill, Skill\n\n\ndef add_starting_skills(player):\n skills = Ski... | ```python
from pixelpuncher.game.utils.message import add_game_message
from pixelpuncher.game.utils.messages import learned_skill_message
from pixelpuncher.player.models import PlayerSkill, Skill
def add_starting_skills(player):
skills = Skill.objects.filter(level=1)
for skill in skills:
create_player_skill(player, skill)
def add_skills(player, level):
skills = Skill.objects.filter(level=level)
for skill in skills:
create_player_skill(player, skill)
add_game_message(player, learned_skill_message(skill))
def create_player_skill(player, skill):
player_skill = PlayerSkill()
player_skill.skill = skill
player_skill.player = player
player_skill.hit_percentage = skill.hit_percentage
player_skill.critical_percentage = skill.critical_percentage
player_skill.critical_multipler = skill.critical_multipler
player_skill.energy_cost = skill.energy_cost
player_skill.number_of_dice = skill.number_of_dice
player_skill.dice_sides = skill.dice_sides
player_skill.bonus = skill.bonus
player_skill.remaining_for_level_up = skill.gain_frequency
player_skill.save()
return player_skill
def level_skill(player_skill):
level_up = False
player_skill.remaining_for_level_up -= 1
if player_skill.remaining_for_level_up == 0:
level_up = True
player_skill.level += 1
player_skill.hit_percentage += player_skill.skill.gained_hit
player_skill.critical_percentage += player_skill.skill.gained_critical
player_skill.critical_multipler += player_skill.skill.gained_critical_multipler
player_skill.energy_cost += player_skill.skill.gained_energy_cost
player_skill.bonus += player_skill.skill.gained_bonus
player_skill.remaining_for_level_up = player_skill.skill.gain_frequency
player_skill.save()
return level_up
``` |
[
{
"content": "Here is the code content:\n```python\nfrom .base import Base\n\n\nclass Capture(Base):\n @classmethod\n def get_resource_class(cls, client):\n from ..resources.captures import Captures\n\n return Captures(client)\n\n @property\n def id(self):\n return self._get_pro... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\nfrom .base import Base\n\n\nclass Capture(Base):\n @classmethod\n def get_resource_class(cls, client):\n from ..resources.captures import Captures\n\n return Captures(client)\n\n @property\n def id(self):\n retu... | ```python
from .base import Base
class Capture(Base):
@classmethod
def get_resource_class(cls, client):
from ..resources.captures import Captures
return Captures(client)
@property
def id(self):
return self._get_property("id")
@property
def mode(self):
return self._get_property("mode")
@property
def amount(self):
return self._get_property("amount")
@property
def settlement_amount(self):
return self._get_property("settlementAmount")
@property
def payment_id(self):
return self._get_property("paymentId")
@property
def shipment_id(self):
return self._get_property("shipmentId")
@property
def settlement_id(self):
return self._get_property("settlementId")
@property
def created_at(self):
return self._get_property("createdAt")
@property
def payment(self):
"""Return the payment for this capture."""
return self.client.payments.get(self.payment_id)
@property
def shipment(self):
"""Return the shipment for this capture."""
from .shipment import Shipment
url = self._get_link("shipment")
if url:
resp = self.client.orders.perform_api_call(self.client.orders.REST_READ, url)
return Shipment(resp)
@property
def settlement(self):
"""Return the settlement for this capture."""
return self.client.settlements.get(self.settlement_id)
``` |
[
{
"content": "Here is the snippet:\n```python\n# -*- coding: utf-8 -*-\n#\n# installation_process.py\n# \n# Copyright 2013 Antergos\n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foun... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# installation_process.py\n# \n# Copyright 2013 Antergos\n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Fr... | ```python
# -*- coding: utf-8 -*-
#
# installation_process.py
#
# Copyright 2013 Antergos
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import multiprocessing
import queue
import subprocess
import os
import sys
import time
import shutil
import xml.etree.ElementTree as etree
import urllib.request
import urllib.error
import crypt
import download
import config
import logging
import info
# Insert the src/pacman directory at the front of the path.
base_dir = os.path.dirname(__file__) or '.'
pacman_dir = os.path.join(base_dir, 'pacman')
sys.path.insert(0, pacman_dir)
# Insert the src/parted directory at the front of the path.
base_dir = os.path.dirname(__file__) or '.'
parted_dir = os.path.join(base_dir, 'parted')
sys.path.insert(0, parted_dir)
import fs_module as fs
import misc
import pac
import auto_partition
_postinstall_script = 'postinstall.sh'
class InstallError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InstallationProcess(multiprocessing.Process):
def __init__(self, settings, callback_queue, mount_devices, \
fs_devices, ssd=None, alternate_package_list="", blvm=False):
multiprocessing.Process.__init__(self)
self.alternate_package_list = alternate_package_list
self.callback_queue = callback_queue
self.settings = settings
# Used to know if there is a lvm partition (from advanced install)
# so we'll have to add the lvm2 hook to mkinitcpio
self.blvm = blvm
self.method = self.settings.get('partition_mode')
self.queue_event('info', _("Installing using the '%s' method") % self.method)
self.ssd = ssd
self.mount_devices = mount_devices
# Check desktop selected to load packages needed
self.desktop = self.settings.get('desktop')
# Set defaults
self.desktop_manager = 'gdm'
self.network_manager = 'NetworkManager'
self.card = []
# Packages to be removed
self.conflicts = []
self.fs_devices = fs_devices
self.running = True
self.error = False
self.special_dirs_mounted = False
def queue_fatal_event(self, txt):
# Queue the fatal event and exit process
self.error = True
self.running = False
self.queue_event('error', txt)
self.callback_queue.join()
sys.exit(1)
def queue_event(self, event_type, event_text=""):
try:
self.callback_queue.put_nowait((event_type, event_text))
except queue.Full:
pass
@misc.raise_privileges
def run(self):
p = multiprocessing.current_process()
#log.debug("Starting: [%d] %s" % (p.pid, p.name))
# Common vars
self.packages = []
self.dest_dir = "/install"
if not os.path.exists(self.dest_dir):
os.makedirs(self.dest_dir)
else:
# If we're recovering from a failed/stoped install, there'll be
# some mounted directories. Try to unmount them first
install_dirs = { "boot", "dev", "proc", "sys", "var" }
for p in install_dirs:
p = os.path.join(self.dest_dir, p)
(fsname, fstype, writable) = misc.mount_info(p)
if fsname:
subprocess.check_call(['umount', p])
self.queue_event('debug', "%s unmounted" % p)
# now we can unmount /install
(fsname, fstype, writable) = misc.mount_info(self.dest_dir)
if fsname:
subprocess.check_call(['umount', self.dest_dir])
self.queue_event('debug', "%s unmounted" % self.dest_dir)
self.kernel_pkg = "linux"
self.vmlinuz = "vmlinuz-%s" % self.kernel_pkg
self.initramfs = "initramfs-%s" % self.kernel_pkg
self.arch = os.uname()[-1]
# Create and format partitions
if self.method == 'automatic':
self.auto_device = self.settings.get('auto_device')
self.queue_event('debug', "Creating partitions and their filesystems in %s" % self.auto_device)
# TODO: Ask for a key password if we are using LUKS (in installation_automatic.py)
# if no key password is given a key file is generated and stored in /boot
# (see auto_partition.py)
try:
ap = auto_partition.AutoPartition(self.dest_dir,
self.auto_device,
self.settings.get("use_luks"),
self.settings.get("use_lvm"),
self.settings.get("luks_key_pass"))
ap.run()
# Get mount_devices and fs_devices
# (mount_devices will be used when configuring GRUB in modify_grub_default)
# (fs_devices will be used when configuring the fstab file)
self.mount_devices = ap.get_mount_devices()
self.fs_devices = ap.get_fs_devices()
except subprocess.CalledProcessError as e:
logging.error(e.output)
self.queue_event('error', _("Error creating partitions and their filesystems"))
return
if self.method == 'alongside':
# Alongside method shrinks selected partition
# and creates root and swap partition in the available space
boot_partition, root_partition = shrink(self.mount_devices["alongside"])
# Alongside method formats root by default (as it is always a new partition)
(error, msg) = fs.create_fs(self.mount_devices["/"], "ext4")
if self.method == 'advanced':
root_partition = self.mount_devices["/"]
if root_partition in self.fs_devices:
root_fs = self.fs_devices[root_partition]
else:
root_fs = "ext4"
if "/boot" in self.mount_devices:
boot_partition = self.mount_devices["/boot"]
else:
boot_partition = ""
if "swap" in self.mount_devices:
swap_partition = self.mount_devices["swap"]
else:
swap_partition = ""
# NOTE: Advanced method formats root by default in installation_advanced
# Create the directory where we will mount our new root partition
if not os.path.exists(self.dest_dir):
os.mkdir(self.dest_dir)
# Mount root and boot partitions (only if it's needed)
# Not doing this in automatic mode as AutoPartition class mounts the root and boot devices itself.
if self.method == 'alongside' or self.method == 'advanced':
try:
txt = _("Mounting partition %s into %s directory") % (root_partition, self.dest_dir)
self.queue_event('debug', txt)
subprocess.check_call(['mount', root_partition, self.dest_dir])
# We also mount the boot partition if it's needed
subprocess.check_call(['mkdir', '-p', '%s/boot' % self.dest_dir])
if "/boot" in self.mount_devices:
txt = _("Mounting partition %s into %s/boot directory") % (boot_partition, self.dest_dir)
self.queue_event('debug', txt)
subprocess.check_call(['mount', boot_partition, "%s/boot" % self.dest_dir])
except subprocess.CalledProcessError as e:
self.queue_fatal_event(_("Couldn't mount root and boot partitions"))
return False
# In advanced mode, mount all partitions (root and boot are already mounted)
if self.method == 'advanced':
for path in self.mount_devices:
mp = self.mount_devices[path]
if mp != root_partition and mp != boot_partition and mp != swap_partition:
try:
mount_dir = self.dest_dir + path
if not os.path.exists(mount_dir):
os.makedirs(mount_dir)
txt = _("Mounting partition %s into %s directory") % (mp, mount_dir)
self.queue_event('debug', txt)
subprocess.check_call(['mount', mp, mount_dir])
except subprocess.CalledProcessError as e:
# We will continue as root and boot are already mounted
self.queue_event('debug', _("Can't mount %s in %s") % (mp, mount_dir))
# Nasty workaround:
# If pacman was stoped and /var is in another partition than root
# (so as to be able to resume install), database lock file will still be in place.
# We must delete it or this new installation will fail
db_lock = os.path.join(self.dest_dir, "var/lib/pacman/db.lck")
if os.path.exists(db_lock):
with misc.raised_privileges():
os.remove(db_lock)
logging.debug("%s deleted" % db_lock)
# Create some needed folders
try:
subprocess.check_call(['mkdir', '-p', '%s/var/lib/pacman' % self.dest_dir])
subprocess.check_call(['mkdir', '-p', '%s/etc/pacman.d/gnupg/' % self.dest_dir])
subprocess.check_call(['mkdir', '-p', '%s/var/log/' % self.dest_dir])
except subprocess.CalledProcessError as e:
self.queue_fatal_event(_("Can't create necessary directories on destination system"))
return False
try:
self.queue_event('debug', 'Selecting packages...')
self.select_packages()
self.queue_event('debug', 'Packages selected')
if self.settings.get("use_aria2"):
self.queue_event('debug', 'Downloading packages...')
self.download_packages()
self.queue_event('debug', 'Packages downloaded.')
cache_dir = self.settings.get("cache")
if len(cache_dir) > 0:
self.copy_cache_files(cache_dir)
self.queue_event('debug', 'Installing packages...')
self.install_packages()
self.queue_event('debug', 'Packages installed.')
if self.settings.get('install_bootloader'):
self.queue_event('debug', 'Installing bootloader...')
self.install_bootloader()
self.queue_event('debug', 'Configuring system...')
self.configure_system()
self.queue_event('debug', 'System configured.')
except subprocess.CalledProcessError as e:
self.queue_fatal_event("CalledProcessError.output = %s" % e.output)
return False
except InstallError as e:
self.queue_fatal_event(e.value)
return False
except:
# unknown error
self.running = False
self.error = True
return False
# installation finished ok
self.queue_event("finished")
self.running = False
self.error = False
return True
def download_packages(self):
conf_file = "/tmp/pacman.conf"
cache_dir = "%s/var/cache/pacman/pkg" % self.dest_dir
download.DownloadPackages(self.packages, conf_file, cache_dir, self.callback_queue)
# creates temporary pacman.conf file
def create_pacman_conf(self):
self.queue_event('debug', "Creating a temporary pacman.conf for %s architecture" % self.arch)
# Common repos
# TODO: Instead of hardcoding pacman.conf, we could use an external file
with open("/tmp/pacman.conf", "wt") as tmp_file:
tmp_file.write("[options]\n")
tmp_file.write("Architecture = auto\n")
tmp_file.write("SigLevel = PackageOptional\n")
tmp_file.write("RootDir = %s\n" % self.dest_dir)
tmp_file.write("DBPath = %s/var/lib/pacman/\n" % self.dest_dir)
tmp_file.write("CacheDir = %s/var/cache/pacman/pkg\n" % self.dest_dir)
tmp_file.write("LogFile = /tmp/pacman.log\n\n")
# ¿?
#tmp_file.write("CacheDir = /packages/core-%s/pkg\n" % self.arch)
#tmp_file.write("CacheDir = /packages/core-any/pkg\n\n")
tmp_file.write("# Repositories\n\n")
tmp_file.write("[core]\n")
tmp_file.write("SigLevel = PackageRequired\n")
tmp_file.write("Include = /etc/pacman.d/mirrorlist\n\n")
tmp_file.write("[extra]\n")
tmp_file.write("SigLevel = PackageRequired\n")
tmp_file.write("Include = /etc/pacman.d/mirrorlist\n\n")
tmp_file.write("[community]\n")
tmp_file.write("SigLevel = PackageRequired\n")
tmp_file.write("Include = /etc/pacman.d/mirrorlist\n\n")
# x86_64 repos only
if self.arch == 'x86_64':
tmp_file.write("[multilib]\n")
tmp_file.write("SigLevel = PackageRequired\n")
tmp_file.write("Include = /etc/pacman.d/mirrorlist\n\n")
## Init pyalpm
try:
self.pac = pac.Pac("/tmp/pacman.conf", self.callback_queue)
except:
raise InstallError("Can't initialize pyalpm.")
# Add gnupg pacman files to installed system
def prepare_pacman_keychain(self):
dest_path = os.path.join(self.dest_dir, "etc/pacman.d/gnupg")
try:
misc.copytree('/etc/pacman.d/gnupg', dest_path)
except (FileExistsError, shutil.Error) as e:
# log error but continue anyway
logging.exception(e)
# Configures pacman and syncs db on destination system
def prepare_pacman(self):
dirs = [ "var/cache/pacman/pkg", "var/lib/pacman" ]
for d in dirs:
mydir = os.path.join(self.dest_dir, d)
if not os.path.exists(mydir):
os.makedirs(mydir)
self.prepare_pacman_keychain()
self.pac.do_refresh()
# Prepare pacman and get package list from Internet
def select_packages(self):
self.create_pacman_conf()
self.prepare_pacman()
if len(self.alternate_package_list) > 0:
packages_xml = self.alternate_package_list
else:
'''The list of packages is retrieved from an online XML to let us
control the pkgname in case of any modification'''
self.queue_event('info', "Getting package list...")
try:
packages_xml = urllib.request.urlopen('http://install.antergos.com/packages-%s.xml' % info.cnchi_VERSION[:3], timeout=5)
except urllib.error.URLError as e:
# If the installer can't retrieve the remote file, try to install with a local
# copy, that may not be updated
self.queue_event('debug', _("Can't retrieve remote package list, using a local file instead."))
data_dir = self.settings.get("data")
packages_xml = os.path.join(data_dir, 'packages.xml')
tree = etree.parse(packages_xml)
root = tree.getroot()
self.queue_event('debug', _("Adding all desktops common packages"))
for child in root.iter('common_system'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
if self.desktop != "nox":
for child in root.iter('graphic_system'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
self.queue_event('debug', _("Adding '%s' desktop packages") % self.desktop)
for child in root.iter(self.desktop + '_desktop'):
for pkg in child.iter('pkgname'):
# If package is Desktop Manager, save name to
# activate the correct service
if pkg.attrib.get('dm'):
self.desktop_manager = pkg.attrib.get('name')
if pkg.attrib.get('nm'):
self.network_manager = pkg.attrib.get('name')
if pkg.attrib.get('conflicts'):
self.conflicts.append(pkg.attrib.get('conflicts'))
self.packages.append(pkg.text)
else:
# Add specific NoX/Base packages
for child in root.iter('nox'):
for pkg in child.iter('pkgname'):
if pkg.attrib.get('nm'):
self.network_manager = pkg.attrib.get('name')
if pkg.attrib.get('conflicts'):
self.conflicts.append(pkg.attrib.get('conflicts'))
self.packages.append(pkg.text)
# Always install ntp as the user may want to activate it
# later (or not) in the timezone screen
for child in root.iter('ntp'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
# Install graphic cards drivers except in NoX installs
if self.desktop != "nox":
self.queue_event('debug', _("Getting graphics card drivers"))
graphics = self.get_graphics_card()
if "ati " in graphics:
for child in root.iter('ati'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
self.card.append('ati')
if "nvidia" in graphics:
for child in root.iter('nvidia'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
self.card.append('nvidia')
if "intel" in graphics or "lenovo" in graphics:
for child in root.iter('intel'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
self.card.append('intel')
if "virtualbox" in graphics:
for child in root.iter('virtualbox'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
if "vmware" in graphics:
for child in root.iter('vmware'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
if "via " in graphics:
for child in root.iter('via'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
# Add xorg-drivers group if cnchi can't figure it out
# the graphic card driver.
if graphics not in ('ati ', 'nvidia', 'intel', 'virtualbox' \
'vmware', 'via '):
self.packages.append('xorg-drivers')
# Add filesystem packages
self.queue_event('debug', _("Adding filesystem packages"))
fs_types = subprocess.check_output(\
["blkid", "-c", "/dev/null", "-o", "value", "-s", "TYPE"]).decode()
for iii in self.fs_devices:
fs_types += self.fs_devices[iii]
if "ntfs" in fs_types:
for child in root.iter('ntfs'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
if "btrfs" in fs_types:
for child in root.iter('btrfs'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
if "nilfs2" in fs_types:
for child in root.iter('nilfs2'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
if "ext" in fs_types:
for child in root.iter('ext'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
if "reiserfs" in fs_types:
for child in root.iter('reiserfs'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
if "xfs" in fs_types:
for child in root.iter('xfs'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
if "jfs" in fs_types:
for child in root.iter('jfs'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
if "vfat" in fs_types:
for child in root.iter('vfat'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
# Check for user desired features and add them to our installation
self.queue_event('debug', _("Check for user desired features and add them to our installation"))
self.add_packages_for_selected_features(root)
# Add chinese fonts
lang_code = self.settings.get("language_code")
if lang_code == "zh_TW" or lang_code == "zh_CN":
self.queue_event('debug', 'Selecting chinese fonts.')
for child in root.iter('chinese'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
# Add bootloader packages if needed
self.queue_event('debug', _("Adding bootloader packages if needed"))
if self.settings.get('install_bootloader'):
bt = self.settings.get('bootloader_type')
if bt == "GRUB2":
for child in root.iter('grub'):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
elif bt == "UEFI_x86_64":
for child in root.iter('grub-efi'):
if root.attrib.get('uefiarch') == "x86_64":
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
elif bt == "UEFI_i386":
for child in root.iter('grub-efi'):
if root.attrib.get('uefiarch') == "i386":
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
def add_packages_for_selected_features(self, root):
features = [ "bluetooth", "cups", "office", "visual", "firewall", "third_party" ]
for feature in features:
# Add necessary packages for user desired features to our install list
if self.settings.get("feature_" + feature):
self.queue_event('debug', 'Adding packages for "%s" feature.' % feature)
for child in root.iter(feature):
for pkg in child.iter('pkgname'):
self.packages.append(pkg.text)
# Add libreoffice language package
if self.settings.get('feature_office'):
pkg = ""
lang_name = self.settings.get("language_name").lower()
if lang_name == "english":
# There're some English variants available but not all of them.
lang_packs = [ 'en-GB', 'en-US', 'en-ZA' ]
locale = self.settings.get('locale').split('.')[0]
locale = locale.replace('_', '-')
if locale in lang_packs:
pkg = "libreoffice-%s" % locale
else:
# Install American English if there is not an specific
# language package available.
pkg = "libreoffice-en-US"
else:
# All the other language packs use their language code
lang_code = self.settings.get('language_code')
lang_code = lang_code.replace('_', '-')
pkg = "libreoffice-%s" % lang_code
self.packages.append(pkg)
def get_graphics_card(self):
p1 = subprocess.Popen(["hwinfo", "--gfxcard"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "Model:[[:space:]]"],\
stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
out, err = p2.communicate()
return out.decode().lower()
def install_packages(self):
self.chroot_mount_special_dirs()
self.pac.do_install(self.packages, self.conflicts)
self.chroot_umount_special_dirs()
def chroot_mount_special_dirs(self):
# Do not remount
if self.special_dirs_mounted:
self.queue_event('debug', _("Special dirs already mounted."))
return
dirs = [ "sys", "proc", "dev" ]
for d in dirs:
mydir = os.path.join(self.dest_dir, d)
if not os.path.exists(mydir):
os.makedirs(mydir)
mydir = os.path.join(self.dest_dir, "sys")
subprocess.check_call(["mount", "-t", "sysfs", "sysfs", mydir])
subprocess.check_call(["chmod", "555", mydir])
mydir = os.path.join(self.dest_dir, "proc")
subprocess.check_call(["mount", "-t", "proc", "proc", mydir])
subprocess.check_call(["chmod", "555", mydir])
mydir = os.path.join(self.dest_dir, "dev")
subprocess.check_call(["mount", "-o", "bind", "/dev", mydir])
self.special_dirs_mounted = True
def chroot_umount_special_dirs(self):
# Do not umount if they're not mounted
if not self.special_dirs_mounted:
self.queue_event('debug', _("Special dirs already not mounted."))
return
dirs = [ "proc", "sys", "dev" ]
for d in dirs:
mydir = os.path.join(self.dest_dir, d)
try:
subprocess.check_call(["umount", mydir])
except:
self.queue_event('warning', _("Unable to umount %s") % mydir)
self.special_dirs_mounted = False
def chroot(self, cmd, stdin=None, stdout=None):
run = [ 'chroot', self.dest_dir ]
for c in cmd:
run.append(c)
try:
proc = subprocess.Popen(run,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out = proc.communicate()[0]
logging.debug(out.decode())
except OSError as e:
logging.exception("Error running command: %s" % e.strerror)
raise
def is_running(self):
return self.running
def is_ok(self):
return not self.error
def copy_network_config(self):
source_nm = "/etc/NetworkManager/system-connections/"
target_nm = "%s/etc/NetworkManager/system-connections/" % self.dest_dir
# Sanity checks. We don't want to do anything if a network
# configuration already exists on the target
if os.path.exists(source_nm) and os.path.exists(target_nm):
for network in os.listdir(source_nm):
# Skip LTSP live
if network == "LTSP":
continue
source_network = os.path.join(source_nm, network)
target_network = os.path.join(target_nm, network)
if os.path.exists(target_network):
continue
shutil.copy(source_network, target_network)
# TODO: Take care of swap partitions
def auto_fstab(self):
all_lines = []
all_lines.append("# /etc/fstab: static file system information.")
all_lines.append("#")
all_lines.append("# Use 'blkid' to print the universally unique identifier for a")
all_lines.append("# device; this may be used with UUID= as a more robust way to name devices")
all_lines.append("# that works even if disks are added and removed. See fstab(5).")
all_lines.append("#")
all_lines.append("# <file system> <mount point> <type> <options> <dump> <pass>")
all_lines.append("#")
root_ssd = 0
for path in self.mount_devices:
opts = 'defaults'
chk = '0'
parti = self.mount_devices[path]
info = fs.get_info(parti)
uuid = info['UUID']
if parti in self.fs_devices:
myfmt = self.fs_devices[parti]
else:
# It hasn't any filesystem defined
continue
# TODO: Take care of swap partitions
if "swap" in myfmt:
logging.debug("Add to fstab : UUID=%s %s %s %s 0 %s" % (uuid, path, myfmt, opts, chk))
all_lines.append("UUID=%s %s %s %s 0 %s" % (uuid, path, myfmt, opts, chk))
continue
# Avoid adding a partition to fstab when
# it has no mount point (swap has been checked before)
if path == "":
continue
if path == '/':
chk = '1'
opts = "rw,relatime,data=ordered"
else:
full_path = os.path.join(self.dest_dir, path)
subprocess.check_call(["mkdir", "-p", full_path])
if self.ssd != None:
for i in self.ssd:
if i in self.mount_devices[path] and self.ssd[i]:
opts = 'defaults,noatime,nodiratime'
# As of linux kernel version 3.7, the following
# filesystems support TRIM: ext4, btrfs, JFS, and XFS.
# If using a TRIM supported SSD, discard is a valid mount option for swap
if myfmt == 'ext4' or myfmt == 'btrfs' or myfmt == 'jfs' or myfmt == 'xfs' or myfmt == 'swap':
opts += ',discard'
if path == '/':
root_ssd = 1
all_lines.append("UUID=%s %s %s %s 0 %s" % (uuid, path, myfmt, opts, chk))
if root_ssd:
all_lines.append("tmpfs /tmp tmpfs defaults,noatime,mode=1777 0 0")
full_text = '\n'.join(all_lines)
full_text += '\n'
with open('%s/etc/fstab' % self.dest_dir, 'w') as f:
f.write(full_text)
def install_bootloader(self):
bt = self.settings.get('bootloader_type')
if bt == "GRUB2":
self.install_bootloader_grub2_bios()
elif bt == "UEFI_x86_64" or bt == "UEFI_i386":
self.install_bootloader_grub2_efi(bt)
def modify_grub_default(self):
# If using LUKS, we need to modify GRUB_CMDLINE_LINUX to load our root encrypted partition
# This scheme can be used in the automatic installation option only (at this time)
if self.method == 'automatic' and self.settings.get('use_luks'):
default_dir = os.path.join(self.dest_dir, "etc/default")
if not os.path.exists(default_dir):
os.mkdir(default_dir)
root_device = self.mount_devices["/"]
boot_device = self.mount_devices["/boot"]
# Let GRUB automatically add the kernel parameters for root encryption
if self.settings.get("luks_key_pass") == "":
default_line = 'GRUB_CMDLINE_LINUX="cryptdevice=%s:cryptArch cryptkey=%s:ext2:/.keyfile"' % (root_device, boot_device)
else:
default_line = 'GRUB_CMDLINE_LINUX="cryptdevice=%s:cryptArch"' % root_device
# Disable the usage of UUIDs for the rootfs:
disable_uuid_line = 'GRUB_DISABLE_LINUX_UUID=true'
default_grub = os.path.join(default_dir, "grub")
with open(default_grub) as f:
lines = [x.strip() for x in f.readlines()]
for e in range(len(lines)):
if lines[e].startswith("#GRUB_CMDLINE_LINUX") or lines[e].startswith("GRUB_CMDLINE_LINUX"):
lines[e] = default_line
elif lines[e].startswith("#GRUB_DISABLE_LINUX_UUID") or lines[e].startswith("GRUB_DISABLE_LINUX_UUID"):
lines[e] = disable_uuid_line
with open(default_grub, "w") as f:
f.write("\n".join(lines) + "\n")
def install_bootloader_grub2_bios(self):
grub_device = self.settings.get('bootloader_device')
self.queue_event('info', _("Installing GRUB(2) BIOS boot loader in %s") % grub_device)
self.modify_grub_default()
self.chroot_mount_special_dirs()
self.chroot(['grub-install', \
'--directory=/usr/lib/grub/i386-pc', \
'--target=i386-pc', \
'--boot-directory=/boot', \
'--recheck', \
grub_device])
self.chroot_umount_special_dirs()
grub_d_dir = os.path.join(self.dest_dir, "etc/grub.d")
if not os.path.exists(grub_d_dir):
os.makedirs(grub_d_dir)
try:
shutil.copy2("/arch/10_linux", grub_d_dir)
except FileNotFoundError:
try:
shutil.copy2("/etc/grub.d/10_linux", grub_d_dir)
except FileNotFoundError:
self.queue_event('warning', _("ERROR installing GRUB(2) BIOS."))
return
except FileExistsError:
pass
except FileExistsError:
# ignore if already exists
pass
self.install_bootloader_grub2_locales()
locale = self.settings.get("locale")
self.chroot_mount_special_dirs()
self.chroot(['sh', '-c', 'LANG=%s grub-mkconfig -o /boot/grub/grub.cfg' % locale])
self.chroot_umount_special_dirs()
core_path = os.path.join(self.dest_dir, "boot/grub/i386-pc/core.img")
if os.path.exists(core_path):
self.queue_event('info', _("GRUB(2) BIOS has been successfully installed."))
else:
self.queue_event('warning', _("ERROR installing GRUB(2) BIOS."))
def install_bootloader_grub2_efi(self, arch):
uefi_arch = "x86_64"
spec_uefi_arch = "x64"
if bt == "UEFI_i386":
uefi_arch = "i386"
spec_uefi_arch = "ia32"
grub_device = self.settings.get('bootloader_device')
self.queue_event('info', _("Installing GRUB(2) UEFI %s boot loader in %s") % (uefi_arch, grub_device))
self.modify_grub_default()
self.chroot_mount_special_dirs()
self.chroot(['grub-install', \
'--directory=/usr/lib/grub/%s-efi' % uefi_arch, \
'--target=%s-efi' % uefi_arch, \
'--bootloader-id="arch_grub"', \
'--boot-directory=/boot', \
'--recheck', \
grub_device])
self.chroot_umount_special_dirs()
self.install_bootloader_grub2_locales()
locale = self.settings.get("locale")
self.chroot_mount_special_dirs()
self.chroot(['sh', '-c', 'LANG=%s grub-mkconfig -o /boot/grub/grub.cfg' % locale])
self.chroot_umount_special_dirs()
grub_cfg = "%s/boot/grub/grub.cfg" % self.dest_dir
grub_standalone = "%s/boot/efi/EFI/arch_grub/grub%s_standalone.cfg" % (self.dest_dir, spec_uefi_arch)
try:
shutil.copy2(grub_cfg, grub_standalone)
except FileNotFoundError:
self.queue_event('warning', _("ERROR installing GRUB(2) configuration file."))
return
except FileExistsError:
# ignore if already exists
pass
self.chroot_mount_special_dirs()
self.chroot(['grub-mkstandalone', \
'--directory=/usr/lib/grub/%s-efi' % uefi_arch, \
'--format=%s-efi' % uefi_arch, \
'--compression="xz"', \
'--output="/boot/efi/EFI/arch_grub/grub%s_standalone.efi' % spec_uefi_arch, \
'boot/grub/grub.cfg'])
self.chroot_umount_special_dirs()
# TODO: Create a boot entry for Arch in the UEFI boot manager (is this necessary?)
def install_bootloader_grub2_locales(self):
dest_locale_dir = os.path.join(self.dest_dir, "boot/grub/locale")
if not os.path.exists(dest_locale_dir):
os.makedirs(dest_locale_dir)
mo = os.path.join(self.dest_dir, "usr/share/locale/en@quot/LC_MESSAGES/grub.mo")
try:
shutil.copy2(mo, os.path.join(dest_locale_dir, "en.mo"))
except FileNotFoundError:
self.queue_event('warning', _("ERROR installing GRUB(2) locale."))
except FileExistsError:
# ignore if already exists
pass
def enable_services(self, services):
for name in services:
name += '.service'
self.chroot(['systemctl', 'enable', name])
def change_user_password(self, user, new_password):
try:
shadow_password = crypt.crypt(new_password,"$6$%s$" % user)
except:
self.queue_event('warning', _('Error creating password hash for user %s') % user)
return False
try:
self.chroot(['usermod', '-p', shadow_password, user])
except:
self.queue_event('warning', _('Error changing password for user %s') % user)
return False
return True
def auto_timesetting(self):
subprocess.check_call(["hwclock", "--systohc", "--utc"])
shutil.copy2("/etc/adjtime", "%s/etc/" % self.dest_dir)
def set_mkinitcpio_hooks_and_modules(self, hooks, modules):
self.queue_event('debug', 'Setting hooks and modules in mkinitcpio.conf')
self.queue_event('debug', 'HOOKS="%s"' % ' '.join(hooks))
self.queue_event('debug', 'MODULES="%s"' % ' '.join(modules))
with open("/etc/mkinitcpio.conf") as f:
mklins = [x.strip() for x in f.readlines()]
for e in range(len(mklins)):
if mklins[e].startswith("HOOKS"):
mklins[e] = 'HOOKS="%s"' % ' '.join(hooks)
elif mklins[e].startswith("MODULES"):
mklins[e] = 'MODULES="%s"' % ' '.join(modules)
with open("%s/etc/mkinitcpio.conf" % self.dest_dir, "w") as f:
f.write("\n".join(mklins) + "\n")
def run_mkinitcpio(self):
# Add lvm and encrypt hooks if necessary
hooks = [ "base", "udev", "autodetect", "modconf", "block" ]
modules = []
# It is important that the encrypt hook comes before the filesystems hook
# (in case you are using LVM on LUKS, the order should be: encrypt lvm2 filesystems)
if self.settings.get("use_luks"):
hooks.append("encrypt")
modules.extend([ "dm_mod", "dm_crypt", "ext4", "aes-x86_64", "sha256", "sha512" ])
if self.blvm or self.settings.get("use_lvm"):
hooks.append("lvm2")
hooks.extend([ "filesystems", "keyboard", "fsck" ])
self.set_mkinitcpio_hooks_and_modules(hooks, modules)
# run mkinitcpio on the target system
self.chroot_mount_special_dirs()
self.chroot(["/usr/bin/mkinitcpio", "-p", self.kernel_pkg])
self.chroot_umount_special_dirs()
# Uncomment selected locale in /etc/locale.gen
def uncomment_locale_gen(self, locale):
#self.chroot(['sed', '-i', '-r', '"s/#(.*%s)/\1/g"' % locale, "/etc/locale.gen"])
text = []
with open("%s/etc/locale.gen" % self.dest_dir, "rt") as gen:
text = gen.readlines()
with open("%s/etc/locale.gen" % self.dest_dir, "wt") as gen:
for line in text:
if locale in line and line[0] == "#":
# uncomment line
line = line[1:]
gen.write(line)
def check_output(self, command):
return subprocess.check_output(command.split()).decode().strip("\n")
def encrypt_home(self):
# WARNING: ecryptfs-utils, rsync and lsof packages are needed.
# They should be added in the livecd AND in the "to install packages" xml list
# Load ecryptfs module
subprocess.check_call(['modprobe', 'ecryptfs'])
# Add it to /install/etc/modules-load.d/
with open("%s/etc/modules-load.d/ecryptfs.conf", "wt") as f:
f.write("ecryptfs\n")
# Get the username and passwd
username = self.settings.get('username')
passwd = self.settings.get('password')
# Migrate user home directory
# See http://blog.dustinkirkland.com/2011/02/long-overdue-introduction-ecryptfs.html
self.chroot_mount_special_dirs()
command = "LOGINPASS=%s chroot %s ecryptfs-migrate-home -u %s" % (passwd, self.dest_dir, username)
outp = self.check_output(command)
self.chroot_umount_special_dirs()
with open(os.path.join(self.dest_dir, "root/cnchi-ecryptfs.log", "wt")) as f:
f.write(outp)
# Critically important, USER must login before the next reboot to complete the migration
# User should run ecryptfs-unwrap-passphrase and write down the generated passphrase
subprocess.check_call(['su', username])
def copy_cache_files(self, cache_dir):
# Check in case user has given a wrong folder
if not os.path.exists(cache_dir):
return
self.queue_event('info', 'Copying xz files from cache...')
dest_dir = os.path.join(self.dest_dir, "var/cache/pacman/pkg")
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
self.copyfiles_progress(cache_dir, dest_dir)
def copyfiles_progress(self, src, dst):
percent = 0.0
items = os.listdir(src)
step = 1.0 / len(items)
for item in items:
self.queue_event("percent", percent)
s = os.path.join(src, item)
d = os.path.join(dst, item)
try:
shutil.copy2(s, d)
except (FileExistsError, shutil.Error) as e:
pass
percent += step
def setup_features(self):
#features = [ "bluetooth", "cups", "office", "visual", "firewall", "third_party" ]
if self.settings.get("feature_bluetooth"):
self.queue_event('debug', "Configuring bluetooth...")
service = os.path.join(self.dest_dir, "usr/lib/systemd/system/bluetooth.service")
if os.path.exists(service):
self.enable_services(['bluetooth'])
if self.settings.get("feature_cups"):
self.queue_event('debug', "Configuring CUPS...")
service = os.path.join(self.dest_dir, "usr/lib/systemd/system/cups.service")
if os.path.exists(service):
self.enable_services(['cups'])
if self.settings.get("feature_office"):
self.queue_event('debug', "Configuring libreoffice...")
if self.settings.get("feature_visual"):
self.queue_event('debug', "Configuring Compositing manager...")
if self.settings.get("feature_firewall"):
self.queue_event('debug', "Configuring firewall...")
# A very simplistic configuration which will deny all by default,
# allow any protocol from inside a 192.168.0.1-192.168.0.255 LAN,
# and allow incoming Transmission and SSH traffic from anywhere:
self.chroot_mount_special_dirs()
self.chroot(["ufw", "default", "deny"])
toallow = misc.get_network()
if toallow:
self.chroot(["ufw", "allow", "from", toallow])
#self.chroot(["ufw", "allow", "from", "192.168.0.0/24"])
#self.chroot(["ufw", "allow", "from", "192.168.1.0/24"])
#self.chroot(["ufw", "allow", "from", "192.168.2.0/24"])
self.chroot(["ufw", "allow", "Transmission"])
self.chroot(["ufw", "allow", "SSH"])
self.chroot(["ufw", "enable"])
self.chroot_umount_special_dirs()
service = os.path.join(self.dest_dir, "usr/lib/systemd/system/ufw.service")
if os.path.exists(service):
self.enable_services(['ufw'])
def configure_system(self):
# final install steps
# set clock, language, timezone
# run mkinitcpio
# populate pacman keyring
# setup systemd services
# ... check configure_system from arch-setup
self.queue_event('action', _("Configuring your new system"))
self.auto_fstab()
self.queue_event('debug', 'fstab file generated.')
# Copy configured networks in Live medium to target system
if self.network_manager == 'NetworkManager':
self.copy_network_config()
# TODO: Test copy profile. Also think a bit more about it.
# Maybe just installing netctl is enough.
'''
elif self.network_manager == 'netctl':
if misc.is_wireless_enabled():
profile = 'wireless-wpa'
else:
profile = 'ethernet-dhcp'
self.queue_event('debug', 'Cnchi will configure netctl using the %s profile' % profile)
src_path = os.path.join(self.dest_dir, 'etc/netctl/examples/%s' % profile)
dst_path = os.path.join(self.dest_dir, 'etc/netctl/%s' % profile)
shutil.copy(src_path, dst_path)
self.chroot(['netctl', 'enable', profile])
self.queue_event('debug', 'Network configuration copied.')
'''
# copy mirror list
mirrorlist_path = os.path.join(self.dest_dir, 'etc/pacman.d/mirrorlist')
try:
shutil.copy2('/etc/pacman.d/mirrorlist', mirrorlist_path)
self.queue_event('debug', 'Mirror list copied.')
except:
pass
# Copy important config files to target system
files = [ "/etc/pacman.conf", "/etc/yaourtrc" ]
for path in files:
try:
shutil.copy2(path, os.path.join(self.dest_dir, 'etc/'))
except:
pass
self.queue_event('debug', 'Important configuration files copied.')
desktop = self.settings.get('desktop')
# enable services
if desktop != "nox":
self.enable_services([ self.desktop_manager, "ModemManager" ])
self.enable_services([ self.network_manager ])
self.queue_event('debug', 'Enabled installed services.')
# Wait FOREVER until the user sets the timezone
while self.settings.get('timezone_done') is False:
# wait five seconds and try again
time.sleep(5)
if self.settings.get("use_ntp"):
self.enable_services(["ntpd"])
# set timezone
zoneinfo_path = os.path.join("/usr/share/zoneinfo", self.settings.get("timezone_zone"))
self.chroot(['ln', '-s', zoneinfo_path, "/etc/localtime"])
self.queue_event('debug', 'Timezone set.')
# Wait FOREVER until the user sets his params
while self.settings.get('user_info_done') is False:
# wait five seconds and try again
time.sleep(5)
# Set user parameters
username = self.settings.get('username')
fullname = self.settings.get('fullname')
password = self.settings.get('password')
hostname = self.settings.get('hostname')
sudoers_path = os.path.join(self.dest_dir, "etc/sudoers.d/10-installer")
with open(sudoers_path, "wt") as sudoers:
sudoers.write('%s ALL=(ALL) ALL\n' % username)
subprocess.check_call(["chmod", "440", sudoers_path])
self.queue_event('debug', 'Sudo configuration for user %s done.' % username)
self.chroot(['useradd', '-m', '-s', '/bin/bash', \
'-g', 'users', '-G', 'lp,video,network,storage,wheel,audio', \
username])
self.queue_event('debug', 'User %s added.' % username)
self.change_user_password(username, password)
self.chroot(['chfn', '-f', fullname, username])
self.chroot(['chown', '-R', '%s:users' % username, "/home/%s" % username])
hostname_path = os.path.join(self.dest_dir, "etc/hostname")
if not os.path.exists(hostname_path):
with open(hostname_path, "wt") as f:
f.write(hostname)
self.queue_event('debug', 'Hostname %s set.' % hostname)
# User password is the root password
self.change_user_password('root', password)
self.queue_event('debug', 'Set the same password to root.')
# Generate locales
keyboard_layout = self.settings.get("keyboard_layout")
keyboard_variant = self.settings.get("keyboard_variant")
locale = self.settings.get("locale")
self.queue_event('info', _("Generating locales..."))
self.uncomment_locale_gen(locale)
self.chroot(['locale-gen'])
locale_conf_path = os.path.join(self.dest_dir, "etc/locale.conf")
with open(locale_conf_path, "wt") as locale_conf:
locale_conf.write('LANG=%s \n' % locale)
locale_conf.write('LC_COLLATE=C \n')
# Set /etc/vconsole.conf
vconsole_conf_path = os.path.join(self.dest_dir, "etc/vconsole.conf")
with open(vconsole_conf_path, "wt") as vconsole_conf:
vconsole_conf.write('KEYMAP=%s \n' % keyboard_layout)
self.queue_event('info', _("Adjusting hardware clock..."))
self.auto_timesetting()
if desktop != "nox":
self.queue_event('debug', "Set /etc/X11/xorg.conf.d/00-keyboard.conf for the xkblayout")
# Set /etc/X11/xorg.conf.d/00-keyboard.conf for the xkblayout
xorg_conf_xkb_path = os.path.join(self.dest_dir, "etc/X11/xorg.conf.d/00-keyboard.conf")
with open(xorg_conf_xkb_path, "wt") as xorg_conf_xkb:
xorg_conf_xkb.write("# Read and parsed by systemd-localed. It's probably wise not to edit this file\n")
xorg_conf_xkb.write('# manually too freely.\n')
xorg_conf_xkb.write('Section "InputClass"\n')
xorg_conf_xkb.write(' Identifier "system-keyboard"\n')
xorg_conf_xkb.write(' MatchIsKeyboard "on"\n')
xorg_conf_xkb.write(' Option "XkbLayout" "%s"\n' % keyboard_layout)
if keyboard_variant != '':
xorg_conf_xkb.write(' Option "XkbVariant" "%s"\n' % keyboard_variant)
xorg_conf_xkb.write('EndSection\n')
self.queue_event('debug', "00-keyboard.conf written.")
# Set autologin if selected
if self.settings.get('require_password') is False:
self.queue_event('info', _("%s: Enable automatic login for user %s.") % (self.desktop_manager, username))
# Systems with GDM as Desktop Manager
if self.desktop_manager == 'gdm':
gdm_conf_path = os.path.join(self.dest_dir, "etc/gdm/custom.conf")
with open(gdm_conf_path, "wt") as gdm_conf:
gdm_conf.write('# Enable automatic login for user\n')
gdm_conf.write('[daemon]\n')
gdm_conf.write('AutomaticLogin=%s\n' % username)
gdm_conf.write('AutomaticLoginEnable=True\n')
# Systems with KDM as Desktop Manager
elif self.desktop_manager == 'kdm':
kdm_conf_path = os.path.join(self.dest_dir, "usr/share/config/kdm/kdmrc")
text = []
with open(kdm_conf_path, "rt") as kdm_conf:
text = kdm_conf.readlines()
with open(kdm_conf_path, "wt") as kdm_conf:
for line in text:
if '#AutoLoginEnable=true' in line:
line = '#AutoLoginEnable=true \n'
line = line[1:]
if 'AutoLoginUser=' in line:
line = 'AutoLoginUser=%s \n' % username
kdm_conf.write(line)
# Systems with LXDM as Desktop Manager
elif self.desktop_manager == 'lxdm':
lxdm_conf_path = os.path.join(self.dest_dir, "etc/lxdm/lxdm.conf")
text = []
with open(lxdm_conf_path, "rt") as lxdm_conf:
text = lxdm_conf.readlines()
with open(lxdm_conf_path, "wt") as lxdm_conf:
for line in text:
if '# autologin=dgod' in line and line[0] == "#":
# uncomment line
line = '# autologin=%s' % username
line = line[1:]
lxdm_conf.write(line)
# Systems with LightDM as the Desktop Manager
elif self.desktop_manager == 'lightdm':
lightdm_conf_path = os.path.join(self.dest_dir, "etc/lightdm/lightdm.conf")
# Ideally, use configparser for the ini conf file, but just do
# a simple text replacement for now
text = []
with open(lightdm_conf_path, "rt") as lightdm_conf:
text = lightdm_conf.readlines()
with open(lightdm_conf_path, "wt") as lightdm_conf:
for line in text:
if '#autologin-user=' in line:
line = 'autologin-user=%s\n' % username
lightdm_conf.write(line)
# Let's start without using hwdetect for mkinitcpio.conf.
# I think it should work out of the box most of the time.
# This way we don't have to fix deprecated hooks.
# NOTE: With LUKS or LVM maybe we'll have to fix deprecated hooks.
self.queue_event('info', _("Running mkinitcpio..."))
self.run_mkinitcpio()
self.queue_event('debug', "Call post-install script to execute gsettings commands")
# Call post-install script to execute gsettings commands
script_path_postinstall = os.path.join(self.settings.get('cnchi'), "scripts", _postinstall_script)
subprocess.check_call(["/usr/bin/bash", script_path_postinstall, \
username, self.dest_dir, self.desktop, keyboard_layout, keyboard_variant])
# In openbox "desktop", the postinstall script writes /etc/slim.conf
# so we have to modify it here (after running the script).
# Set autologin if selected
if self.settings.get('require_password') is False and \
self.desktop_manager == 'slim':
slim_conf_path = os.path.join(self.dest_dir, "etc/slim.conf")
text = []
with open(slim_conf_path, "rt") as slim_conf:
text = slim_conf.readlines()
with open(slim_conf_path, "wt") as slim_conf:
for line in text:
if 'auto_login' in line:
line = 'auto_login yes\n'
if 'default_user' in line:
line = 'default_user %s\n' % username
slim_conf.write(line)
# Configure user features
self.setup_features()
# encrypt home directory if requested
if self.settings.get('encrypt_home'):
self.queue_event('debug', "Encrypting user home dir...")
self.encrypt_home()
self.queue_event('debug', "User home dir encrypted")
``` |
[
{
"content": "Here is a code file:\n```python\n# Copyright (c) 2011 Google Inc. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport re\nimport os\n\n\ndef XmlToString(content, encoding='utf-8', pretty=False):\n \"\"\" Writes the... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n# Copyright (c) 2011 Google Inc. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport re\nimport os\n\n\ndef XmlToString(content, encoding='utf-8', pretty=False):\n \... | ```python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
# Fix encoding
xml_string = unicode(xml_string, 'Windows-1251').encode(encoding)
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
_xml_escape_map = {
'"': '"',
"'": ''',
'<': '<',
'>': '>',
'&': '&',
'\n': '
',
'\r': '
',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
``` |
[
{
"content": "Here is a code file:\n```python\nimport operator\n\nfrom django.db.models.base import ModelBase\n\nfrom enumfields import EnumField\n\nfrom cvsslib.mixin import cvss_mixin_data, utils_mixin\nfrom cvsslib.base_enum import NotDefined\n\n\nclass KeyedEnumField(EnumField):\n \"\"\"\n An enum fie... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nimport operator\n\nfrom django.db.models.base import ModelBase\n\nfrom enumfields import EnumField\n\nfrom cvsslib.mixin import cvss_mixin_data, utils_mixin\nfrom cvsslib.base_enum import NotDefined\n\n\nclass KeyedEnumField(EnumField):\n \"\"\"\... | ```python
import operator
from django.db.models.base import ModelBase
from enumfields import EnumField
from cvsslib.mixin import cvss_mixin_data, utils_mixin
from cvsslib.base_enum import NotDefined
class KeyedEnumField(EnumField):
"""
An enum field that stores the names of the values as strings, rather than the values.
"""
def get_prep_value(self, value):
if isinstance(value, str):
return value
return value.name
def to_python(self, value):
if isinstance(value, str):
return getattr(self.enum, value)
return super().to_python(value)
def get_default(self):
if self.has_default():
if self.default is None:
return None
if isinstance(self.default, str):
return self.default
return super().get_default()
def django_mixin(module, base=ModelBase, attr_name=None):
# This is a function that takes a module (filled with enums and a function called 'calculate')
# and wires it up into a Django model that we can use.
def field_callback(name, enum_cls):
choices = enum_cls.choices()
nullable = any((isinstance(o, NotDefined) and o.value.value is None) or
o.value is None for o in enum_cls)
max_length = max(len(o.name) for o in enum_cls)
default = enum_cls.get_default()
return KeyedEnumField(enum_cls,
choices=choices,
default=default.name,
max_length=max_length,
null=nullable)
mixin_data, enum_map = cvss_mixin_data(module, field_callback)
Utils = utils_mixin(module, enum_map)
class DjangoUtils(Utils):
def debug(self):
result = []
fields = [(field.attname, getattr(self, field.attname))
for field in self._meta.get_fields()
if isinstance(field, KeyedEnumField)]
ordered_enums = sorted(fields, key=operator.itemgetter(0))
for name, value in ordered_enums:
result.append("{name} = {value}".format(name=name, value=value))
return result
class MetaClass(base):
def __new__(cls, name, bases, attrs):
cls_base = DjangoUtils
if "__module__" in attrs:
DjangoUtils.__module__ = attrs["__module__"]
bases = (cls_base,) + bases
return super().__new__(cls, name, bases, attrs)
@classmethod
def __prepare__(mcs, *args, **kwargs):
returner = super().__prepare__(*args, **kwargs)
returner.update(mixin_data)
return returner
MetaClass.django_utils = DjangoUtils
if attr_name:
DjangoUtils.__name__ = attr_name + ".django_utils"
return MetaClass
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\nimport numpy as np\n\nfrom .. import util\nfrom ..constants import log\n\n\ndef fill_orthographic(dense):\n shape = dense.shape\n indices = np.stack(\n np.meshgrid(*(np.arange(s) for s in shape), indexing='ij'),\n axis=-1)\n... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nimport numpy as np\n\nfrom .. import util\nfrom ..constants import log\n\n\ndef fill_orthographic(dense):\n shape = dense.shape\n indices = np.stack(\n np.meshgrid(*(np.arange(s) for s in shape), indexing='ij'),\n ... | ```python
import numpy as np
from .. import util
from ..constants import log
def fill_orthographic(dense):
shape = dense.shape
indices = np.stack(
np.meshgrid(*(np.arange(s) for s in shape), indexing='ij'),
axis=-1)
empty = np.logical_not(dense)
def fill_axis(axis):
base_local_indices = indices[..., axis]
local_indices = base_local_indices.copy()
local_indices[empty] = shape[axis]
mins = np.min(local_indices, axis=axis, keepdims=True)
local_indices = base_local_indices.copy()
local_indices[empty] = -1
maxs = np.max(local_indices, axis=axis, keepdims=True)
return np.logical_and(
base_local_indices >= mins,
base_local_indices <= maxs,
)
filled = fill_axis(axis=0)
for axis in range(1, len(shape)):
filled = np.logical_and(filled, fill_axis(axis))
return filled
def fill_base(sparse_indices):
"""
Given a sparse surface voxelization, fill in between columns.
Parameters
--------------
sparse_indices: (n, 3) int, location of filled cells
Returns
--------------
filled: (m, 3) int, location of filled cells
"""
# validate inputs
sparse_indices = np.asanyarray(sparse_indices, dtype=np.int64)
if not util.is_shape(sparse_indices, (-1, 3)):
raise ValueError('incorrect shape')
# create grid and mark inner voxels
max_value = sparse_indices.max() + 3
grid = np.zeros((max_value,
max_value,
max_value),
bool)
voxels_sparse = np.add(sparse_indices, 1)
grid[tuple(voxels_sparse.T)] = 1
for i in range(max_value):
check_dir2 = False
for j in range(0, max_value - 1):
idx = []
# find transitions first
# transition positions are from 0 to 1 and from 1 to 0
eq = np.equal(grid[i, j, :-1], grid[i, j, 1:])
idx = np.where(np.logical_not(eq))[0] + 1
c = len(idx)
check_dir2 = (c % 4) > 0 and c > 4
if c < 4:
continue
for s in range(0, c - c % 4, 4):
grid[i, j, idx[s]:idx[s + 3]] = 1
if not check_dir2:
continue
# check another direction for robustness
for k in range(0, max_value - 1):
idx = []
# find transitions first
eq = np.equal(grid[i, :-1, k], grid[i, 1:, k])
idx = np.where(np.logical_not(eq))[0] + 1
c = len(idx)
if c < 4:
continue
for s in range(0, c - c % 4, 4):
grid[i, idx[s]:idx[s + 3], k] = 1
# generate new voxels
filled = np.column_stack(np.where(grid))
filled -= 1
return filled
fill_voxelization = fill_base
def matrix_to_marching_cubes(matrix, pitch=1.0):
"""
Convert an (n, m, p) matrix into a mesh, using marching_cubes.
Parameters
-----------
matrix : (n, m, p) bool
Occupancy array
Returns
----------
mesh : trimesh.Trimesh
Mesh generated by meshing voxels using
the marching cubes algorithm in skimage
"""
from skimage import measure
from ..base import Trimesh
matrix = np.asanyarray(matrix, dtype=np.bool)
rev_matrix = np.logical_not(matrix) # Takes set about 0.
# Add in padding so marching cubes can function properly with
# voxels on edge of AABB
pad_width = 1
rev_matrix = np.pad(rev_matrix,
pad_width=(pad_width),
mode='constant',
constant_values=(1))
# pick between old and new API
if hasattr(measure, 'marching_cubes_lewiner'):
func = measure.marching_cubes_lewiner
else:
func = measure.marching_cubes
# Run marching cubes.
pitch = np.asanyarray(pitch)
if pitch.size == 1:
pitch = (pitch,) * 3
meshed = func(volume=rev_matrix,
level=.5, # it is a boolean voxel grid
spacing=pitch)
# allow results from either marching cubes function in skimage
# binaries available for python 3.3 and 3.4 appear to use the classic
# method
if len(meshed) == 2:
log.warning('using old marching cubes, may not be watertight!')
vertices, faces = meshed
normals = None
elif len(meshed) == 4:
vertices, faces, normals, vals = meshed
# Return to the origin, add in the pad_width
vertices = np.subtract(vertices, pad_width * pitch)
# create the mesh
mesh = Trimesh(vertices=vertices,
faces=faces,
vertex_normals=normals)
return mesh
def sparse_to_matrix(sparse):
"""
Take a sparse (n,3) list of integer indexes of filled cells,
turn it into a dense (m,o,p) matrix.
Parameters
-----------
sparse : (n, 3) int
Index of filled cells
Returns
------------
dense : (m, o, p) bool
Matrix of filled cells
"""
sparse = np.asanyarray(sparse, dtype=np.int)
if not util.is_shape(sparse, (-1, 3)):
raise ValueError('sparse must be (n,3)!')
shape = sparse.max(axis=0) + 1
matrix = np.zeros(np.product(shape), dtype=np.bool)
multiplier = np.array([np.product(shape[1:]), shape[2], 1])
index = (sparse * multiplier).sum(axis=1)
matrix[index] = True
dense = matrix.reshape(shape)
return dense
def points_to_marching_cubes(points, pitch=1.0):
"""
Mesh points by assuming they fill a voxel box, and then
running marching cubes on them
Parameters
------------
points : (n, 3) float
Points in 3D space
Returns
-------------
mesh : trimesh.Trimesh
Points meshed using marching cubes
"""
# make sure inputs are as expected
points = np.asanyarray(points, dtype=np.float64)
pitch = np.asanyarray(pitch, dtype=float)
# find the minimum value of points for origin
origin = points.min(axis=0)
# convert points to occupied voxel cells
index = ((points - origin) / pitch).round().astype(np.int64)
# convert voxel indices to a matrix
matrix = sparse_to_matrix(index)
# run marching cubes on the matrix to generate a mesh
mesh = matrix_to_marching_cubes(matrix, pitch=pitch)
mesh.vertices += origin
return mesh
def multibox(centers, pitch=1.0, colors=None):
"""
Return a Trimesh object with a box at every center.
Doesn't do anything nice or fancy.
Parameters
-----------
centers : (n, 3) float
Center of boxes that are occupied
pitch : float
The edge length of a voxel
colors : (3,) or (4,) or (n,3) or (n, 4) float
Color of boxes
Returns
---------
rough : Trimesh
Mesh object representing inputs
"""
from .. import primitives
from ..base import Trimesh
# get centers as numpy array
centers = np.asanyarray(
centers, dtype=np.float64)
# get a basic box
b = primitives.Box()
# apply the pitch
b.apply_scale(float(pitch))
# tile into one box vertex per center
v = np.tile(
centers,
(1, len(b.vertices))).reshape((-1, 3))
# offset to centers
v += np.tile(b.vertices, (len(centers), 1))
f = np.tile(b.faces, (len(centers), 1))
f += np.tile(
np.arange(len(centers)) * len(b.vertices),
(len(b.faces), 1)).T.reshape((-1, 1))
face_colors = None
if colors is not None:
colors = np.asarray(colors)
if colors.ndim == 1:
colors = colors[None].repeat(len(centers), axis=0)
if colors.ndim == 2 and len(colors) == len(centers):
face_colors = colors.repeat(12, axis=0)
mesh = Trimesh(vertices=v,
faces=f,
face_colors=face_colors)
return mesh
def boolean_sparse(a, b, operation=np.logical_and):
"""
Find common rows between two arrays very quickly
using 3D boolean sparse matrices.
Parameters
-----------
a: (n, d) int, coordinates in space
b: (m, d) int, coordinates in space
operation: numpy operation function, ie:
np.logical_and
np.logical_or
Returns
-----------
coords: (q, d) int, coordinates in space
"""
# 3D sparse arrays, using wrapped scipy.sparse
# pip install sparse
import sparse
# find the bounding box of both arrays
extrema = np.array([a.min(axis=0),
a.max(axis=0),
b.min(axis=0),
b.max(axis=0)])
origin = extrema.min(axis=0) - 1
size = tuple(extrema.ptp(axis=0) + 2)
# put nearby voxel arrays into same shape sparse array
sp_a = sparse.COO((a - origin).T,
data=np.ones(len(a), dtype=np.bool),
shape=size)
sp_b = sparse.COO((b - origin).T,
data=np.ones(len(b), dtype=np.bool),
shape=size)
# apply the logical operation
# get a sparse matrix out
applied = operation(sp_a, sp_b)
# reconstruct the original coordinates
coords = np.column_stack(applied.coords) + origin
return coords
def strip_array(data):
shape = data.shape
ndims = len(shape)
padding = []
slices = []
for dim, size in enumerate(shape):
axis = tuple(range(dim)) + tuple(range(dim + 1, ndims))
filled = np.any(data, axis=axis)
indices, = np.nonzero(filled)
pad_left = indices[0]
pad_right = indices[-1]
padding.append([pad_left, pad_right])
slices.append(slice(pad_left, pad_right))
return data[tuple(slices)], np.array(padding, int)
def indices_to_points(indices, pitch=None, origin=None):
    """
    Convert indices of an (n,m,p) matrix into a set of voxel center points.
    Parameters
    ----------
    indices: (q, 3) int, index of voxel matrix (n,m,p)
    pitch: float, what pitch was the voxel matrix computed with
    origin: (3,) float, what is the origin of the voxel matrix
    Returns
    ----------
    points: (q, 3) float, list of points
    """
    indices = np.asanyarray(indices)
    # NOTE: removed a leftover debugging hook (`from IPython import embed;
    # embed()`) that dropped into an interactive shell on malformed input
    if indices.shape[1:] != (3,):
        raise ValueError('shape of indices must be (q, 3)')
    points = np.array(indices, dtype=np.float64)
    # scale first, then translate: world = index * pitch + origin
    if pitch is not None:
        points *= float(pitch)
    if origin is not None:
        origin = np.asanyarray(origin)
        if origin.shape != (3,):
            raise ValueError('shape of origin must be (3,)')
        points += origin
    return points
def matrix_to_points(matrix, pitch=None, origin=None):
    """
    Convert an (n,m,p) matrix into a set of points for each voxel center.

    Parameters
    -----------
    matrix: (n,m,p) bool, voxel matrix
    pitch: float, what pitch was the voxel matrix computed with
    origin: (3,) float, what is the origin of the voxel matrix

    Returns
    ----------
    points: (q, 3) list of points
    """
    # stack the indices of every filled voxel into a (q, 3) array,
    # then let indices_to_points scale/shift them into world space
    filled = np.column_stack(np.nonzero(matrix))
    return indices_to_points(indices=filled,
                             pitch=pitch,
                             origin=origin)
def points_to_indices(points, pitch=None, origin=None):
    """
    Convert center points of an (n,m,p) matrix into its indices.
    Parameters
    ----------
    points : (q, 3) float
      Center points of voxel matrix (n,m,p)
    pitch : float
      What pitch was the voxel matrix computed with
    origin : (3,) float
      What is the origin of the voxel matrix
    Returns
    ----------
    indices : (q, 3) int
      List of indices
    """
    points = np.array(points, dtype=np.float64)
    if points.shape != (points.shape[0], 3):
        raise ValueError('shape of points must be (q, 3)')
    # invert indices_to_points: index = round((point - origin) / pitch)
    if origin is not None:
        origin = np.asanyarray(origin)
        if origin.shape != (3,):
            raise ValueError('shape of origin must be (3,)')
        points -= origin
    if pitch is not None:
        points /= pitch
    # NOTE: the original unconditionally re-cast `origin` and `pitch` here,
    # which raised TypeError when either was left as None and had no effect
    # otherwise; those dead reassignments are removed
    indices = np.round(points).astype(int)
    return indices
``` |
[
{
"content": "Here is a code file:\n```python\n\"\"\" Utility module for command line script select_images\n\"\"\"\n# Author: Ilya Patrushev ilya.patrushev@gmail.com\n\n# License: GPL v2.0\n\nimport os\nimport numpy as np\nimport scipy.linalg as la\n\nfrom cPickle import load\n\nimport cv2\n\ndef image_colour_d... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n\"\"\" Utility module for command line script select_images\n\"\"\"\n# Author: Ilya Patrushev ilya.patrushev@gmail.com\n\n# License: GPL v2.0\n\nimport os\nimport numpy as np\nimport scipy.linalg as la\n\nfrom cPickle import load\n\nimport cv2\n\nde... | ```python
""" Utility module for command line script select_images
"""
# Author: Ilya Patrushev ilya.patrushev@gmail.com
# License: GPL v2.0
import os
import numpy as np
import scipy.linalg as la
from cPickle import load
import cv2
def image_colour_distribution(img):
    """
    Extract colour distribution parameters.
    Parameters
    ----------
    img: array [height, width]
        The RGB image
    Returns
    -------
    array[9]
        colour distribution parameters: 3 channel means followed by the
        6 upper-triangle entries of the Cholesky factor of the covariance
    """
    # work in CIE L*a*b* space rather than raw RGB
    lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    # per-channel mean over all pixels (3 values)
    m = np.mean(lab.reshape(-1, 3), axis=0)
    # Cholesky factor of the 3x3 channel covariance; raises LinAlgError
    # if the covariance is not positive definite (e.g. a flat image)
    s = la.cholesky(np.cov(lab.reshape(-1, 3).T))
    # free the converted image early -- it can be large
    del lab
    return np.hstack([ m, s[np.triu_indices(3)]])
def check_cleared(images, model_path=None):
    """
    Classify images as cleared or un-cleared based on a GMM.
    Parameters
    ----------
    images: list
        The RGB images
    model_path: str
        Path to the dump of GMM
    Returns
    -------
    list
        True if the corresponding image is classified as cleared.
    """
    # no model available: report every image as un-cleared
    # NOTE(review): np.zeros_like on a list of image arrays assumes the
    # images share a common shape -- confirm with callers
    if model_path is None:
        return np.zeros_like(images, dtype=bool)
    if len(images) == 0:
        return []
    X = np.asarray([image_colour_distribution(img) for img in images])
    assert(os.path.exists(model_path))
    # model: fitted classifier; mu/sig: feature standardisation constants;
    # gcl: label value that corresponds to "cleared" images
    model, mu, sig, gcl = load(open(model_path))
    labels = model.predict((X - mu)/sig)
    return labels == gcl
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\n#coding=UTF-8\n'''\nCreated on 2012-9-29\n\n@author: s00228753\n'''\n\nimport xml.etree.ElementTree as ET\n\nfrom com.hws.s3.models.bucket import Bucket\nfrom com.hws.s3.models.owner import Owner\nfrom com.hws.s3.utils.utils import Utils\n\n\n#罗列存储空... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n#coding=UTF-8\n'''\nCreated on 2012-9-29\n\n@author: s00228753\n'''\n\nimport xml.etree.ElementTree as ET\n\nfrom com.hws.s3.models.bucket import Bucket\nfrom com.hws.s3.models.owner import Owner\nfrom com.hws.s3.utils.utils import U... | ```python
#coding=UTF-8
'''
Created on 2012-9-29
@author: s00228753
'''
import xml.etree.ElementTree as ET
from com.hws.s3.models.bucket import Bucket
from com.hws.s3.models.owner import Owner
from com.hws.s3.utils.utils import Utils
# Response object of the "list buckets" operation; parses the returned
# XML into S3 bucket objects.
class ListBucketsResponse(object):
    def __init__(self, entries, owner):
        # entries: list of Bucket objects; owner: account Owner
        self.entries = entries
        self.owner = owner
    #===========================================================================
    # Static factory: parse the response XML and build a
    # ListBucketsResponse from it (returns None when xml is None).
    #===========================================================================
    @staticmethod
    def list_parse_factory(xml):
        if xml is not None:
            entries = ListAllMyBuckets.parse_buckets(xml)
            owner = ListAllMyBuckets.parse_owner(xml)
            return ListBucketsResponse(entries, owner)  # assembled response object
#===============================================================================
# Helper that parses the list-buckets XML document into an Owner and a
# list of Bucket entries.
#===============================================================================
class ListAllMyBuckets:
    NS = '{http://s3.amazonaws.com/doc/2006-03-01/}'  # XML namespace of the S3 response
    #===========================================================================
    # Extract the Owner object from the response XML
    #===========================================================================
    @staticmethod
    def parse_owner(xml):
        root = ET.fromstring(xml)  # root element of the XML document
        owner_id = root.find('.//{0}ID'.format(ListAllMyBuckets.NS)).text
        owner_name = root.find('.//{0}DisplayName'.format(ListAllMyBuckets.NS)).text
        owner = Owner(owner_id, owner_name)  # build the Owner object
        return owner
    #===========================================================================
    # Extract the list of buckets from the response XML
    #===========================================================================
    @staticmethod
    def parse_buckets(xml):
        root = ET.fromstring(xml)
        buckets = root.find('{0}Buckets'.format(ListAllMyBuckets.NS)).findall('{0}Bucket'.format(ListAllMyBuckets.NS))
        entries = []
        for bucket in buckets:
            name = bucket.find("{0}Name".format(ListAllMyBuckets.NS)).text  # bucket name
            d = bucket.find("{0}CreationDate".format(ListAllMyBuckets.NS)).text  # raw creation date
            creation_date = Utils.transfer_date(d)  # convert creation date to local time
            curr_bucket = Bucket(name, creation_date)  # build the Bucket object
            entries.append(curr_bucket)  # collect it
        return entries
``` |
[
{
"content": "```python\n'''Ensure non-immutable constants are copied to prevent\n mutation affecting constant in future uses\n'''\n\nfrom ..runtime.builtins import get_builtin_symbol\nfrom ..runtime.immutable import immutablep\nfrom .walk import IRWalker, propigate_location\nfrom . import ir as I\nfrom .bind... | [
{
"content": "<|memory_start|>```python\n'''Ensure non-immutable constants are copied to prevent\n mutation affecting constant in future uses\n'''\n\nfrom ..runtime.builtins import get_builtin_symbol\nfrom ..runtime.immutable import immutablep\nfrom .walk import IRWalker, propigate_location\nfrom . import ir ... | ```python
'''Ensure non-immutable constants are copied to prevent
mutation affecting constant in future uses
'''
from ..runtime.builtins import get_builtin_symbol
from ..runtime.immutable import immutablep
from .walk import IRWalker, propigate_location
from . import ir as I
from .bind import Binding, BindingUse
# Binding for the runtime helper that copies a value.
copy_binding = Binding(get_builtin_symbol('make-copy'))
def make_copy_form(value, loc_form=None):
    """Build IR for a `make-copy` call so each use of *value* gets a fresh copy."""
    copy_form = I.make_call(callee=I.make_read_binding(BindingUse(copy_binding)),
                            args=[I.make_constant(value)],
                            kwd_names=[], kwd_values=[],
                            star_args=None, star_kwds=None)
    if loc_form is not None:
        # carry over the source location of the node being replaced
        propigate_location(loc_form, copy_form)
    return copy_form
class ConstantCopyInserter(IRWalker):
    """IR walker that wraps every mutable constant in a runtime copy call."""
    descend_into_functions = True
    def visit_constant(self, cnst):
        # immutable constants are safe to share; only mutable ones are copied
        if not immutablep(cnst.value):
            I.replace_child(cnst, make_copy_form(cnst.value, cnst))
def insert_copy_constants(node):
    """Walk *node* in place, wrapping mutable constants in copy calls; returns *node*."""
    # a bare constant has no parent to splice the replacement into
    assert not isinstance(node, I.constant)
    ConstantCopyInserter().visit(node)
    return node
``` |
[
{
"content": "Here is a code file:\n```python\nfrom math_g4z3 import mod_exp\nfrom converts import big_int_to_bin_str\nimport random\n\n\nDH_p = 0xffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c4... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nfrom math_g4z3 import mod_exp\nfrom converts import big_int_to_bin_str\nimport random\n\n\nDH_p = 0xffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b... | ```python
from math_g4z3 import mod_exp
from converts import big_int_to_bin_str
import random
# Diffie-Hellman group parameters: modulus p (appears to be the 1536-bit
# MODP prime -- TODO confirm against RFC 3526 group 5) and generator g.
DH_p = 0xffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff
DH_g = 2
class DHClient:
    """One party of a Diffie-Hellman key exchange."""
    def __init__(self, p=DH_p, g=DH_g):
        # private exponent drawn below the modulus
        # NOTE(review): `random` is not cryptographically secure; a real
        # implementation should use a CSPRNG for the private key
        self.priv_key = random.randrange(p)
        self.pub_key = mod_exp(g, self.priv_key, p)
    def get_shared_key(self, pub_key):
        # shared secret = (peer_pub ** priv) mod p, rendered as a binary string
        return big_int_to_bin_str(mod_exp(pub_key, self.priv_key, DH_p))
def test_dh():
    """Smoke test: both parties must derive the same shared secret (Python 2)."""
    rounds = 10
    test_pass = True
    for x in range(rounds):
        dha = DHClient()
        dhb = DHClient()
        # each side combines its private key with the other's public key
        sa = dha.get_shared_key(dhb.pub_key)
        sb = dhb.get_shared_key(dha.pub_key)
        if sa != sb:
            print "Test fail"
            test_pass = False
    if test_pass:
        print "Test pass with {n} cases".format(n=rounds)
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigrat... | ```python
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the `datetime` column to chat Comments."""
    def forwards(self, orm):
        # Adding field 'Comments.datetime'
        # the one-off default backfills existing rows; keep_default=False
        # removes it again after the column is created
        db.add_column(u'chat_comments', 'datetime',
                      self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2014, 4, 30, 0, 0), blank=True),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Comments.datetime'
        db.delete_column(u'chat_comments', 'datetime')
    # frozen ORM snapshot used by South while running this migration
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'chat.comments': {
            'Meta': {'object_name': 'Comments'},
            'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['chat']
``` |
[
{
"content": "```python\nimport pandas as pd\nfrom sklearn import model_selection\nfrom sklearn.linear_model import LogisticRegression\n\ndef loadData():\n col_names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']\n dataframe = pd.read_csv(\"./pima-indians-diabetes.data.txt\", n... | [
{
"content": "<|memory_start|>```python\nimport pandas as pd\nfrom sklearn import model_selection\nfrom sklearn.linear_model import LogisticRegression\n\ndef loadData():\n col_names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']\n dataframe = pd.read_csv(\"./pima-indians-diabet... | ```python
import pandas as pd
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
def loadData():
    """Load the Pima Indians diabetes CSV into feature matrix X and labels Y."""
    col_names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
    # the raw file has no header row, so supply the column names
    dataframe = pd.read_csv("./pima-indians-diabetes.data.txt", names = col_names)
    array = dataframe.values
    # first 8 columns are features, last column is the class label
    X = array[:, 0:8]
    Y = array[:, 8]
    return X, Y
def trainAndTestValidation(X, Y):
    """Evaluate logistic regression with a single 67/33 train/test split."""
    # fixed random_state keeps the split reproducible across runs
    X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, test_size=0.33, random_state=7)
    model = LogisticRegression()
    model.fit(X_train, Y_train)
    result = model.score(X_test, Y_test)
    print("Train And Test Sets. Accuracy: {0}".format(result*100.0))
def kFoldValidation(X, Y):
    """Evaluate logistic regression with 10-fold cross-validation."""
    # NOTE(review): newer scikit-learn rejects random_state when
    # shuffle=False (the default) -- confirm the pinned sklearn version
    kfold = model_selection.KFold(n_splits=10, random_state=7)
    model = LogisticRegression()
    result = model_selection.cross_val_score(model, X, Y, cv=kfold)
    print("Kfold. Accuracy: {0}, Variance: {1}".format(result.mean()*100.0, result.std()*100.0))
def leaveOneOutValidation(X, Y):
    """Evaluate logistic regression with leave-one-out cross-validation (slow: one fit per sample)."""
    model = LogisticRegression()
    loocv = model_selection.LeaveOneOut()
    result = model_selection.cross_val_score(model, X, Y, cv=loocv)
    print("LOOCV. Accuracy: {0}, Variance: {1}".format(result.mean()*100.0, result.std()*100.0))
def repeatedRandomTestTrainValidation(X, Y):
    """Evaluate logistic regression with 10 repeated random 67/33 splits."""
    kfold = model_selection.ShuffleSplit(n_splits=10, test_size=0.33, random_state=7)
    model = LogisticRegression()
    result = model_selection.cross_val_score(model, X, Y, cv=kfold)
    print("Random Test Train Sets. Accuracy: {0}, Variance: {1}".format(result.mean()*100.0, result.std()*100.0))
def run():
    """Run every validation strategy once on the same data set."""
    X, Y = loadData()
    trainAndTestValidation(X, Y)
    kFoldValidation(X, Y)
    leaveOneOutValidation(X, Y)
    repeatedRandomTestTrainValidation(X, Y)
if __name__ == '__main__':
    run()
``` |
[
{
"content": "Repeat the following code:\n```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis file is part of Radar.\n\nRadar is free software: you can redistribute it and/or modify\nit under the terms of the GNU Lesser General Public License as published by\nthe Free Software Foundation, either version 3 of the... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis file is part of Radar.\n\nRadar is free software: you can redistribute it and/or modify\nit under the terms of the GNU Lesser General Public License as published by\nthe Free Software Foundation, either ... | ```python
# -*- coding: utf-8 -*-
"""
This file is part of Radar.
Radar is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Radar is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Lesser GNU General Public License for more details.
You should have received a copy of the Lesser GNU General Public License
along with Radar. If not, see <http://www.gnu.org/licenses/>.
Copyright 2015 Lucas Liendo.
"""
from errno import EINTR
from abc import ABCMeta
from argparse import ArgumentParser
from ..logger import RadarLogger
from ..platform_setup import Platform
class CLIError(Exception):
    """Raised when an unknown command-line option is requested."""
    pass
class RadarLauncherError(Exception):
    """Raised when the launcher cannot be set up for the current platform."""
    pass
class CLI(object):
    """Thin argparse wrapper exposing the parsed options as attributes."""

    def __init__(self, default_main_config_path, program_name='', version=''):
        self._program_name = program_name
        self._version = version
        # parse sys.argv immediately so attribute access is cheap afterwards
        parser = self._build_parser(default_main_config_path)
        self._options = parser.parse_args()

    def __getattr__(self, option):
        # delegate unknown attributes to the parsed namespace
        try:
            return getattr(self._options, option)
        except AttributeError:
            raise CLIError('Error - Option: \'{:}\' does not exist.'.format(option))

    def _build_parser(self, default_main_config_path):
        """Build the argument parser for -c/--config and -v/--version."""
        parser = ArgumentParser(prog=self._program_name)
        parser.add_argument(
            '-c', '--config',
            dest='main_config',
            action='store',
            default=default_main_config_path,
            required=False)
        parser.add_argument('-v', '--version', action='version', version=self._version)
        return parser
class RadarLauncher(object):
    """Abstract launcher: builds the platform setup and supervises worker threads."""
    __metaclass__ = ABCMeta
    PROGRAM_NAME = ''
    PROGRAM_VERSION = ''
    # seconds between join() polls while waiting for threads to finish
    THREAD_POLLING_TIME = 0.2
    # platform identifier -> PlatformSetup class; populated by subclasses
    AVAILABLE_PLATFORMS = {}
    def __init__(self):
        cli = CLI(self._get_default_main_config_path(), program_name=self.PROGRAM_NAME, version=self.PROGRAM_VERSION)
        self._platform_setup = self._setup_platform(cli.main_config)
    def _get_default_main_config_path(self):
        # the default config path depends on the detected platform
        return self.AVAILABLE_PLATFORMS[Platform.get_platform_type()].MAIN_CONFIG_PATH
    def _setup_platform(self, path):
        platform = Platform.get_platform_type()
        try:
            PlatformSetup = self.AVAILABLE_PLATFORMS[platform]
            platform_setup = PlatformSetup(path).configure(self).build()
        except KeyError:
            raise RadarLauncherError('Error - Platform : \'{:}\' is not available.'.format(platform))
        return platform_setup
    def _start_threads(self, threads):
        [t.start() for t in threads]
    def _join_threads(self):
        # join with a short timeout so signals can still reach the main thread
        while any([t.is_alive() for t in self._threads]):
            [t.join(self.THREAD_POLLING_TIME) for t in self._threads if t.is_alive()]
    def stop(self, *args):
        # signal every worker thread to shut down
        [t.stop_event.set() for t in self._threads]
        # Let's try to re-join the threads one more time for graceful termination.
    def _resume_interrupted_call(self, error):
        # EINTR means a signal interrupted the join; any other IOError is real
        if error.errno != EINTR:
            raise error
        self._join_threads()
    def run(self):
        """Start all threads and block until they finish, logging any failure."""
        # NOTE(review): _start_and_join_threads and _threads are expected to
        # be provided by concrete subclasses -- not defined here
        try:
            RadarLogger.log('Starting {:}.'.format(self.PROGRAM_NAME))
            self._start_and_join_threads()
        except IOError as e:
            self._resume_interrupted_call(e)
        except Exception as e:
            RadarLogger.log('Error - {:} raised an error. Details : {:}.'.format(self.__class__.__name__, e))
        finally:
            RadarLogger.log('Shutting down {:}.'.format(self.PROGRAM_NAME))
            self._platform_setup.tear_down()
``` |
[
{
"content": "```python\nclass Operation:\n def __init__(self, *stations):\n self.stations = stations\n \n \n def __call__(self, params=None, **dependencies):\n options = dict(params=(params or {}), **dependencies)\n success = True\n for station in self.stations:\... | [
{
"content": "<|memory_start|>```python\nclass Operation:\n def __init__(self, *stations):\n self.stations = stations\n \n \n def __call__(self, params=None, **dependencies):\n options = dict(params=(params or {}), **dependencies)\n success = True\n for station in... | ```python
class Operation:
    """A pipeline of stations executed in order against a shared options dict."""
    def __init__(self, *stations):
        self.stations = stations
    def __call__(self, params=None, **dependencies):
        options = dict(params=(params or {}), **dependencies)
        success = True
        for station in self.stations:
            # run a station only when its run-condition matches the current outcome
            if (success and station.runs_on_success) or (not success and station.runs_on_failure):
                success = station(options, dependencies)
                # a fail-fast step aborts the whole pipeline immediately
                if success == FailFast:
                    return Result(False, options)
        return Result(success, options)
class Result:
    """Outcome of an Operation run: a success flag plus the final options data."""

    def __init__(self, success, result_data):
        self.success = success
        self.result_data = result_data

    @property
    def failure(self):
        """True exactly when the operation did not succeed."""
        return not self.success

    def __getitem__(self, key):
        return self.result_data[key]

    def __contains__(self, key):
        return key in self.result_data

    def get(self, key):
        # dict-style lookup returning None for missing keys
        return self.result_data.get(key)
class FailFast:
    """Sentinel returned by a fail-fast step to abort the whole operation."""
    pass
class Activity:
    """Base wrapper around a callable that runs as part of an Operation."""
    # subclasses opt in to running after success and/or after failure
    runs_on_success = False
    runs_on_failure = False
    def __init__(self, func, name=None):
        self.func = func
        self.name = name
    def callfunc(self, options, dependencies):
        # expose both the full options dict and its "params" entry to the callable
        params = options["params"]
        return self.func(options=options, params=params, **dependencies)
    def __call__(self, options, dependencies):
        # base activities ignore the callable's return value and report success
        self.callfunc(options, dependencies)
        return True
    def __repr__(self):
        return "{} {}".format(self.__class__.__name__, self.name or self.func.__name__)
class step(Activity):
    """A pipeline step whose truthy/falsy return decides the operation's success."""
    runs_on_success = True
    def __init__(self, func, name=None, fail_fast=False):
        super().__init__(func, name)
        self.fail_fast = fail_fast
    def __call__(self, options, dependencies):
        res = self.callfunc(options, dependencies)
        success = bool(res)
        if not success and self.fail_fast:
            # abort the whole operation instead of merely marking failure
            return FailFast
        return success
class failure(Activity):
    """Runs only after a failure; keeps the operation in the failed state."""
    runs_on_failure = True
    def __call__(self, options, dependencies):
        self.callfunc(options, dependencies)
        return False
class success(Activity):
    """Runs only while the operation is succeeding; inherits pass-through behaviour."""
    runs_on_success = True
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n ('Javi Manzano', 'javi.manzano.oller@gmail.com'),\n)\n\nMANAGERS = ADMINS\n\nALLOWED_HOSTS = ['176.58.120.22', 'fuzzingtheweb.com']\n\nTIME_ZONE = 'Europe/London'\nUSE_TZ = True\nLANGUAGE_CODE = 'en-gb'\nS... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n ('Javi Manzano', 'javi.manzano.oller@gmail.com'),\n)\n\nMANAGERS = ADMINS\n\nALLOWED_HOSTS = ['176.58.120.22', 'fuzzingtheweb.com']\n\nTIME_ZONE = 'Europe/London'\nUSE_TZ = True\nLANGUAGE_C... | ```python
# -*- coding: utf-8 -*-
# Production Django settings for the fuzzopress blog.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Javi Manzano', 'javi.manzano.oller@gmail.com'),
)
MANAGERS = ADMINS
ALLOWED_HOSTS = ['176.58.120.22', 'fuzzingtheweb.com']
TIME_ZONE = 'Europe/London'
USE_TZ = True
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
# media/static files are served from fixed absolute paths on the host
MEDIA_ROOT = '/home/ubuntu/media/'
MEDIA_URL = '/static/media/'
STATIC_ROOT = '/static/'
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
MARKITUP_FILTER = ('markdown.markdown', {'safe_mode': True})
# NOTE(review): secret key committed to source -- should live in
# local_settings or an environment variable instead
SECRET_KEY = '%3maeu=guk3p#67j-2--drhy$*^vx+=l9r9bltk-n-^cw4#nic'
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
    '/home/ubuntu/django_apps/fuzzopress/blog/templates',
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.markup',
    'blogadmin',
    'markitup',
    'south',
    'blog',
)
# email unhandled request errors to ADMINS
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
MARKITUP_SET = 'markitup/sets/markdown'
# Settings for main blog app
FUZZOPRESS_SETTINGS = {
    'contact': [
        {
            'name': 'github',
            'icon': 'icon-github-alt',
            'url': 'https://github.com/jgasteiz',
            'show': True,
        },
        {
            'name': 'twitter',
            'icon': 'icon-twitter',
            'url': 'https://twitter.com/jgasteiz',
            'show': True,
        },
        {
            'name': 'googleplus',
            'icon': 'icon-google-plus-sign',
            'url': 'https://plus.google.com/u/0/104971241169939266087/posts',
            'show': True,
        },
        {
            'name': 'email',
            'icon': 'icon-envelope-alt',
            'url': 'mailto:javi.manzano.oller@gmail.com',
            'show': True,
        }
    ],
    'analytics': {
        'show': True,
        'code': 'UA-23612418-1'
    },
    'tags': {
        'show': True
    },
    'archive': {
        'show': True
    },
    'finder': {
        'show': True
    },
    'entries_per_page': 5
}
# machine-specific overrides are optional
try:
    from local_settings import *
except ImportError:
    pass
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n# gpio.py: Implements the GPIO calls to operate OpenSprinkler zones\n#\n# Copyright 2013 Sudaraka Wijesinghe <sudaraka.wijesinghe@gmail.com>\n#\n# This file is part of OpenSprinkler Pi Monitor (OSPi Monitor)\n#\n# OSPi Monitor i... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n# gpio.py: Implements the GPIO calls to operate OpenSprinkler zones\n#\n# Copyright 2013 Sudaraka Wijesinghe <sudaraka.wijesinghe@gmail.com>\n#\n# This file is part of OpenSprinkler Pi Monitor (OSPi Monitor)\n#\n... | ```python
# gpio.py: Implements the GPIO calls to operate OpenSprinkler zones
#
# Copyright 2013 Sudaraka Wijesinghe <sudaraka.wijesinghe@gmail.com>
#
# This file is part of OpenSprinkler Pi Monitor (OSPi Monitor)
#
# OSPi Monitor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OSPi Monitor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OSPi Monitor. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import sys
import RPi.GPIO as GPIO
from .config import ospim_conf
from .storage import OSPiMZones
# Make sure this script doesn't get executed directly
if '__main__' == __name__:
sys.exit(1)
class OSPiMGPIO:
    """
    Makes GPIO calls on RaspberryPi to operate OpenSprinkler hardware
    """
    # Indicator to the status of GPIO communication availability;
    # flipped to False permanently on the first GPIO error
    connected = True
    # GPIO Pins used for serial communication (read once from config)
    _pin_clk = ospim_conf.getint('gpio', 'pin_clk')
    _pin_noe = ospim_conf.getint('gpio', 'pin_noe')
    _pin_dat = ospim_conf.getint('gpio', 'pin_dat')
    _pin_lat = ospim_conf.getint('gpio', 'pin_lat')
    def __init__(self):
        """
        Initialize GPIO on RaspberryPi to interface with OpenSprinkler shift
        register.
        """
        try:
            GPIO.cleanup()
            GPIO.setmode(GPIO.BCM)
            GPIO.setup(self._pin_clk, GPIO.OUT)
            GPIO.setup(self._pin_noe, GPIO.OUT)
            # disable outputs before configuring the data/latch pins
            self.shift_register_disable()
            GPIO.setup(self._pin_dat, GPIO.OUT)
            GPIO.setup(self._pin_lat, GPIO.OUT)
            # Write the current status of zones to start with
            self.shift_register_write()
            self.shift_register_enable()
        except Exception as e:
            self.connected = False
            logging.error('[__init__] Failed to communicate with \
OpenSprinkler: %s' % str(e))
    def close(self, bits=None):
        """ Write the latest zone status from data file and cleanup GPIO """
        self.shift_register_write(bits)
        GPIO.cleanup()
    def shift_register_enable(self):
        """ Set OpenSprinkler shift register status to Enable """
        if not self.connected:
            return
        try:
            # nOE appears to be active-low: driving it low enables outputs
            GPIO.output(self._pin_noe, False)
        except Exception as e:
            self.connected = False
            logging.error('[sr_enable] Failed to communicate with \
OpenSprinkler: %s' % str(e))
    def shift_register_disable(self):
        """ Set OpenSprinkler shift register status to Disable """
        if not self.connected:
            return
        try:
            GPIO.output(self._pin_noe, True)
        except Exception as e:
            self.connected = False
            logging.error('[sr_disable] Failed to communicate with \
OpenSprinkler: %s' % str(e))
    def shift_register_write(self, bits=None):
        """ Send zone status bits to OpenSprinkler """
        if not self.connected:
            return
        if None == bits:
            # no explicit bits given: read the current zone states from storage
            bits = []
            data = OSPiMZones()._data
            for i in range(data['zone_count']):
                bits.append(data['zone'][i]['status'])
        logging.info('[sr_write] Writing: %s' % bits)
        try:
            GPIO.output(self._pin_clk, False)
            GPIO.output(self._pin_lat, False)
            # Send bits to OpenSprinkler via GPIO
            # Note: Order of the zones we have on the data structure is
            # big-endian (first to last), and for the serial communication it
            # has to be little-endian (last to first). Hence the len - pos -1
            for bit_pos in range(len(bits)):
                GPIO.output(self._pin_clk, False)
                GPIO.output(self._pin_dat, bits[len(bits) - bit_pos - 1])
                # rising clock edge shifts the bit into the register
                GPIO.output(self._pin_clk, True)
            # latch the shifted bits onto the outputs
            GPIO.output(self._pin_lat, True)
        except Exception as e:
            self.connected = False
            logging.error('[sr_write] Failed to communicate with \
OpenSprinkler: %s' % str(e))
``` |
[
{
"content": "```python\n#coding:utf-8\nimport re\nimport commands\n\n###################################\n#\n#\t测试系统日志是否做了权限限制\n#\n####################################\n\nclass TvarfileMod:\n\tglobal results,sorce\n\tresults=[]\n\tsorce=60\n\tdef setBaseline_main(self,baseline_main):\n\t\tself.baseline_main=ba... | [
{
"content": "<|memory_start|>```python\n#coding:utf-8\nimport re\nimport commands\n\n###################################\n#\n#\t测试系统日志是否做了权限限制\n#\n####################################\n\nclass TvarfileMod:\n\tglobal results,sorce\n\tresults=[]\n\tsorce=60\n\tdef setBaseline_main(self,baseline_main):\n\t\tself.... | ```python
#coding:utf-8
import re
import commands
###################################
#
# 测试系统日志是否做了权限限制
#
####################################
class TvarfileMod:
global results,sorce
results=[]
sorce=60
def setBaseline_main(self,baseline_main):
self.baseline_main=baseline_main
def start(self):
print "[*] Checking TvarfileMod!!"
check_list=['/var/log/message','/var/log/secure','/var/log/maillog','/var/log/cron','/var/log/spooler','/var/log/boot.log']
try:
for item in check_list:
if os.path.exists(item):
test_com=commands.getoutput("ls -l "+item).split(" ")
if not test_com[0]=="-rw-r-----":
sorce=sorce-10
results.append({item:test_com[0]})
except:
pass
# print self.baseline_main.output_name
def save(self):
if sorce<60:
self.baseline_main.xml_result({"mod_id":"37wan-centOS-06","mod_name":"TvarfileMod","status":"1","results":str(results)})
else:
self.baseline_main.xml_result({"mod_id":"37wan-centOS-06","mod_name":"TvarfileMod","status":"0","results":"null"})
print "[*] TvarfileMod Finish!"
def getPluginClass():
    # Entry point used by the plugin loader to discover this module's check.
    return TvarfileMod
``` |
[
{
"content": "```python\nimport logging\nfrom flask_restplus import Namespace, Resource, fields\nfrom flask import jsonify, request\nfrom Service.userInfoService import *\napi = Namespace('user', description='User Info API related operations')\nLOG = logging.getLogger(\"userInfoApi\")\nuser_fields = api.model('... | [
{
"content": "<|memory_start|>```python\nimport logging\nfrom flask_restplus import Namespace, Resource, fields\nfrom flask import jsonify, request\nfrom Service.userInfoService import *\napi = Namespace('user', description='User Info API related operations')\nLOG = logging.getLogger(\"userInfoApi\")\nuser_fiel... | ```python
import logging
from flask_restplus import Namespace, Resource, fields
from flask import jsonify, request
from Service.userInfoService import *
api = Namespace('user', description='User Info API related operations')
LOG = logging.getLogger("userInfoApi")
# Swagger model describing the user payload accepted/returned by this API.
# NOTE(review): 'lastLogin' passes the fields.Date class itself rather than
# an instance like the other entries -- confirm flask_restplus accepts this.
user_fields = api.model('UserModel', {
    'lastLogin': fields.Date,
    'totalScore': fields.Integer(description='Total Score for the day'),
    'userName': fields.String
})
@api.route('')
class UserInfo(Resource):
    """CRUD endpoints for user records, delegating to userInfoService."""

    @api.expect(user_fields)
    def post(self):
        # Create a new user from the JSON request body.
        json_data = request.get_json()
        LOG.debug("Request JSON : %s " % json_data)
        createUserInfo(json_data)
        return ["POST Request Complete"]

    @api.doc(params={'userName': 'Get userInfo by UserName'})
    def get(self):
        # Look a user up by the ``userName`` query parameter and return it
        # as a flat dict of stringified column values.
        userName = request.args.get('userName')
        obj = getUserInfoByUserName(userName)
        return jsonify(row2dict(obj))

    @api.expect(user_fields)
    def put(self):
        # Update an existing user (matched by userName) from the JSON body.
        json_data = request.get_json()
        LOG.debug("Request JSON : %s " % json_data)
        updateUserInfoByUserName(json_data)
        return ["PUT Request Complete"]
@api.route('/calculate')
@api.doc(params={'amount': 'Amount Value to update', 'operation': 'Add or Sub', 'userName': 'userName to be updated'})
class scoresTotal(Resource):
    """Endpoint applying an add/subtract operation to a user's total score."""

    def put(self):
        # All inputs arrive as query parameters; the service layer performs
        # the arithmetic and persistence.
        amount = request.args.get('amount')
        operation = request.args.get('operation')
        userName = request.args.get('userName')
        evaluateScoreTotal(amount, operation, userName)
        return "Evaluated Total successfully"
@api.route('/calculate/bonus')
class scoresDate(Resource):
    """Endpoint triggering the bonus evaluation for a user."""

    @api.doc(params={'userName': 'userName to be updated'})
    def put(self):
        userName = request.args.get('userName')
        evaluateBonus(userName)
        return "Evaluated Bonus successfully"
def row2dict(row):
    """Flatten a SQLAlchemy row into a dict mapping each column name to the
    stringified attribute value (so the result is JSON-serializable)."""
    return {
        column.name: str(getattr(row, column.name))
        for column in row.__table__.columns
    }
``` |
[
{
"content": "Recreate the original code text:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n\nimport feedparser\nfrom .Hunter import Hunter\nfrom .Tweet import Tweet\nimport logging\nfrom textblob import TextBlob\n\n\n# Find new tweets from RSS streams\nclass RSSHunter(Hunter):\n \"\"\"\n ... | [
{
"content": "Recreate the original code text:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n\nimport feedparser\nfrom .Hunter import Hunter\nfrom .Tweet import Tweet\nimport logging\nfrom textblob import TextBlob\n\n\n# Find new tweets from RSS streams\nclass RSSHunter(Hunter):... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import feedparser
from .Hunter import Hunter
from .Tweet import Tweet
import logging
from textblob import TextBlob
# Find new tweets from RSS streams
class RSSHunter(Hunter):
    """
    Find new tweets from RSS streams.

    Iterates over the entries of a feed, yielding a Tweet for each entry
    whose title's detected language matches the configured languages.
    """

    def __init__(self, stream):
        """Parse the feed and prepare iteration state.

        :param stream: dict with 'url' and 'lang' keys and an optional
            'hashtags' list.
        """
        self._stream = stream
        self._stream_url = stream['url']
        logging.debug(u"Retreiving RSS stream {}".format(self._stream_url))
        # feedparser fetches and parses the feed up front.
        self._entries = feedparser.parse(self._stream_url)['entries']
        self._hashtags = stream['hashtags'] if 'hashtags' in stream else list()
        self._lang = stream['lang']
        self._current = 0

    def get_stream(self):
        """Return the stream configuration dict."""
        return self._stream

    def __unicode__(self):
        """Unicode representation."""
        return u"RSSHunter(stream={})".format(self._stream)

    def __iter__(self):
        """This object is its own iterator."""
        return self

    def next(self):
        """Return the next matching entry as a Tweet.

        :raise StopIteration: when no further entry matches the configured
            languages.
        """
        if self._current >= len(self._entries):
            raise StopIteration
        # Scan forward until an entry in an accepted language is found.
        found = False
        while not found and self._current < len(self._entries):
            current_entry = self._entries[self._current]
            # NOTE: detect_language() performs a remote call per entry.
            tweet_blob = TextBlob(current_entry['title'])
            if tweet_blob.detect_language() in self._lang:
                found = True
            self._current += 1
        if found:
            return Tweet(current_entry['title'], current_entry['links'][0]['href'], self._hashtags)
        else:
            raise StopIteration

    # FIX: alias so the class also satisfies the Python 3 iterator
    # protocol; previously iteration only worked under Python 2.
    __next__ = next
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n#\n# Copyright John Reid 2013\n#\n\n\n\"\"\"\nExample to illustrate application of pybool. Based on regulatory network in paper\non robustness under functional constraint by Nakajima et al.\n\"\"\"\n\nimport numpy as N, logging\nfr... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n#\n# Copyright John Reid 2013\n#\n\n\n\"\"\"\nExample to illustrate application of pybool. Based on regulatory network in paper\non robustness under functional constraint by Nakajima et al.\n\"\"\"\n\nimport numpy a... | ```python
#
# Copyright John Reid 2013
#
"""
Example to illustrate application of pybool. Based on regulatory network in paper
on robustness under functional constraint by Nakajima et al.
"""
import numpy as N, logging
from pybool.constraints import gene_off, gene_on
from pybool import network, constraints
class MetaData(constraints.MetaData):
    """
    Meta-data for drosophila neurogenesis regulatory
    networks in Nakajima paper.

    All pybool configuration is done through attributes of this class:
    the genes and external inputs, the experimental conditions with
    their constraints, and the allowed regulatory connection strengths.
    """

    def __init__(self):
        "Construct."

        #
        # Initialise the base class
        #
        super(MetaData, self).__init__()

        #
        # The number of time steps to realise.
        #
        self.T = 12

        #
        # Add the genes
        #
        SVP = self.add_external_input(
            'svp',
            color='purple',
            position=(1, 1.5),
            input_function=svp_external_input,
            input_params=(None,),
        )
        HB = self.add_gene(
            'hb',
            initial_state=1,
            constitutive=(0, 1),
            color='green',
            position=(0, 1.5),
        )
        KR = self.add_gene(
            'Kr',
            initial_state=0,
            constitutive=(0, 1),
            color='darkblue',
            position=(-1, 1),
        )
        PDM = self.add_gene(
            'pdm',
            initial_state=0,
            constitutive=(0, 1),
            color='deepskyblue',
            position=(0, .5),
        )
        CAS = self.add_gene(
            'cas',
            initial_state=0,
            constitutive=(0, 1),
            color='#DD0000',
            position=(-1, 0),
        )
        X = self.add_external_input(
            'X',
            color='black',
            position=(1, .5),
            input_function=X_external_input,
            input_params=N.arange(1, self.T),
        )

        #
        # Add each condition as a dict mapping genes to states and a
        # constraints function. Wild-type plus one knock-out and one
        # over-expression condition per gene.
        #
        wt = self.add_condition(
            'wt',
            {},
            constraints=(
                (constraints.CHECK_ORDER_OF_EXPRESSION, (HB, KR, PDM, CAS)),
                (constraints.CHECK_CONSECUTIVE_DIFFERENT, (HB, KR, PDM, CAS)),
                (constraints.CHECK_ON_TO_OFF_SWITCH, X),
            )
        )
        hb_ko = self.add_condition(
            'hb-',
            inputs={HB: gene_off},
            constraints=(
                (constraints.CHECK_ORDER_OF_EXPRESSION, (KR, PDM, CAS)),
                (constraints.CHECK_CONSECUTIVE_DIFFERENT, (KR, PDM, CAS)),
                (constraints.CHECK_ON_TO_OFF_SWITCH, X),
            )
        )
        kr_ko = self.add_condition(
            'Kr-',
            inputs={KR: gene_off},
            constraints=(
                (constraints.CHECK_ORDER_OF_EXPRESSION, (HB, PDM, CAS)),
                (constraints.CHECK_CONSECUTIVE_DIFFERENT, (HB, PDM, CAS)),
                (constraints.CHECK_ON_TO_OFF_SWITCH, X),
            )
        )
        pdm_ko = self.add_condition(
            'pdm-',
            inputs={PDM: gene_off},
            constraints=(
                (constraints.CHECK_ORDER_OF_EXPRESSION, (HB, KR, CAS)),
                (constraints.CHECK_CONSECUTIVE_DIFFERENT, (HB, KR, CAS)),
                (constraints.CHECK_ON_TO_OFF_SWITCH, X),
            )
        )
        cas_ko = self.add_condition(
            'cas-',
            inputs={CAS: gene_off},
            constraints=(
                (constraints.CHECK_ORDER_OF_EXPRESSION, (HB, KR, PDM)),
                (constraints.CHECK_CONSECUTIVE_DIFFERENT, (HB, KR, PDM)),
                (constraints.CHECK_ON_TO_OFF_SWITCH, X),
            )
        )
        hb_oe = self.add_condition(
            'hb++',
            inputs={HB: gene_on},
            constraints=(
                (constraints.CHECK_ORDER_OF_EXPRESSION, (HB, KR)),
                (constraints.CHECK_NULL_EXPRESSION, PDM),
                (constraints.CHECK_NULL_EXPRESSION, CAS),
                (constraints.CHECK_CONSECUTIVE_DIFFERENT, (HB, KR)),
                (constraints.CHECK_ON_TO_OFF_SWITCH, X),
            )
        )
        kr_oe = self.add_condition(
            'Kr++',
            inputs={KR: gene_on},
            constraints=(
                (constraints.CHECK_ORDER_OF_EXPRESSION, (HB, KR, PDM)),
                (constraints.CHECK_NULL_EXPRESSION, CAS),
                (constraints.CHECK_CONSECUTIVE_DIFFERENT, (HB, KR, PDM)),
                (constraints.CHECK_ON_TO_OFF_SWITCH, X),
            )
        )
        pdm_oe = self.add_condition(
            'pdm++',
            inputs={PDM: gene_on},
            constraints=(
                (constraints.CHECK_ORDER_OF_EXPRESSION, (HB, PDM, CAS)),
                (constraints.CHECK_NULL_EXPRESSION, KR),
                (constraints.CHECK_CONSECUTIVE_DIFFERENT, (HB, PDM, CAS)),
                (constraints.CHECK_ON_TO_OFF_SWITCH, X),
            )
        )
        cas_oe = self.add_condition(
            'cas++',
            inputs={CAS: gene_on},
            constraints=(
                (constraints.CHECK_ORDER_OF_EXPRESSION, (HB, KR)),
                (constraints.CHECK_NULL_EXPRESSION, PDM),
                (constraints.CHECK_CONSECUTIVE_DIFFERENT, (HB, KR, CAS)),
                (constraints.CHECK_ON_TO_OFF_SWITCH, X),
            )
        )

        #
        # The condition to use if none specified.
        #
        self.default_condition = wt

        #
        # set up the possible regulatory connections
        #
        unconstrained = (-5, 0, 1)
        represses_or_none = (-5, 0)
        activates = (1,)
        represses = (-5,)
        no_regulation = (0,)

        # initialise all connections to no_regulation
        # FIX: range() instead of the Python-2-only xrange() so the module
        # also runs under Python 3 (range works identically here on both).
        for g1 in range(self.G):
            for g2 in range(self.G):
                self.possible_Js[g1, g2] = no_regulation

        # X can regulate any of HB, KR, PDM and CAS
        self.possible_Js[X, HB] = unconstrained
        self.possible_Js[X, KR] = unconstrained
        self.possible_Js[X, PDM] = unconstrained
        self.possible_Js[X, CAS] = unconstrained

        # from Figure 1 in Nakajima paper
        self.possible_Js[SVP, HB] = represses
        self.possible_Js[HB, KR] = activates
        self.possible_Js[HB, PDM] = represses
        self.possible_Js[HB, CAS] = represses_or_none
        self.possible_Js[KR, PDM] = activates
        self.possible_Js[KR, CAS] = represses
        self.possible_Js[PDM, KR] = represses
        self.possible_Js[PDM, CAS] = activates
        self.possible_Js[CAS, PDM] = represses
self.possible_Js[CAS,PDM] = represses
def svp_external_input(t, p):
    """External input function for svp: svp is on only at time step 1.

    :param t: current time step.
    :param p: unused parameter slot (kept for the input_params interface).
    :return: 1 when t == 1, otherwise 0.
    """
    # FIX: replace the fragile ``cond and 1 or 0`` idiom with a conditional
    # expression. Behavior is identical here (1 is truthy), but the old
    # pattern silently breaks whenever the "true" value is falsy.
    return 1 if t == 1 else 0
``` |
[
{
"content": "```python\n# Copyright 2015-2016 Open Source Robotics Foundation, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LIC... | [
{
"content": "<|memory_start|>```python\n# Copyright 2015-2016 Open Source Robotics Foundation, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.... | ```python
# Copyright 2015-2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from collections import OrderedDict
import sys
from catkin_pkg.package import parse_package_string
from ros_buildfarm.common import get_default_node_label
from ros_buildfarm.common import get_doc_job_name
from ros_buildfarm.common import get_doc_view_name
from ros_buildfarm.common import get_github_project_url
from ros_buildfarm.common import get_node_label
from ros_buildfarm.common \
import get_repositories_and_script_generating_key_files
from ros_buildfarm.common import git_github_orgunit
from ros_buildfarm.common import JobValidationError
from ros_buildfarm.common import write_groovy_script_and_configs
from ros_buildfarm.config import get_distribution_file
from ros_buildfarm.config import get_doc_build_files
from ros_buildfarm.config import get_global_doc_build_files
from ros_buildfarm.config import get_index as get_config_index
from ros_buildfarm.git import get_repository
from ros_buildfarm.templates import expand_template
from rosdistro import get_distribution_cache
from rosdistro import get_index
def configure_doc_jobs(
        config_url, rosdistro_name, doc_build_name, groovy_script=None,
        dry_run=False, whitelist_repository_names=None):
    """
    Configure all Jenkins doc jobs.

    L{configure_doc_job} will be invoked for doc repository and target
    which matches the build file criteria.

    :param config_url: URL of the buildfarm configuration index.
    :param rosdistro_name: name of the ROS distribution.
    :param doc_build_name: name of the doc build file entry to use.
    :param groovy_script: when given, write a groovy reconfiguration
        script to this path instead of talking to Jenkins directly.
    :param dry_run: when True, report changes without applying them.
    :param whitelist_repository_names: optional list restricting which
        repositories get jobs configured.
    """
    config = get_config_index(config_url)
    build_files = get_doc_build_files(config, rosdistro_name)
    build_file = build_files[doc_build_name]
    index = get_index(config.rosdistro_index_url)
    dist_cache = None
    # The distribution cache is only needed to resolve maintainer emails.
    if build_file.notify_maintainers:
        dist_cache = get_distribution_cache(index, rosdistro_name)
    # get targets: every (os, os_code_name, arch) triple of the build file
    targets = []
    for os_name in build_file.targets.keys():
        for os_code_name in build_file.targets[os_name].keys():
            for arch in build_file.targets[os_name][os_code_name]:
                targets.append((os_name, os_code_name, arch))
    print('The build file contains the following targets:')
    for os_name, os_code_name, arch in targets:
        print(' -', os_name, os_code_name, arch)
    dist_file = get_distribution_file(index, rosdistro_name, build_file)
    if not dist_file:
        print('No distribution file matches the build file')
        return
    doc_view_name = get_doc_view_name(rosdistro_name, doc_build_name)
    # all further configuration will be handled by either the Jenkins API
    # or by a generated groovy script
    from ros_buildfarm.jenkins import connect
    jenkins = connect(config.jenkins_url) if groovy_script is None else False
    view_configs = {}
    views = {}
    views[doc_view_name] = configure_doc_view(
        jenkins, doc_view_name, dry_run=dry_run)
    # When not talking to Jenkins, collect view configs for the script.
    if not jenkins:
        view_configs.update(views)
    groovy_data = {
        'dry_run': dry_run,
        'expected_num_views': len(view_configs),
    }
    repo_names = dist_file.repositories.keys()
    filtered_repo_names = build_file.filter_repositories(repo_names)
    job_names = []
    job_configs = OrderedDict()
    for repo_name in sorted(repo_names):
        if whitelist_repository_names:
            if repo_name not in whitelist_repository_names:
                print(
                    "Skipping repository '%s' not in explicitly passed list" %
                    repo_name, file=sys.stderr)
                continue
        is_disabled = repo_name not in filtered_repo_names
        if is_disabled and build_file.skip_ignored_repositories:
            print("Skipping ignored repository '%s'" % repo_name,
                  file=sys.stderr)
            continue
        repo = dist_file.repositories[repo_name]
        # A doc job needs both a doc section and a pinned doc version.
        if not repo.doc_repository:
            print("Skipping repository '%s': no doc section" % repo_name)
            continue
        if not repo.doc_repository.version:
            print("Skipping repository '%s': no doc version" % repo_name)
            continue
        for os_name, os_code_name, arch in targets:
            try:
                job_name, job_config = configure_doc_job(
                    config_url, rosdistro_name, doc_build_name,
                    repo_name, os_name, os_code_name, arch,
                    config=config, build_file=build_file,
                    index=index, dist_file=dist_file,
                    dist_cache=dist_cache, jenkins=jenkins, views=views,
                    is_disabled=is_disabled,
                    groovy_script=groovy_script,
                    dry_run=dry_run)
                job_names.append(job_name)
                if groovy_script is not None:
                    print("Configuration for job '%s'" % job_name)
                    job_configs[job_name] = job_config
            except JobValidationError as e:
                print(e.message, file=sys.stderr)
    groovy_data['expected_num_jobs'] = len(job_configs)
    groovy_data['job_prefixes_and_names'] = {}
    job_prefix = '%s__' % doc_view_name
    # Only prune jobs when the full repository set was processed; with a
    # whitelist the job list is incomplete and pruning would be wrong.
    if not whitelist_repository_names:
        groovy_data['job_prefixes_and_names']['doc'] = (job_prefix, job_names)

        if groovy_script is None:
            # delete obsolete jobs in this view
            from ros_buildfarm.jenkins import remove_jobs
            print('Removing obsolete doc jobs')
            remove_jobs(jenkins, job_prefix, job_names, dry_run=dry_run)
    if groovy_script is not None:
        print(
            "Writing groovy script '%s' to reconfigure %d views and %d jobs" %
            (groovy_script, len(view_configs), len(job_configs)))
        content = expand_template(
            'snippet/reconfigure_jobs.groovy.em', groovy_data)
        write_groovy_script_and_configs(
            groovy_script, content, job_configs, view_configs=view_configs)
def configure_doc_job(
        config_url, rosdistro_name, doc_build_name,
        repo_name, os_name, os_code_name, arch,
        config=None, build_file=None,
        index=None, dist_file=None, dist_cache=None,
        jenkins=None, views=None,
        is_disabled=False,
        groovy_script=None,
        doc_repository=None,
        dry_run=False):
    """
    Configure a single Jenkins doc job.

    This includes the following steps:
    - clone the doc repository to use
    - clone the ros_buildfarm repository
    - write the distribution repository keys into files
    - invoke the run_doc_job.py script

    Most keyword arguments are caches fetched on demand when not supplied
    by the caller (configure_doc_jobs passes them in to avoid refetching).

    :return: tuple of the job name and its expanded XML configuration.
    :raises JobValidationError: if the repository or target triple is not
        valid for this build file.
    """
    if config is None:
        config = get_config_index(config_url)
    if build_file is None:
        build_files = get_doc_build_files(config, rosdistro_name)
        build_file = build_files[doc_build_name]
    if index is None:
        index = get_index(config.rosdistro_index_url)
    if dist_file is None:
        dist_file = get_distribution_file(index, rosdistro_name, build_file)
        if not dist_file:
            raise JobValidationError(
                'No distribution file matches the build file')
    repo_names = dist_file.repositories.keys()
    # Validate the repository and resolve its doc section.
    if repo_name is not None:
        if repo_name not in repo_names:
            raise JobValidationError(
                "Invalid repository name '%s' " % repo_name +
                'choose one of the following: %s' %
                ', '.join(sorted(repo_names)))
        repo = dist_file.repositories[repo_name]
        if not repo.doc_repository:
            raise JobValidationError(
                "Repository '%s' has no doc section" % repo_name)
        if not repo.doc_repository.version:
            raise JobValidationError(
                "Repository '%s' has no doc version" % repo_name)
        doc_repository = repo.doc_repository
    # Validate the (os, os_code_name, arch) target triple.
    if os_name not in build_file.targets.keys():
        raise JobValidationError(
            "Invalid OS name '%s' " % os_name +
            'choose one of the following: ' +
            ', '.join(sorted(build_file.targets.keys())))
    if os_code_name not in build_file.targets[os_name].keys():
        raise JobValidationError(
            "Invalid OS code name '%s' " % os_code_name +
            'choose one of the following: ' +
            ', '.join(sorted(build_file.targets[os_name].keys())))
    if arch not in build_file.targets[os_name][os_code_name]:
        raise JobValidationError(
            "Invalid architecture '%s' " % arch +
            'choose one of the following: %s' % ', '.join(sorted(
                build_file.targets[os_name][os_code_name])))
    if dist_cache is None and build_file.notify_maintainers:
        dist_cache = get_distribution_cache(index, rosdistro_name)
    if jenkins is None:
        from ros_buildfarm.jenkins import connect
        jenkins = connect(config.jenkins_url)
    if views is None:
        view_name = get_doc_view_name(
            rosdistro_name, doc_build_name)
        configure_doc_view(jenkins, view_name, dry_run=dry_run)
    job_name = get_doc_job_name(
        rosdistro_name, doc_build_name,
        repo_name, os_name, os_code_name, arch)
    job_config = _get_doc_job_config(
        config, config_url, rosdistro_name, doc_build_name,
        build_file, os_name, os_code_name, arch, doc_repository,
        repo_name, dist_cache=dist_cache, is_disabled=is_disabled)
    # jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero
    if isinstance(jenkins, object) and jenkins is not False:
        from ros_buildfarm.jenkins import configure_job
        configure_job(jenkins, job_name, job_config, dry_run=dry_run)
    return job_name, job_config
def configure_doc_view(jenkins, view_name, dry_run=False):
    """Create/update the Jenkins view listing all jobs of this doc build
    (matched by the '<view_name>__' job-name prefix)."""
    from ros_buildfarm.jenkins import configure_view
    return configure_view(
        jenkins, view_name, include_regex='%s__.+' % view_name,
        template_name='dashboard_view_all_jobs.xml.em', dry_run=dry_run)
def _get_doc_job_config(
        config, config_url, rosdistro_name, doc_build_name,
        build_file, os_name, os_code_name, arch, doc_repo_spec,
        repo_name, dist_cache=None, is_disabled=False):
    """Expand the job XML template for a single doc job and return it."""
    # rosdoc2 builds use a dedicated job template.
    if build_file.documentation_type == 'rosdoc2':
        template_name = 'doc/rosdoc2_job.xml.em'
    else:
        template_name = 'doc/doc_job.xml.em'
    repository_args, script_generating_key_files = \
        get_repositories_and_script_generating_key_files(build_file=build_file)
    maintainer_emails = set([])
    if build_file.notify_maintainers and dist_cache and repo_name and \
            repo_name in dist_cache.distribution_file.repositories:
        # add maintainers listed in latest release to recipients
        repo = dist_cache.distribution_file.repositories[repo_name]
        if repo.release_repository:
            for pkg_name in repo.release_repository.package_names:
                if pkg_name not in dist_cache.release_package_xmls:
                    continue
                pkg_xml = dist_cache.release_package_xmls[pkg_name]
                pkg = parse_package_string(pkg_xml)
                for m in pkg.maintainers:
                    maintainer_emails.add(m.email)
    # Template substitution data for the job XML.
    job_data = {
        'github_url': get_github_project_url(doc_repo_spec.url),
        'job_priority': build_file.jenkins_job_priority,
        'node_label': get_node_label(
            build_file.jenkins_job_label,
            get_default_node_label('%s_%s_%s' % (
                rosdistro_name, 'doc', doc_build_name))),
        'doc_repo_spec': doc_repo_spec,
        'disabled': is_disabled,
        'github_orgunit': git_github_orgunit(doc_repo_spec.url),
        'ros_buildfarm_repository': get_repository(),
        'script_generating_key_files': script_generating_key_files,
        'config_url': config_url,
        'rosdistro_index_url': config.rosdistro_index_url,
        'rosdistro_name': rosdistro_name,
        'doc_build_name': doc_build_name,
        'os_name': os_name,
        'os_code_name': os_code_name,
        'arch': arch,
        'build_tool': build_file.build_tool,
        'repository_args': repository_args,
        'upload_user': build_file.upload_user,
        'upload_host': build_file.upload_host,
        'upload_root': build_file.upload_root,
        'notify_emails': build_file.notify_emails,
        'maintainer_emails': maintainer_emails,
        'notify_maintainers': build_file.notify_maintainers,
        'notify_committers': build_file.notify_committers,
        'timeout_minutes': build_file.jenkins_job_timeout,
        'credential_id': build_file.upload_credential_id,
        'git_ssh_credential_id': config.git_ssh_credential_id,
    }
    job_config = expand_template(template_name, job_data)
    return job_config
def configure_doc_metadata_job(
        config_url, rosdistro_name, doc_build_name,
        config=None, build_file=None, dry_run=False):
    """Configure the Jenkins job generating the doc metadata for a whole
    doc build; the job is named after the doc view."""
    if config is None:
        config = get_config_index(config_url)
    if build_file is None:
        build_files = get_doc_build_files(config, rosdistro_name)
        build_file = build_files[doc_build_name]
    from ros_buildfarm.jenkins import connect
    jenkins = connect(config.jenkins_url)
    job_name = get_doc_view_name(rosdistro_name, doc_build_name)
    job_config = _get_doc_metadata_job_config(
        config, config_url, rosdistro_name, doc_build_name, build_file)
    # jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero
    if isinstance(jenkins, object) and jenkins is not False:
        from ros_buildfarm.jenkins import configure_job
        configure_job(jenkins, job_name, job_config, dry_run=dry_run)
def _get_doc_metadata_job_config(
        config, config_url, rosdistro_name, doc_build_name, build_file):
    """Expand the job XML template for the doc metadata job."""
    template_name = 'doc/doc_metadata_job.xml.em'
    repository_args, script_generating_key_files = \
        get_repositories_and_script_generating_key_files(config=config)
    job_data = {
        'job_priority': build_file.jenkins_job_priority,
        'node_label': get_node_label(
            build_file.jenkins_job_label,
            get_default_node_label('%s_%s_%s' % (
                rosdistro_name, 'doc', doc_build_name))),
        'ros_buildfarm_repository': get_repository(),
        'script_generating_key_files': script_generating_key_files,
        'config_url': config_url,
        'rosdistro_name': rosdistro_name,
        'doc_build_name': doc_build_name,
        'repository_args': repository_args,
        'notify_emails': build_file.notify_emails,
        'timeout_minutes': build_file.jenkins_job_timeout,
        'credential_id': build_file.upload_credential_id,
    }
    job_config = expand_template(template_name, job_data)
    return job_config
def configure_doc_independent_job(
        config_url, doc_build_name, config=None, build_file=None,
        dry_run=False):
    """Configure the Jenkins job for a rosdistro-independent doc build."""
    if config is None:
        config = get_config_index(config_url)
    if build_file is None:
        build_files = get_global_doc_build_files(config)
        build_file = build_files[doc_build_name]
    from ros_buildfarm.jenkins import connect
    jenkins = connect(config.jenkins_url)
    job_name = 'doc_%s' % doc_build_name
    # NOTE(review): job_name (not doc_build_name) is forwarded as the
    # template's doc_build_name -- looks intentional, worth confirming.
    job_config = _get_doc_independent_job_config(
        config, config_url, job_name, build_file)
    # jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero
    if isinstance(jenkins, object) and jenkins is not False:
        from ros_buildfarm.jenkins import configure_job
        configure_job(jenkins, job_name, job_config, dry_run=dry_run)
def _get_doc_independent_job_config(
        config, config_url, doc_build_name, build_file):
    """Expand the job XML template for a rosdistro-independent doc job.

    :raises JobValidationError: if the build file's documentation_type is
        neither 'make_target' nor 'docker_build'.
    """
    repository_args, script_generating_key_files = \
        get_repositories_and_script_generating_key_files(config=config)
    job_data = {
        'job_priority': build_file.jenkins_job_priority,
        'node_label': get_node_label(build_file.jenkins_job_label),
        'ros_buildfarm_repository': get_repository(),
        'doc_repositories': build_file.doc_repositories,
        'script_generating_key_files': script_generating_key_files,
        'config_url': config_url,
        'doc_build_name': doc_build_name,
        'repository_args': repository_args,
        'notify_emails': build_file.notify_emails,
        'timeout_minutes': build_file.jenkins_job_timeout,
        'upload_user': build_file.upload_user,
        'upload_host': build_file.upload_host,
        'upload_root': build_file.upload_root,
        'upload_credential_id': build_file.upload_credential_id
    }
    # Each documentation_type maps to its own template and extra data.
    if build_file.documentation_type == 'make_target':
        template_name = 'doc/doc_independent_job.xml.em'
        job_data.update({
            'install_apt_packages': build_file.install_apt_packages,
            'install_pip_packages': build_file.install_pip_packages,
        })
    elif build_file.documentation_type == 'docker_build':
        template_name = 'doc/doc_independent_docker_job.xml.em'
        job_data.update({
            'upload_repository_url': build_file.upload_repository_url,
            'upload_repository_branch': build_file.upload_repository_branch,
        })
    else:
        raise JobValidationError(
            'Not independent documentation_type: ' +
            build_file.documentation_type
        )
    job_config = expand_template(template_name, job_data)
    return job_config
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n\"\"\"Given the names and grades for each student in a Physics class of N\n students, store them in a nested list and print the name(s) of any\n student(s) having the second lowest grade.\n\nNote: If there are multiple students with the same gr... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n\"\"\"Given the names and grades for each student in a Physics class of N\n students, store them in a nested list and print the name(s) of any\n student(s) having the second lowest grade.\n\nNote: If there are multiple students ... | ```python
"""Given the names and grades for each student in a Physics class of N
students, store them in a nested list and print the name(s) of any
student(s) having the second lowest grade.
Note: If there are multiple students with the same grade, order their
names alphabetically and print each name on a new line.
Input Format
The first line contains an integer, N, the number of students.
The subsequent lines describe each student over 2N lines; the first
line contains a student's name, and the second line contains their
grade.
Constraints
2 <= N <= 5
There will always be one or more students having the second lowest
grade.
Output Format
Print the name(s) of any student(s) having the second lowest grade
in Physics; if there are multiple students, order their names
alphabetically and print each one on a new line.
"""
from operator import itemgetter
def second_lowest(*args):
    """Return the second lowest score among [name, score] records.

    Single-pass scan tracking the current lowest record and the runner-up;
    duplicate occurrences of the lowest score are ignored.
    """
    records = args[0]
    best, runner_up = records[0], ["", 100]
    for entry in records:
        score = entry[1]
        # Anything at or above the current runner-up cannot matter.
        if score >= runner_up[1]:
            continue
        if score < best[1]:
            # New minimum: the old minimum becomes the runner-up.
            best, runner_up = entry, best
        elif score != best[1]:
            # Between best and runner-up: new runner-up.
            runner_up = entry
    return runner_up[1]
if __name__ == '__main__':
    # Read N, then N (name, score) pairs on alternating lines.
    students = []
    for _ in range(int(input())):
        name = input()
        score = float(input())
        students.append([name, score])

    # (Variable name is historical: this is the second *lowest* grade.)
    second_largest_grade = second_lowest(students)
    # Keep everyone at that grade, sorted alphabetically by name.
    result_list = list(filter(lambda x: x[1] == second_largest_grade, students))
    result_list.sort(key=itemgetter(0))
    for student in result_list:
        print(student[0])
``` |
[
{
"content": "Here is some code:\n```python\nfrom pypot.primitive import LoopPrimitive\n\n\nclass FaceTracking(LoopPrimitive):\n def __init__(self, robot, freq, face_detector):\n LoopPrimitive.__init__(self, robot, freq)\n\n self.face_detector = face_detector\n\n self.dx, self.dy = 60, 5... | [
{
"content": "Here is some code:\n<|memory_start|>```python\nfrom pypot.primitive import LoopPrimitive\n\n\nclass FaceTracking(LoopPrimitive):\n def __init__(self, robot, freq, face_detector):\n LoopPrimitive.__init__(self, robot, freq)\n\n self.face_detector = face_detector\n\n self.dx,... | ```python
from pypot.primitive import LoopPrimitive
class FaceTracking(LoopPrimitive):
    """Pypot loop primitive panning/tilting motors m1/m5 to track a face."""

    def __init__(self, robot, freq, face_detector):
        LoopPrimitive.__init__(self, robot, freq)

        self.face_detector = face_detector

        # Angular amplitude (per unit of normalised image coordinate) of
        # the pan (dx) and tilt (dy) excursions around the rest posture.
        self.dx, self.dy = 60, 50

        self._tracked_face = None

    def setup(self):
        # Move to the rest posture and record it as the tracking origin.
        self.robot.rest_posture.start()
        self.robot.rest_posture.wait_to_stop()

        for m in self.robot._robot.motors:
            m.led = 'yellow'

        self.rest_pos = {m.name: m.present_position for m in self.robot.motors}

        for m in [self.robot.m1, self.robot.m5]:
            m.moving_speed = 50.

        # TODO: That's a really nasty way to circumvent prim sandboxing
        # How should we do that in a more elegant way?
        img = getattr(self.face_detector._robot,
                      self.face_detector._names[0]).frame
        # (width, height) -- shape is (rows, cols), hence the reversal.
        self.img_size = tuple(reversed(img.shape[:2]))

    def update(self):
        faces = self.face_detector.faces

        # TODO: filter detections to keep only the face closest to the
        # previously tracked one; currently the first detection is used.
        if faces:
            x, y = faces[0].center
            # Normalise pixel coordinates to [-1, 1].
            x = (float(x) / self.img_size[0]) * 2 - 1
            y = (float(y) / self.img_size[1]) * 2 - 1

            self.robot.m1.goal_position = self.rest_pos['m1'] - x * self.dx
            self.robot.m5.goal_position = self.rest_pos['m5'] + y * self.dy

    def teardown(self):
        # Switch all LEDs off when the primitive stops.
        for m in self.robot._robot.motors:
            m.led = 'off'
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\nimport os\nimport shutil\nimport glob\nimport logging\nfrom autotest.client import test, utils\nfrom autotest.client.shared import error\n\n\nclass ctcs(test.test):\n\n \"\"\"\n This autotest module runs CTCS (Cerberus Test Control System... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nimport os\nimport shutil\nimport glob\nimport logging\nfrom autotest.client import test, utils\nfrom autotest.client.shared import error\n\n\nclass ctcs(test.test):\n\n \"\"\"\n This autotest module runs CTCS (Cerberus Tes... | ```python
import os
import shutil
import glob
import logging
from autotest.client import test, utils
from autotest.client.shared import error
class ctcs(test.test):
"""
This autotest module runs CTCS (Cerberus Test Control System), that is being
maintained on a new location, since both CTCS and CTCS2 on sourceforge
were abandoned.
The original test suite (Cerberus Test Control System) was developed for
the now extinct VA Linux's manufacturing system it has several hardware
and software stress tests that can be run in parallel. It does have a
control file system that allows testers to specify the sorts of tests that
they want to see executed. It's an excellent stress test for hardware and
kernel.
@author Manas Kumar Nayak (maknayak@in.ibm.com) (original code)
@author Lucas Meneghel Rodrigues (lucasmr@br.ibm.com) (rewrite - ctcs)
@author Cao, Chen (kcao@redhat.com) (use ctcs2 and port it to 64)
@author Lucas Meneghel Rodrigues (lmr@redhat.com) (use ctcs new source repo)
@see: https://github.com/autotest/ctcs
"""
version = 3
    def initialize(self):
        """
        Sets the overall failure counter for the test.
        """
        # Number of failed CTCS runs, incremented by run_once().
        self.nfail = 0
def setup(self, tarball='ctcs.tar.bz2', length='4h', tc_opt='-k',
tcf_contents=None):
"""
Builds the test suite, and sets up the control file that is going to
be processed by the ctcs engine.
:param tarball: CTCS tarball
:param length: The amount of time we'll run the test suite
:param tcf_contents: If the user wants to specify the contents of
the CTCS control file, he could do so trough this parameter.
If this parameter is provided, length is ignored.
"""
ctcs_tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
utils.extract_tarball_to_dir(ctcs_tarball, self.srcdir)
os.chdir(self.srcdir)
utils.make()
# Here we define the cerberus suite control file that will be used.
# It will be kept on the debug directory for further analysis.
self.tcf_path = os.path.join(self.debugdir, 'autotest.tcf')
if not tcf_contents:
logging.info('Generating CTCS control file')
# Note about the control file generation command - we are creating
# a control file with the default tests, except for the kernel
# compilation test (flag -k).
g_cmd = ('./newburn-generator %s %s> %s' %
(tc_opt, length, self.tcf_path))
utils.system(g_cmd)
else:
logging.debug('TCF file contents supplied, ignoring test length'
' altogether')
tcf = open(self.tcf_path, 'w')
tcf.write(tcf_contents)
logging.debug('Contents of the control file that will be passed to '
'CTCS:')
tcf = open(self.tcf_path, 'r')
buf = tcf.read()
logging.debug(buf)
def run_once(self):
"""
Runs the test, with the appropriate control file.
"""
os.chdir(self.srcdir)
try:
utils.system('./run %s' % self.tcf_path)
except Exception:
self.nfail += 1
log_base_path = os.path.join(self.srcdir, 'log')
log_dir = glob.glob(os.path.join(log_base_path,
'autotest.tcf.log.*'))[0]
logging.debug('Copying %s log directory to results dir', log_dir)
dst = os.path.join(self.resultsdir, os.path.basename(log_dir))
shutil.move(log_dir, dst)
def cleanup(self):
"""
Cleans up source directory and raises on failure.
"""
if os.path.isdir(self.srcdir):
shutil.rmtree(self.srcdir)
if self.nfail != 0:
raise error.TestFail('CTCS execution failed')
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n############################################################################\n#\n# Copyright (C) 2016 The Qt Company Ltd.\n# Contact: https://www.qt.io/licensing/\n#\n# This file is part of Qt Creator.\n#\n# Commercial License Usage\n# Licensees holding va... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n############################################################################\n#\n# Copyright (C) 2016 The Qt Company Ltd.\n# Contact: https://www.qt.io/licensing/\n#\n# This file is part of Qt Creator.\n#\n# Commercial License Usage\n# Lice... | ```python
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
from dumper import *
def stripTypeName(value):
type = value.type
try:
type = type.target()
except:
pass
return str(type.unqualified())
def extractPointerType(d, value):
postfix = ""
while stripTypeName(value) == "CPlusPlus::PointerType":
postfix += "*"
value = d.downcast(value["_elementType"]["_type"])
try:
return readLiteral(d, value["_name"]) + postfix
except:
typeName = str(value.type.unqualified().target())
if typeName == "CPlusPlus::IntegerType":
return "int" + postfix
elif typeName == "CPlusPlus::VoidType":
return "void" + postfix
return "<unsupported>"
def readTemplateName(d, value):
name = readLiteral(d, value["_identifier"]) + "<"
args = value["_templateArguments"]
impl = args["_M_impl"]
start = impl["_M_start"]
size = impl["_M_finish"] - start
try:
d.check(0 <= size and size <= 100)
d.checkPointer(start)
for i in range(int(size)):
if i > 0:
name += ", "
name += extractPointerType(d, d.downcast(start[i]["_type"]))
except:
return "<not accessible>"
name += ">"
return name
def readLiteral(d, value):
if d.isNull(value):
return "<null>"
value = d.downcast(value)
type = value.type.unqualified()
try:
type = type.target()
except:
pass
typestr = str(type)
if typestr == "CPlusPlus::TemplateNameId":
return readTemplateName(d, value)
elif typestr == "CPlusPlus::QualifiedNameId":
return readLiteral(d, value["_base"]) + "::" + readLiteral(d, value["_name"])
try:
return d.extractBlob(value["_chars"], value["_size"]).toString()
except:
return "<unsupported>"
def dumpLiteral(d, value):
d.putValue(d.hexencode(readLiteral(d, value)), "latin1")
def qdump__Core__Id(d, value):
val = value.extractPointer()
try:
name = d.parseAndEvaluate("Core::nameForId(0x%x)" % val)
d.putSimpleCharArray(name.pointer())
except:
d.putValue(val)
d.putPlainChildren(value)
def qdump__Debugger__Internal__GdbMi(d, value):
val = d.encodeString(value["m_name"]) + "3a002000" \
+ d.encodeString(value["m_data"])
d.putValue(val, "utf16")
d.putPlainChildren(value)
def qdump__Debugger__Internal__DisassemblerLine(d, value):
d.putByteArrayValue(value["m_data"])
d.putPlainChildren(value)
def qdump__Debugger__Internal__WatchData(d, value):
d.putStringValue(value["iname"])
d.putPlainChildren(value)
def qdump__Debugger__Internal__WatchItem(d, value):
d.putStringValue(value["iname"])
d.putPlainChildren(value)
def qdump__Debugger__Internal__BreakpointModelId(d, value):
d.putValue("%s.%s" % (int(value["m_majorPart"]), int(value["m_minorPart"])))
d.putPlainChildren(value)
def qdump__Debugger__Internal__ThreadId(d, value):
d.putValue("%s" % value["m_id"])
d.putPlainChildren(value)
def qdump__CPlusPlus__ByteArrayRef(d, value):
d.putSimpleCharArray(value["m_start"], value["m_length"])
d.putPlainChildren(value)
def qdump__CPlusPlus__Identifier(d, value):
d.putSimpleCharArray(value["_chars"], value["_size"])
d.putPlainChildren(value)
def qdump__CPlusPlus__Symbol(d, value):
dumpLiteral(d, value["_name"])
d.putBetterType(value.type)
d.putPlainChildren(value)
def qdump__CPlusPlus__Class(d, value):
qdump__CPlusPlus__Symbol(d, value)
def qdump__CPlusPlus__IntegerType(d, value):
d.putValue(value["_kind"])
d.putPlainChildren(value)
def qdump__CPlusPlus__FullySpecifiedType(d, value):
type = d.downcast(value["_type"])
typeName = stripTypeName(type)
if typeName == "CPlusPlus::NamedType":
dumpLiteral(d, type["_name"])
elif typeName == "CPlusPlus::PointerType":
d.putValue(d.hexencode(extractPointerType(d, type)), "latin1")
d.putPlainChildren(value)
def qdump__CPlusPlus__NamedType(d, value):
dumpLiteral(d, value["_name"])
d.putBetterType(value.type)
d.putPlainChildren(value)
def qdump__CPlusPlus__PointerType(d, value):
d.putValue(d.hexencode(extractPointerType(d, value)), "latin1")
d.putPlainChildren(value)
def qdump__CPlusPlus__TemplateNameId(d, value):
dumpLiteral(d, value)
d.putBetterType(value.type)
d.putPlainChildren(value)
def qdump__CPlusPlus__QualifiedNameId(d, value):
dumpLiteral(d, value)
d.putPlainChildren(value)
def qdump__CPlusPlus__Literal(d, value):
dumpLiteral(d, value)
d.putPlainChildren(value)
def qdump__CPlusPlus__StringLiteral(d, value):
d.putSimpleCharArray(value["_chars"], value["_size"])
d.putPlainChildren(value)
def qdump__CPlusPlus__Internal__Value(d, value):
d.putValue(value["l"])
d.putPlainChildren(value)
def qdump__Utils__FileName(d, value):
d.putStringValue(value)
d.putPlainChildren(value)
def qdump__Utils__ElfSection(d, value):
d.putByteArrayValue(value["name"])
d.putPlainChildren(value)
def qdump__CPlusPlus__Token(d, value):
k = value["f"]["kind"]
e = int(k)
type = str(k.cast(d.lookupType("CPlusPlus::Kind")))[11:] # Strip "CPlusPlus::"
try:
if e == 6:
type = readLiteral(d, value["identifier"]) + " (%s)" % type
elif e >= 7 and e <= 23:
type = readLiteral(d, value["literal"]) + " (%s)" % type
except:
pass
d.putValue(type)
d.putPlainChildren(value)
def qdump__CPlusPlus__Internal__PPToken(d, value):
data, size, alloc = d.byteArrayData(value["m_src"])
length = int(value["f"]["utf16chars"])
offset = int(value["utf16charOffset"])
#warn("size: %s, alloc: %s, offset: %s, length: %s, data: %s"
# % (size, alloc, offset, length, data))
d.putValue(d.readMemory(data + offset, min(100, length)), "latin1")
d.putPlainChildren(value)
def qdump__ProString(d, value):
try:
s = value["m_string"]
data, size, alloc = d.stringData(s)
data += 2 * int(value["m_offset"])
size = int(value["m_length"])
s = d.readMemory(data, 2 * size)
d.putValue(s, "utf16")
except:
d.putEmptyValue()
d.putPlainChildren(value)
def qdump__ProKey(d, value):
qdump__ProString(d, value)
d.putBetterType(value.type)
def qdump__Core__GeneratedFile(d, value):
d.putStringValue(value["m_d"]["d"]["path"])
d.putPlainChildren(value)
def qdump__ProjectExplorer__Node(d, value):
d.putStringValue(value["m_filePath"])
d.putPlainChildren(value)
def qdump__ProjectExplorer__FolderNode(d, value):
d.putStringValue(value["m_displayName"])
d.putPlainChildren(value)
def qdump__ProjectExplorer__ProjectNode(d, value):
qdump__ProjectExplorer__FolderNode(d, value)
def qdump__CMakeProjectManager__Internal__CMakeProjectNode(d, value):
qdump__ProjectExplorer__FolderNode(d, value)
def qdump__QmakeProjectManager__QmakePriFileNode(d, value):
qdump__ProjectExplorer__FolderNode(d, value)
def qdump__QmakeProjectManager__QmakeProFileNode(d, value):
qdump__ProjectExplorer__FolderNode(d, value)
``` |
[
{
"content": "Here is the code content:\n```python\n## This file is part of Invenio.\n## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.\n##\n## Invenio is free software; you can redistribute it and/or\n## modify it under the terms of the GNU General Public License as\n## published by the Fre... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\n## This file is part of Invenio.\n## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.\n##\n## Invenio is free software; you can redistribute it and/or\n## modify it under the terms of the GNU General Public License as\n## publ... | ```python
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This is the Create_Modify_Interface function (along with its helpers).
It is used by WebSubmit for the "Modify Bibliographic Information" action.
"""
__revision__ = "$Id$"
import os
import re
import time
import pprint
from invenio.legacy.dbquery import run_sql
from invenio.legacy.websubmit.config import InvenioWebSubmitFunctionError
from invenio.legacy.websubmit.functions.Retrieve_Data import Get_Field
from invenio.ext.logging import register_exception
def Create_Modify_Interface_getfieldval_fromfile(cur_dir, fld=""):
"""Read a field's value from its corresponding text file in 'cur_dir' (if it exists) into memory.
Delete the text file after having read-in its value.
This function is called on the reload of the modify-record page. This way, the field in question
can be populated with the value last entered by the user (before reload), instead of always being
populated with the value still found in the DB.
"""
fld_val = ""
if len(fld) > 0 and os.access("%s/%s" % (cur_dir, fld), os.R_OK|os.W_OK):
fp = open( "%s/%s" % (cur_dir, fld), "r" )
fld_val = fp.read()
fp.close()
try:
os.unlink("%s/%s"%(cur_dir, fld))
except OSError:
# Cannot unlink file - ignore, let WebSubmit main handle this
pass
fld_val = fld_val.strip()
return fld_val
def Create_Modify_Interface_getfieldval_fromDBrec(fieldcode, recid):
"""Read a field's value from the record stored in the DB.
This function is called when the Create_Modify_Interface function is called for the first time
when modifying a given record, and field values must be retrieved from the database.
"""
fld_val = ""
if fieldcode != "":
for next_field_code in [x.strip() for x in fieldcode.split(",")]:
fld_val += "%s\n" % Get_Field(next_field_code, recid)
fld_val = fld_val.rstrip('\n')
return fld_val
def Create_Modify_Interface_transform_date(fld_val):
"""Accept a field's value as a string. If the value is a date in one of the following formats:
DD Mon YYYY (e.g. 23 Apr 2005)
YYYY-MM-DD (e.g. 2005-04-23)
...transform this date value into "DD/MM/YYYY" (e.g. 23/04/2005).
"""
if re.search("^[0-9]{2} [a-z]{3} [0-9]{4}$", fld_val, re.IGNORECASE) is not None:
try:
fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%d %b %Y"))
except (ValueError, TypeError):
# bad date format:
pass
elif re.search("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", fld_val, re.IGNORECASE) is not None:
try:
fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%Y-%m-%d"))
except (ValueError,TypeError):
# bad date format:
pass
return fld_val
def Create_Modify_Interface(parameters, curdir, form, user_info=None):
"""
Create an interface for the modification of a document, based on
the fields that the user has chosen to modify. This avoids having
to redefine a submission page for the modifications, but rely on
the elements already defined for the initial submission i.e. SBI
action (The only page that needs to be built for the modification
is the page letting the user specify a document to modify).
This function should be added at step 1 of your modification
workflow, after the functions that retrieves report number and
record id (Get_Report_Number, Get_Recid). Functions at step 2 are
the one executed upon successful submission of the form.
Create_Modify_Interface expects the following parameters:
* "fieldnameMBI" - the name of a text file in the submission
working directory that contains a list of the names of the
WebSubmit fields to include in the Modification interface.
These field names are separated by"\n" or "+".
Given the list of WebSubmit fields to be included in the
modification interface, the values for each field are retrieved
for the given record (by way of each WebSubmit field being
configured with a MARC Code in the WebSubmit database). An HTML
FORM is then created. This form allows a user to modify certain
field values for a record.
The file referenced by 'fieldnameMBI' is usually generated from a
multiple select form field): users can then select one or several
fields to modify
Note that the function will display WebSubmit Response elements,
but will not be able to set an initial value: this must be done by
the Response element iteself.
Additionally the function creates an internal field named
'Create_Modify_Interface_DONE' on the interface, that can be
retrieved in curdir after the form has been submitted.
This flag is an indicator for the function that displayed values
should not be retrieved from the database, but from the submitted
values (in case the page is reloaded). You can also rely on this
value when building your WebSubmit Response element in order to
retrieve value either from the record, or from the submission
directory.
"""
global sysno,rn
t = ""
# variables declaration
fieldname = parameters['fieldnameMBI']
# Path of file containing fields to modify
the_globals = {
'doctype' : doctype,
'action' : action,
'act' : action, ## for backward compatibility
'step' : step,
'access' : access,
'ln' : ln,
'curdir' : curdir,
'uid' : user_info['uid'],
'uid_email' : user_info['email'],
'rn' : rn,
'last_step' : last_step,
'action_score' : action_score,
'__websubmit_in_jail__' : True,
'form': form,
'sysno': sysno,
'user_info' : user_info,
'__builtins__' : globals()['__builtins__'],
'Request_Print': Request_Print
}
if os.path.exists("%s/%s" % (curdir, fieldname)):
fp = open( "%s/%s" % (curdir, fieldname), "r" )
fieldstext = fp.read()
fp.close()
fieldstext = re.sub("\+","\n", fieldstext)
fields = fieldstext.split("\n")
else:
res = run_sql("SELECT fidesc FROM sbmFIELDDESC WHERE name=%s", (fieldname,))
if len(res) == 1:
fields = res[0][0].replace(" ", "")
fields = re.findall("<optionvalue=.*>", fields)
regexp = re.compile("""<optionvalue=(?P<quote>['|"]?)(?P<value>.*?)(?P=quote)""")
fields = [regexp.search(x) for x in fields]
fields = [x.group("value") for x in fields if x is not None]
fields = [x for x in fields if x not in ("Select", "select")]
else:
raise InvenioWebSubmitFunctionError("cannot find fields to modify")
#output some text
t = t+"<CENTER bgcolor=\"white\">The document <B>%s</B> has been found in the database.</CENTER><br />Please modify the following fields:<br />Then press the 'END' button at the bottom of the page<br />\n" % rn
for field in fields:
subfield = ""
value = ""
marccode = ""
text = ""
# retrieve and display the modification text
t = t + "<FONT color=\"darkblue\">\n"
res = run_sql("SELECT modifytext FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res)>0:
t = t + "<small>%s</small> </FONT>\n" % res[0][0]
# retrieve the marc code associated with the field
res = run_sql("SELECT marccode FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res) > 0:
marccode = res[0][0]
# then retrieve the previous value of the field
if os.path.exists("%s/%s" % (curdir, "Create_Modify_Interface_DONE")):
# Page has been reloaded - get field value from text file on server, not from DB record
value = Create_Modify_Interface_getfieldval_fromfile(curdir, field)
else:
# First call to page - get field value from DB record
value = Create_Modify_Interface_getfieldval_fromDBrec(marccode, sysno)
# If field is a date value, transform date into format DD/MM/YYYY:
value = Create_Modify_Interface_transform_date(value)
res = run_sql("SELECT * FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res) > 0:
element_type = res[0][3]
numcols = res[0][6]
numrows = res[0][5]
size = res[0][4]
maxlength = res[0][7]
val = res[0][8]
fidesc = res[0][9]
if element_type == "T":
text = "<TEXTAREA name=\"%s\" rows=%s cols=%s wrap>%s</TEXTAREA>" % (field, numrows, numcols, value)
elif element_type == "F":
text = "<INPUT TYPE=\"file\" name=\"%s\" size=%s maxlength=\"%s\">" % (field, size, maxlength)
elif element_type == "I":
value = re.sub("[\n\r\t]+", "", value)
text = "<INPUT name=\"%s\" size=%s value=\"%s\"> " % (field, size, val)
text = text + "<SCRIPT>document.forms[0].%s.value=\"%s\";</SCRIPT>" % (field, value)
elif element_type == "H":
text = "<INPUT type=\"hidden\" name=\"%s\" value=\"%s\">" % (field, val)
text = text + "<SCRIPT>document.forms[0].%s.value=\"%s\";</SCRIPT>" % (field, value)
elif element_type == "S":
values = re.split("[\n\r]+", value)
text = fidesc
if re.search("%s\[\]" % field, fidesc):
multipletext = "[]"
else:
multipletext = ""
if len(values) > 0 and not(len(values) == 1 and values[0] == ""):
text += "<SCRIPT>\n"
text += "var i = 0;\n"
text += "el = document.forms[0].elements['%s%s'];\n" % (field, multipletext)
text += "max = el.length;\n"
for val in values:
text += "var found = 0;\n"
text += "var i=0;\n"
text += "while (i != max) {\n"
text += " if (el.options[i].value == \"%s\" || el.options[i].text == \"%s\") {\n" % (val, val)
text += " el.options[i].selected = true;\n"
text += " found = 1;\n"
text += " }\n"
text += " i=i+1;\n"
text += "}\n"
#text += "if (found == 0) {\n"
#text += " el[el.length] = new Option(\"%s\", \"%s\", 1,1);\n"
#text += "}\n"
text += "</SCRIPT>\n"
elif element_type == "D":
text = fidesc
elif element_type == "R":
try:
co = compile(fidesc.replace("\r\n", "\n"), "<string>", "exec")
## Note this exec is safe WRT global variable because the
## Create_Modify_Interface has already been parsed by
## execfile within a protected environment.
the_globals['text'] = ''
exec co in the_globals
text = the_globals['text']
except:
msg = "Error in evaluating response element %s with globals %s" % (pprint.pformat(field), pprint.pformat(globals()))
register_exception(req=None, alert_admin=True, prefix=msg)
raise InvenioWebSubmitFunctionError(msg)
else:
text = "%s: unknown field type" % field
t = t + "<small>%s</small>" % text
# output our flag field
t += '<input type="hidden" name="Create_Modify_Interface_DONE" value="DONE\n" />'
# output some more text
t = t + "<br /><br /><CENTER><small><INPUT type=\"button\" width=400 height=50 name=\"End\" value=\"END\" onClick=\"document.forms[0].step.value = 2;user_must_confirm_before_leaving_page = false;document.forms[0].submit();\"></small></CENTER></H4>"
return t
``` |
[
{
"content": "Here is a code snippet:\n```python\nfrom multiprocessing import Process, Queue\nfrom Queue import Empty, Full\nfrom datetime import datetime\nfrom copy import deepcopy\nimport json\nimport time\nimport re\n\nfrom Snmp import SnmpWalkTask, GraphiteSnmpData\nfrom polymer.Polymer import ControllerQue... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\nfrom multiprocessing import Process, Queue\nfrom Queue import Empty, Full\nfrom datetime import datetime\nfrom copy import deepcopy\nimport json\nimport time\nimport re\n\nfrom Snmp import SnmpWalkTask, GraphiteSnmpData\nfrom polymer.Polymer impo... | ```python
from multiprocessing import Process, Queue
from Queue import Empty, Full
from datetime import datetime
from copy import deepcopy
import json
import time
import re
from Snmp import SnmpWalkTask, GraphiteSnmpData
from polymer.Polymer import ControllerQueue, TaskMgr
""" Gardener.py - Prune unwanted interfaces from your graphite polls
Copyright (C) 2014-2015 David Michael Pennington
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
If you need to contact the author, you can do so by emailing:
mike [~at~] pennington [/dot\] net
"""
class SnmpData(object):
def __init__(self, input):
self.name = input.get('name', "")
self.mib = input.get('mib', "")
self.table = input.get('table', False)
self.index = input.get('index', "")
self.row_prune_oid = input.get('row_prune_oid', "")
self.row_prune_value = input.get('row_prune_value', "")
self.type_alias = input.get('type_alias', "")
self.oids = input.get('oids', "")
self.instance_prefix = input.get('instance_prefix', "")
self.prune_re = re.compile(self.row_prune_value, re.I)
self.config = {'name': self.name,
'mib': self.mib,
'table': self.table,
'index': self.index,
'row_prune_oid': self.row_prune_oid,
'row_prune_value': self.row_prune_value,
'type_alias': self.type_alias,
'oids': self.oids,
'instance_prefix': self.instance_prefix,
}
def __repr__(self):
return """<SnmpData '{0}' index: {1}>""".format(self.name, self.index)
def __eq__(self, other):
## Consider 'other' the same object, if index is the same
if (self.index.lower()==other.index.lower()):
return True
return False
class Host(object):
def __init__(self, input):
self.name = input.get('name', "")
self.addr = input.get('addr', "")
self.community = input.get('community', "")
self.interval = input.get('interval', 0)
self.snmpdata_names = input.get('snmpdata_names', [])
assert self.snmpdata_names!=[]
self.state = "__INIT__"
self.snmptasks = set([]) # Allocate task objs here
self.metrics = dict()
self.last_poll = datetime(1970, 1, 1) # last poll cycle start
self.config = {'name': self.name,
'addr': self.addr,
'community': self.community,
'interval': self.interval,
'snmpdata_names': self.snmpdata_names}
def __repr__(self):
return """<Host '{0}'>""".format(self.name)
def __eq__(self, other):
## Consider 'other' the same object, if names are the same
if (self.name.lower()==other.name.lower()):
return True
return False
@property
def _hash_value(self):
return self.name.lower()
def __hash__(self):
return hash(self._hash_value)
class Controller(object):
"""Read graphite poll data from config file, and poll devices. This
uses several specialized processes.
poll
tasks task
poll_all_snmp_tasks() <-----> TaskMgr()<----> Worker()
^ c_q |
| | task
| +<----> Worker()
|
Controller()
1. One processes acts as a controller, reads the config, and spawns
poll_all_snmp_tasks()
2. poll_all_snmp_tasks() builds
"""
def __init__(self, configfile='graphitepoll.json'):
assert configfile!=''
try:
config = json.load(open(configfile))
except (Exception) as e:
raise ValueError, "Cannot open '{0}': {1}".format(configfile, e)
## Read json configuration file
self.config = config
self.worker_count = config['worker_count'] # Required
self.worker_cycle_sleep = config['worker_cycle_sleep'] # Required
self.escape_character = config['escape_character'] # Required
self.graphite_prefix = config['graphite_prefix'] # Required
self.graphite_server = config['graphite_server'] # Required
self.graphite_port = config['graphite_port'] # Required
self.hosts = self._init_hosts() # dict of Host objs, keyed by addr
self.snmpdata = self._init_snmpdata() # convert to SnmpData objects
## Spawn Polymer's TaskMgr in a hot loop
c_q = ControllerQueue()
task_proc = Process(target=TaskMgr,
kwargs={'queue':c_q, 'hot_loop':True, 'log_level': 2,
'log_interval': 60,
'worker_count': self.worker_count,
'worker_cycle_sleep': self.worker_cycle_sleep,
})
task_proc.start()
## Send poll tasks to Polymer's TaskMgr
tasks = self.build_snmp_tasks()
poll_proc = Process(target=self.poll_all_snmp_tasks,
kwargs={'c_q': c_q, 'tasks': tasks})
poll_proc.start()
poll_proc.join()
task_proc.join()
def poll_all_snmp_tasks(self, c_q, tasks):
exec_times = list() # Keep execution time stats
finished_tasks = deepcopy(tasks) # Built an object to keep track of
# this poll
while True:
## Send tasks to the task manager on a schedule
for host_addr in tasks.keys():
interval = self.hosts[host_addr].interval
delta_last_poll = (datetime.now() -
self.hosts[host_addr].last_poll).seconds
## Is it time to run the task?
if delta_last_poll>=interval:
## Queue a task list to the TaskMgr process
c_q.from_q.put(finished_tasks[host_addr])
## Reset all known tasks for this host...
finished_tasks[host_addr] = list() # reset results
finished = False
else:
finished = True
time.sleep(self.calc_wait_time(c_q, exec_times,
finished_tasks))
## Read tasks from the task manager
while not finished:
try:
## Poll queue for completed task
task = c_q.to_q.get_nowait()
finished_tasks[task.addr].append(task)
exec_times.append(task.task_stop - task.task_start)
## Check whether we are finished with this host...
num_tasks_required = len(tasks[task.addr])
num_tasks_finished = len(finished_tasks[task.addr])
if num_tasks_finished==num_tasks_required:
## Record time of this poll
self.hosts[task.addr].last_poll = datetime.now()
## Write to graphite
GraphiteSnmpData(self.hosts[task.addr],
self.snmpdata,
finished_tasks[task.addr],
server=self.graphite_server,
port=self.graphite_port,
prefix=self.graphite_prefix,
escape_character=self.escape_character,
dry_run=True)
## Reset finished_tasks
finished_tasks[task.addr] = deepcopy(tasks[task.addr])
finished = True
except Empty:
exec_times = exec_times[-400:] # Only keep the last 400
time.sleep(self.calc_wait_time(c_q, exec_times,
finished_tasks))
def calc_wait_time(self, c_q, exec_times, finished_tasks):
"""Calculate the loop wait time"""
num_samples = float(len(exec_times))
num_tasks = sum([len(finished_tasks[addr]) for addr in finished_tasks.keys()])
if num_samples>0.0:
queue_size = max(c_q.from_q.qsize(),1.0)
min_task_time = min(exec_times)
try:
wait_time = min_task_time/(num_tasks*queue_size)
except:
wait_time = 0.00001 # Default to 10us
else:
wait_time = 0.00001 # Default to 10us
return wait_time
def _init_snmpdata(self):
"""Return a dictionary of SnmpData objects, keyed by their name"""
snmpdata = dict()
for vals in self.config.get('snmpdata', {}):
obj = SnmpData(vals)
if snmpdata.get(obj.name, False):
# We already have an entry for this ...
raise ValueError
snmpdata[obj.name] = obj
assert snmpdata!={}, "FATAL: 'snmpdata' was not specified correctly in the config"
return snmpdata
def _init_hosts(self):
"""Return a dictionary of Host objects, keyed by their addr"""
hosts = dict()
for vals in self.config.get('hosts', {}):
obj = Host(vals)
if hosts.get(obj.addr, False):
# We already have an entry for this host...
raise ValueError
hosts[obj.addr] = obj
return hosts
def build_snmp_tasks(self):
"""return a dict of host tasks, tiered by poll interval and host addr"""
## Algorithm: allocate hosts on a schedule
## allocate tasks for each host
all_tasks = dict()
all_hosts = self.hosts.keys()
for host_addr in all_hosts:
host_tasks = list()
# host_name will be a string key into the self.hosts dict
for snmpdata_name in self.hosts[host_addr].config['snmpdata_names']:
# snmpdata_name is a string key into the
host_obj = self.hosts[host_addr]
snmpdata_obj = self.snmpdata[snmpdata_name]
## Append an index task
index_task = SnmpWalkTask(host_addr,
host_obj.community,
snmpdata_obj.mib,
snmpdata_obj.index,
index = True,
snmpdata=snmpdata_name,
value_type=str)
host_tasks.append(index_task)
## Append a task to find values that we use for pruning
prune_task = SnmpWalkTask(host_addr,
host_obj.community,
snmpdata_obj.mib,
snmpdata_obj.row_prune_oid,
row_prune_value = snmpdata_obj.row_prune_value,
snmpdata=snmpdata_name,
value_type=str)
host_tasks.append(prune_task)
## Append one task per oid
for oid in snmpdata_obj.oids:
task = SnmpWalkTask(host_addr,
host_obj.community,
snmpdata_obj.mib,
oid,
snmpdata=snmpdata_name,
value_type=str)
host_tasks.append(task)
all_tasks[host_addr] = host_tasks
return all_tasks
if __name__=="__main__":
Controller()
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\nimport collections\nimport json\nfrom logging import getLogger\n\nfrom ckan.lib.helpers import url_for\nfrom ckan.common import config\nfrom ckanext.package_converter.model.metadata_format import MetadataFormats\nfrom ckanext.package_converter.... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\nimport collections\nimport json\nfrom logging import getLogger\n\nfrom ckan.lib.helpers import url_for\nfrom ckan.common import config\nfrom ckanext.package_converter.model.metadata_format import MetadataFormats\nfrom ckanext.pa... | ```python
import collections
import json
from logging import getLogger
from ckan.lib.helpers import url_for
from ckan.common import config
from ckanext.package_converter.model.metadata_format import MetadataFormats
from ckanext.package_converter.model.scheming_converter import Datacite43SchemingConverter
from xmltodict import unparse, parse
log = getLogger(__name__)
class Datacite43SchemingResourceConverter(Datacite43SchemingConverter):
def __init__(self):
Datacite43SchemingConverter.__init__(self)
ckan_resource_base_format = MetadataFormats().get_metadata_formats('ckan_resource')[0]
self.input_format = ckan_resource_base_format
def _datacite_converter_schema(self, resource_dict):
try:
schema_map = self._get_schema_map(self.output_format.get_format_name())
metadata_resource_map = schema_map['metadata_resource']
datacite_dict = collections.OrderedDict()
# Header
datacite_dict['resource'] = collections.OrderedDict()
datacite_dict['resource']['@xsi:schemaLocation'] = '{namespace} {schema}'.format(
namespace=self.output_format.get_namespace(),
schema=self.output_format.get_xsd_url())
datacite_dict['resource']['@xmlns'] = '{namespace}'.format(namespace=self.output_format.get_namespace())
datacite_dict['resource']['@xmlns:xsi'] = 'http://www.w3.org/2001/XMLSchema-instance'
# Identifier*
datacite_identifier_tag = 'identifier'
datacite_dict['resource'][datacite_identifier_tag] = {
'#text': self._get_single_mapped_value(datacite_identifier_tag, resource_dict, metadata_resource_map),
'@identifierType': 'DOI'}
# Titles*
datacite_titles_tag = 'titles'
datacite_title_tag = 'title'
datacite_xml_lang_tag = 'xml:lang'
datacite_dict['resource'][datacite_titles_tag] = {datacite_title_tag: []}
datacite_title_type_tag = 'titleType'
ckan_titles = self._get_complex_mapped_value(datacite_titles_tag, datacite_title_tag,
['', datacite_title_type_tag, datacite_xml_lang_tag],
resource_dict, metadata_resource_map)
for ckan_title in ckan_titles:
datacite_title = {'#text': ckan_title.get(datacite_title_tag, ''),
'@' + datacite_xml_lang_tag: ckan_title.get(
self._joinTags([datacite_title_tag, datacite_xml_lang_tag]), 'en-us')}
if ckan_title.get(self._joinTags([datacite_title_tag, datacite_title_type_tag]), ''):
ckan_title_type = ckan_title.get(self._joinTags([datacite_title_tag, datacite_title_type_tag]),
'other')
datacite_title['@' + datacite_title_type_tag] = self._valueToDataciteCV(ckan_title_type,
datacite_title_type_tag)
datacite_dict['resource'][datacite_titles_tag][datacite_title_tag] += [datacite_title]
# Alternate Identifier (CKAN URL) Decide which is landing page, resource or package
ckan_resource_url = config.get('ckan.site_url', '') + url_for(controller='dataset_resource',
action='read',
id=resource_dict.get('package_id',
''),
resource_id=resource_dict.get('id',
''))
datacite_dict['resource']['alternateIdentifiers'] = {
'alternateIdentifier': [{'#text': ckan_resource_url, '@alternateIdentifierType': 'URL'}]}
# Sizes (not defined in scheming, taken from default CKAN resource)
datacite_size_group_tag = 'sizes'
datacite_size_tag = 'size'
datacite_sizes = []
log.debug('** SIZE *** ' + resource_dict.get('resource_size', ''))
if resource_dict.get('size', ''):
datacite_sizes += [{'#text': str(resource_dict.get('size', ' ')) + ' bytes'}]
elif resource_dict.get('resource_size', ''):
resource_size = resource_dict.get('resource_size', '')
try:
resource_size_obj = json.loads(resource_size)
datacite_sizes += [{'#text': resource_size_obj.get('size_value', '0') + ' ' + resource_size_obj.get(
'size_unit', 'KB').upper()}]
except:
log.error('unparseable value at resource_size:' + str(resource_size))
if datacite_sizes:
datacite_dict['resource'][datacite_size_group_tag] = {datacite_size_tag: datacite_sizes}
# Formats (get from resources)
datacite_format_group_tag = 'formats'
datacite_format_tag = 'format'
datacite_formats = []
resource_format = self._get_single_mapped_value(
self._joinTags([datacite_format_group_tag, datacite_format_tag]),
resource_dict, metadata_resource_map,
default=resource_dict.get('mimetype', resource_dict.get('mimetype_inner', '')))
if resource_format:
datacite_formats += [{'#text': resource_format}]
if datacite_formats:
datacite_dict['resource'][datacite_format_group_tag] = {datacite_format_tag: datacite_formats}
# Version
datacite_version_tag = 'version'
datacite_version = self._get_single_mapped_value(datacite_version_tag, resource_dict, metadata_resource_map,
'')
if datacite_version:
datacite_dict['resource'][datacite_version_tag] = {'#text': datacite_version}
# Description
datacite_descriptions_tag = 'descriptions'
datacite_description_tag = 'description'
datacite_description_type_tag = 'descriptionType'
datacite_descriptions = []
ckan_descriptions = self._get_complex_mapped_value(datacite_descriptions_tag, datacite_description_tag,
['', datacite_xml_lang_tag,
datacite_description_type_tag], resource_dict,
metadata_resource_map)
for ckan_description in ckan_descriptions:
datacite_description = {'#text': ckan_description.get(datacite_description_tag, ''),
'@' + datacite_description_type_tag: ckan_description.get(
self._joinTags([datacite_description_tag, datacite_description_type_tag]),
'Abstract'),
'@' + datacite_xml_lang_tag: ckan_description.get(
self._joinTags([datacite_description_tag, datacite_xml_lang_tag]), 'en-us')}
datacite_descriptions += [datacite_description]
if datacite_descriptions:
datacite_dict['resource'][datacite_descriptions_tag] = {datacite_description_tag: datacite_descriptions}
# inherit from package
package_dict = resource_dict.get('package_dict')
if package_dict:
datacite_package_dict = parse(
super(Datacite43SchemingResourceConverter, self)._datacite_converter_schema(package_dict))
datacite_dict['resource'] = self._inherit_from_package(datacite_dict['resource'],
datacite_package_dict['resource'])
# Convert to xml
converted_package = unparse(datacite_dict, pretty=True)
except Exception as e:
log.exception(e)
return None
return converted_package
def _inherit_from_package(self, datacite_dict, datacite_package_dict):
def merge_dict_lists(dict1, dict2):
for key in dict1.keys():
if type(dict1[key]) is list:
list1 = dict1[key]
list2 = dict2.get(key, [])
if type(dict2.get(key, [])) is not list:
list2 = [list2]
for item in list2:
if item not in list1:
dict1[key] += [item]
return dict1
try:
# values from the resource are added or replace the package
replace = ['identifier', 'sizes', 'version', 'formats', 'resourceType', 'alternateIdentifiers']
for key in datacite_dict.keys():
if (key in replace) or (type(datacite_dict[key]) is not dict):
datacite_package_dict[key] = datacite_dict[key]
else:
datacite_package_dict[key] = merge_dict_lists(datacite_dict[key],
datacite_package_dict.get(key, {}))
return (datacite_package_dict)
except Exception as e:
log.exception(e)
return datacite_dict
``` |
[
{
"content": "Replicate the source code:\n```python\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by app... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless... | ```python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# keystone documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 9 12:02:59 2012.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import subprocess
import sys
import warnings
# NOTE(dstanek): adds _ to the builtins so keystone modules can be imported
__builtins__['_'] = str
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'oslo_config.sphinxconfiggen',
'oslo_policy.sphinxpolicygen',
'oslosphinx',
'ext.support_matrix',
]
config_generator_config_file = '../../config-generator/keystone.conf'
sample_config_basename = '_static/keystone'
policy_generator_config_file = (
'../../config-generator/keystone-policy-generator.conf'
)
sample_policy_basename = '_static/keystone'
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
# if os.getenv('HUDSON_PUBLISH_DOCS'):
# templates_path = ['_ga', '_templates']
# else:
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'keystone'
copyright = u'2012, OpenStack Foundation'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['old']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['keystone.']
# -- Options for man page output --------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
('man/keystone-manage', 'keystone-manage', u'Keystone Management Utility',
[u'OpenStack'], 1)
]
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
try:
html_last_updated_fmt = subprocess.check_output(
git_cmd).decode('utf-8')
except Exception:
warnings.warn('Cannot get last updated time from git repository. '
'Not setting "html_last_updated_fmt".')
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'keystonedoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples (source
# start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'keystone.tex', u'Keystone Documentation',
u'OpenStack', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'keystone', u'Keystone Documentation',
u'OpenStack', 'keystone', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n# Copyright 2010 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n# Copyright 2010 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obt... | ```python
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.dom import minidom
import mock
from oslo_i18n import fixture as i18n_fixture
from oslo_serialization import jsonutils
import webob.dec
from <project_name>.api import common
from <project_name>.api.openstack import wsgi
from <project_name>.i18n import _
from <project_name> import test
class TestFaults(test.TestCase):
"""Tests covering `<project_name>.api.openstack.faults:Fault` class."""
def setUp(self):
super(TestFaults, self).setUp()
self.useFixture(i18n_fixture.ToggleLazy(True))
def _prepare_xml(self, xml_string):
"""Remove characters from string which hinder XML equality testing."""
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault_json(self):
"""Test fault serialized to JSON via file-extension and/or header."""
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
response = request.get_response(fault)
expected = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
actual = jsonutils.loads(response.body)
self.assertEqual("application/json", response.content_type)
self.assertEqual(expected, actual)
def test_413_fault_json(self):
"""Test fault serialized to JSON via file-extension and/or header."""
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
exc = webob.exc.HTTPRequestEntityTooLarge
fault = wsgi.Fault(exc(explanation='sorry',
headers={'Retry-After': 4}))
response = request.get_response(fault)
expected = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": 4,
},
}
actual = jsonutils.loads(response.body)
self.assertEqual("application/json", response.content_type)
self.assertEqual(expected, actual)
def test_raise(self):
"""Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
self.assertEqual("application/xml", resp.content_type)
self.assertEqual(404, resp.status_int)
self.assertIn('whut?', resp.body)
def test_raise_403(self):
"""Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
self.assertEqual("application/xml", resp.content_type)
self.assertEqual(403, resp.status_int)
self.assertNotIn('resizeNotAllowed', resp.body)
self.assertIn('forbidden', resp.body)
@mock.patch('<project_name>.api.openstack.wsgi.i18n.translate')
def test_raise_http_with_localized_explanation(self, mock_translate):
params = ('blah', )
expl = _("String with params: %s") % params
def _mock_translation(msg, locale):
return "Mensaje traducido"
mock_translate.side_effect = _mock_translation
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation=expl))
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
self.assertEqual("application/xml", resp.content_type)
self.assertEqual(404, resp.status_int)
self.assertIn(("Mensaje traducido"), resp.body)
self.stubs.UnsetAll()
def test_fault_has_status_int(self):
"""Ensure the status_int is set correctly on faults."""
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
self.assertEqual(400, fault.status_int)
def test_xml_serializer(self):
"""Ensure that a v1 request responds with a v1 xmlns."""
request = webob.Request.blank('/v1',
headers={"Accept": "application/xml"})
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
response = request.get_response(fault)
self.assertIn(common.XML_NS_V1, response.body)
self.assertEqual("application/xml", response.content_type)
self.assertEqual(400, response.status_int)
class FaultsXMLSerializationTestV11(test.TestCase):
"""Tests covering `<project_name>.api.openstack.faults:Fault` class."""
def _prepare_xml(self, xml_string):
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault(self):
metadata = {'attributes': {"badRequest": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<badRequest code="400" xmlns="%s">
<message>scram</message>
</badRequest>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
def test_413_fault(self):
metadata = {'attributes': {"overLimit": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": 4,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<overLimit code="413" xmlns="%s">
<message>sorry</message>
<retryAfter>4</retryAfter>
</overLimit>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
def test_404_fault(self):
metadata = {'attributes': {"itemNotFound": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"itemNotFound": {
"message": "sorry",
"code": 404,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<itemNotFound code="404" xmlns="%s">
<message>sorry</message>
</itemNotFound>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
class FaultsXMLSerializationTestV2(test.TestCase):
"""Tests covering `<project_name>.api.openstack.faults:Fault` class."""
def _prepare_xml(self, xml_string):
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault(self):
metadata = {'attributes': {"badRequest": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<badRequest code="400" xmlns="%s">
<message>scram</message>
</badRequest>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
def test_413_fault(self):
metadata = {'attributes': {"overLimit": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": 4,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<overLimit code="413" xmlns="%s">
<message>sorry</message>
<retryAfter>4</retryAfter>
</overLimit>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
def test_404_fault(self):
metadata = {'attributes': {"itemNotFound": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"itemNotFound": {
"message": "sorry",
"code": 404,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<itemNotFound code="404" xmlns="%s">
<message>sorry</message>
</itemNotFound>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
``` |
[
{
"content": "```python\n# Unit tests for cache framework\n# Uses whatever cache backend is set in the test settings file.\nimport copy\nimport io\nimport os\nimport pickle\nimport re\nimport shutil\nimport tempfile\nimport threading\nimport time\nimport unittest\nimport warnings\nfrom unittest import mock\n\nf... | [
{
"content": "<|memory_start|>```python\n# Unit tests for cache framework\n# Uses whatever cache backend is set in the test settings file.\nimport copy\nimport io\nimport os\nimport pickle\nimport re\nimport shutil\nimport tempfile\nimport threading\nimport time\nimport unittest\nimport warnings\nfrom unittest ... | ```python
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import copy
import io
import os
import pickle
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from unittest import mock
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
ignore_warnings, override_settings,
)
from django.test.signals import setting_changed
from django.utils import timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable:
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr('answer')
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr('answer')
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr_version('answer')
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr_version('answer')
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist')
def test_get_or_set(self):
self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')
self.assertEqual(cache.get_or_set('mykey', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'default'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default')
def custom_key_func(key, key_prefix, version):
    "A customized cache key function"
    # %s applies str() to `version`, matching the original join() behavior.
    return 'CUSTOM-%s-%s-%s' % (key_prefix, version, key)
# Cache configs, keyed by alias, that every backend test class runs against;
# caches_setting_for_tests() merges these on top of a backend-specific base.
_caches_setting_base = {
    'default': {},
    'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},  # PID keeps prefixes unique per run
    'v2': {'VERSION': 2},
    'custom_key': {'KEY_FUNCTION': custom_key_func},
    'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
    'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
    'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
    # `base` pulls in backend-specific config (e.g. the memcached settings
    # from the original settings file); `exclude` is a set of cache aliases
    # denoting which `_caches_setting_base` keys should be omitted; `params`
    # are test-specific overrides.
    # Effective precedence: params -> _caches_setting_base -> base.
    base = base or {}
    exclude = exclude or set()
    setting = {}
    for alias, alias_config in _caches_setting_base.items():
        if alias in exclude:
            continue
        merged = base.copy()
        merged.update(alias_config)
        merged.update(params)
        setting[alias] = merged
    return setting
class BaseCacheTests:
    """
    A common set of tests to apply to all cache backends.

    Concrete subclasses configure the backend under test via
    @override_settings; `cache` / `caches` then resolve to that backend.
    """
    def setUp(self):
        self.factory = RequestFactory()
    def tearDown(self):
        cache.clear()
    def test_simple(self):
        # Simple cache set/get works
        cache.set("key", "value")
        self.assertEqual(cache.get("key"), "value")
    def test_add(self):
        # A key can be added to a cache
        cache.add("addkey1", "value")
        result = cache.add("addkey1", "newvalue")
        self.assertFalse(result)
        self.assertEqual(cache.get("addkey1"), "value")
    def test_prefix(self):
        # Test for same cache key conflicts between shared backend
        cache.set('somekey', 'value')
        # should not be set in the prefixed cache
        self.assertFalse(caches['prefix'].has_key('somekey'))
        caches['prefix'].set('somekey', 'value2')
        self.assertEqual(cache.get('somekey'), 'value')
        self.assertEqual(caches['prefix'].get('somekey'), 'value2')
    def test_non_existent(self):
        # Non-existent cache keys return as None/default
        # get with non-existent keys
        self.assertIsNone(cache.get("does_not_exist"))
        self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
    def test_get_many(self):
        # Multiple cache keys can be returned using get_many
        cache.set('a', 'a')
        cache.set('b', 'b')
        cache.set('c', 'c')
        cache.set('d', 'd')
        self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
        self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
    def test_delete(self):
        # Cache keys can be deleted
        cache.set("key1", "spam")
        cache.set("key2", "eggs")
        self.assertEqual(cache.get("key1"), "spam")
        cache.delete("key1")
        self.assertIsNone(cache.get("key1"))
        self.assertEqual(cache.get("key2"), "eggs")
    def test_has_key(self):
        # The cache can be inspected for cache keys
        cache.set("hello1", "goodbye1")
        self.assertTrue(cache.has_key("hello1"))
        self.assertFalse(cache.has_key("goodbye1"))
        cache.set("no_expiry", "here", None)
        self.assertTrue(cache.has_key("no_expiry"))
    def test_in(self):
        # The in operator can be used to inspect cache contents
        cache.set("hello2", "goodbye2")
        self.assertIn("hello2", cache)
        self.assertNotIn("goodbye2", cache)
    def test_incr(self):
        # Cache values can be incremented
        cache.set('answer', 41)
        self.assertEqual(cache.incr('answer'), 42)
        self.assertEqual(cache.get('answer'), 42)
        self.assertEqual(cache.incr('answer', 10), 52)
        self.assertEqual(cache.get('answer'), 52)
        self.assertEqual(cache.incr('answer', -10), 42)
        with self.assertRaises(ValueError):
            cache.incr('does_not_exist')
    def test_decr(self):
        # Cache values can be decremented
        cache.set('answer', 43)
        self.assertEqual(cache.decr('answer'), 42)
        self.assertEqual(cache.get('answer'), 42)
        self.assertEqual(cache.decr('answer', 10), 32)
        self.assertEqual(cache.get('answer'), 32)
        self.assertEqual(cache.decr('answer', -10), 42)
        with self.assertRaises(ValueError):
            cache.decr('does_not_exist')
    def test_close(self):
        self.assertTrue(hasattr(cache, 'close'))
        cache.close()
    def test_data_types(self):
        # Many different data types can be cached
        stuff = {
            'string': 'this is a string',
            'int': 42,
            'list': [1, 2, 3, 4],
            'tuple': (1, 2, 3, 4),
            'dict': {'A': 1, 'B': 2},
            'function': f,
            'class': C,
        }
        cache.set("stuff", stuff)
        self.assertEqual(cache.get("stuff"), stuff)
    def test_cache_read_for_model_instance(self):
        # Don't want fields with callable as default to be called on cache read
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="Well?")
        self.assertEqual(Poll.objects.count(), 1)
        pub_date = my_poll.pub_date
        cache.set('question', my_poll)
        cached_poll = cache.get('question')
        self.assertEqual(cached_poll.pub_date, pub_date)
        # We only want the default expensive calculation run once
        self.assertEqual(expensive_calculation.num_runs, 1)
    def test_cache_write_for_model_instance_with_deferred(self):
        # Don't want fields with callable as default to be called on cache write
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        Poll.objects.create(question="What?")
        self.assertEqual(expensive_calculation.num_runs, 1)
        defer_qs = Poll.objects.all().defer('question')
        self.assertEqual(defer_qs.count(), 1)
        self.assertEqual(expensive_calculation.num_runs, 1)
        cache.set('deferred_queryset', defer_qs)
        # cache set should not re-evaluate default functions
        self.assertEqual(expensive_calculation.num_runs, 1)
    def test_cache_read_for_model_instance_with_deferred(self):
        # Don't want fields with callable as default to be called on cache read
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        Poll.objects.create(question="What?")
        self.assertEqual(expensive_calculation.num_runs, 1)
        defer_qs = Poll.objects.all().defer('question')
        self.assertEqual(defer_qs.count(), 1)
        cache.set('deferred_queryset', defer_qs)
        self.assertEqual(expensive_calculation.num_runs, 1)
        runs_before_cache_read = expensive_calculation.num_runs
        cache.get('deferred_queryset')
        # We only want the default expensive calculation run on creation and set
        self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
    def test_expiration(self):
        # Cache values can be set to expire
        cache.set('expire1', 'very quickly', 1)
        cache.set('expire2', 'very quickly', 1)
        cache.set('expire3', 'very quickly', 1)
        time.sleep(2)
        self.assertIsNone(cache.get("expire1"))
        cache.add("expire2", "newvalue")
        self.assertEqual(cache.get("expire2"), "newvalue")
        self.assertFalse(cache.has_key("expire3"))
    def test_unicode(self):
        # Unicode values can be cached
        stuff = {
            'ascii': 'ascii_value',
            'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
            'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
            'ascii2': {'x': 1}
        }
        # Test `set`
        for (key, value) in stuff.items():
            cache.set(key, value)
            self.assertEqual(cache.get(key), value)
        # Test `add`
        for (key, value) in stuff.items():
            cache.delete(key)
            cache.add(key, value)
            self.assertEqual(cache.get(key), value)
        # Test `set_many`
        for (key, value) in stuff.items():
            cache.delete(key)
        cache.set_many(stuff)
        for (key, value) in stuff.items():
            self.assertEqual(cache.get(key), value)
    def test_binary_string(self):
        # Binary strings should be cacheable
        from zlib import compress, decompress
        value = 'value_to_be_compressed'
        compressed_value = compress(value.encode())
        # Test set
        cache.set('binary1', compressed_value)
        compressed_result = cache.get('binary1')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result).decode())
        # Test add
        cache.add('binary1-add', compressed_value)
        compressed_result = cache.get('binary1-add')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result).decode())
        # Test set_many
        cache.set_many({'binary1-set_many': compressed_value})
        compressed_result = cache.get('binary1-set_many')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result).decode())
    def test_set_many(self):
        # Multiple keys can be set using set_many
        cache.set_many({"key1": "spam", "key2": "eggs"})
        self.assertEqual(cache.get("key1"), "spam")
        self.assertEqual(cache.get("key2"), "eggs")
    def test_set_many_expiration(self):
        # set_many takes a second ``timeout`` parameter
        cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
        time.sleep(2)
        self.assertIsNone(cache.get("key1"))
        self.assertIsNone(cache.get("key2"))
    def test_delete_many(self):
        # Multiple keys can be deleted using delete_many
        cache.set("key1", "spam")
        cache.set("key2", "eggs")
        cache.set("key3", "ham")
        cache.delete_many(["key1", "key2"])
        self.assertIsNone(cache.get("key1"))
        self.assertIsNone(cache.get("key2"))
        self.assertEqual(cache.get("key3"), "ham")
    def test_clear(self):
        # The cache can be emptied using clear
        cache.set("key1", "spam")
        cache.set("key2", "eggs")
        cache.clear()
        self.assertIsNone(cache.get("key1"))
        self.assertIsNone(cache.get("key2"))
    def test_long_timeout(self):
        """
        Follow memcached's convention where a timeout greater than 30 days is
        treated as an absolute expiration timestamp instead of a relative
        offset (#12399).
        """
        cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1)  # 30 days + 1 second
        self.assertEqual(cache.get('key1'), 'eggs')
        cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
        self.assertEqual(cache.get('key2'), 'ham')
        cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
        self.assertEqual(cache.get('key3'), 'sausage')
        self.assertEqual(cache.get('key4'), 'lobster bisque')
    def test_forever_timeout(self):
        """
        Passing in None into timeout results in a value that is cached forever
        """
        cache.set('key1', 'eggs', None)
        self.assertEqual(cache.get('key1'), 'eggs')
        cache.add('key2', 'ham', None)
        self.assertEqual(cache.get('key2'), 'ham')
        added = cache.add('key1', 'new eggs', None)
        self.assertIs(added, False)
        self.assertEqual(cache.get('key1'), 'eggs')
        cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
        self.assertEqual(cache.get('key3'), 'sausage')
        self.assertEqual(cache.get('key4'), 'lobster bisque')
    def test_zero_timeout(self):
        """
        Passing in zero into timeout results in a value that is not cached
        """
        cache.set('key1', 'eggs', 0)
        self.assertIsNone(cache.get('key1'))
        cache.add('key2', 'ham', 0)
        self.assertIsNone(cache.get('key2'))
        cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
        self.assertIsNone(cache.get('key3'))
        self.assertIsNone(cache.get('key4'))
    def test_float_timeout(self):
        # Make sure a timeout given as a float doesn't crash anything.
        cache.set("key1", "spam", 100.2)
        self.assertEqual(cache.get("key1"), "spam")
    def _perform_cull_test(self, cull_cache, initial_count, final_count):
        # Create initial cache key entries. This will overflow the cache,
        # causing a cull.
        for i in range(1, initial_count):
            cull_cache.set('cull%d' % i, 'value', 1000)
        count = 0
        # Count how many keys are left in the cache.
        for i in range(1, initial_count):
            if cull_cache.has_key('cull%d' % i):
                count += 1
        self.assertEqual(count, final_count)
    def test_cull(self):
        self._perform_cull_test(caches['cull'], 50, 29)
    def test_zero_cull(self):
        self._perform_cull_test(caches['zero_cull'], 50, 19)
    def _perform_invalid_key_test(self, key, expected_warning):
        """
        All the builtin backends (except memcached, see below) should warn on
        keys that would be refused by memcached. This encourages portable
        caching code without making it too difficult to use production backends
        with more liberal key rules. Refs #6447.
        """
        # mimic custom ``make_key`` method being defined since the default will
        # never show the below warnings
        def func(key, *args):
            return key
        old_func = cache.key_func
        cache.key_func = func
        try:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                cache.set(key, 'value')
                self.assertEqual(len(w), 1)
                self.assertIsInstance(w[0].message, CacheKeyWarning)
                self.assertEqual(str(w[0].message.args[0]), expected_warning)
        finally:
            # always restore the original key function, even on failure
            cache.key_func = old_func
    def test_invalid_key_characters(self):
        # memcached doesn't allow whitespace or control characters in keys.
        key = 'key with spaces and 清'
        expected_warning = (
            "Cache key contains characters that will cause errors if used "
            "with memcached: %r" % key
        )
        self._perform_invalid_key_test(key, expected_warning)
    def test_invalid_key_length(self):
        # memcached limits key length to 250.
        key = ('a' * 250) + '清'
        expected_warning = (
            'Cache key will cause errors if used with memcached: '
            '%r (longer than %s)' % (key, 250)
        )
        self._perform_invalid_key_test(key, expected_warning)
    def test_cache_versioning_get_set(self):
        # set, using default version = 1
        cache.set('answer1', 42)
        self.assertEqual(cache.get('answer1'), 42)
        self.assertEqual(cache.get('answer1', version=1), 42)
        self.assertIsNone(cache.get('answer1', version=2))
        self.assertIsNone(caches['v2'].get('answer1'))
        self.assertEqual(caches['v2'].get('answer1', version=1), 42)
        self.assertIsNone(caches['v2'].get('answer1', version=2))
        # set, default version = 1, but manually override version = 2
        cache.set('answer2', 42, version=2)
        self.assertIsNone(cache.get('answer2'))
        self.assertIsNone(cache.get('answer2', version=1))
        self.assertEqual(cache.get('answer2', version=2), 42)
        self.assertEqual(caches['v2'].get('answer2'), 42)
        self.assertIsNone(caches['v2'].get('answer2', version=1))
        self.assertEqual(caches['v2'].get('answer2', version=2), 42)
        # v2 set, using default version = 2
        caches['v2'].set('answer3', 42)
        self.assertIsNone(cache.get('answer3'))
        self.assertIsNone(cache.get('answer3', version=1))
        self.assertEqual(cache.get('answer3', version=2), 42)
        self.assertEqual(caches['v2'].get('answer3'), 42)
        self.assertIsNone(caches['v2'].get('answer3', version=1))
        self.assertEqual(caches['v2'].get('answer3', version=2), 42)
        # v2 set, default version = 2, but manually override version = 1
        caches['v2'].set('answer4', 42, version=1)
        self.assertEqual(cache.get('answer4'), 42)
        self.assertEqual(cache.get('answer4', version=1), 42)
        self.assertIsNone(cache.get('answer4', version=2))
        self.assertIsNone(caches['v2'].get('answer4'))
        self.assertEqual(caches['v2'].get('answer4', version=1), 42)
        self.assertIsNone(caches['v2'].get('answer4', version=2))
    def test_cache_versioning_add(self):
        # add, default version = 1, but manually override version = 2
        cache.add('answer1', 42, version=2)
        self.assertIsNone(cache.get('answer1', version=1))
        self.assertEqual(cache.get('answer1', version=2), 42)
        cache.add('answer1', 37, version=2)
        self.assertIsNone(cache.get('answer1', version=1))
        self.assertEqual(cache.get('answer1', version=2), 42)
        cache.add('answer1', 37, version=1)
        self.assertEqual(cache.get('answer1', version=1), 37)
        self.assertEqual(cache.get('answer1', version=2), 42)
        # v2 add, using default version = 2
        caches['v2'].add('answer2', 42)
        self.assertIsNone(cache.get('answer2', version=1))
        self.assertEqual(cache.get('answer2', version=2), 42)
        caches['v2'].add('answer2', 37)
        self.assertIsNone(cache.get('answer2', version=1))
        self.assertEqual(cache.get('answer2', version=2), 42)
        caches['v2'].add('answer2', 37, version=1)
        self.assertEqual(cache.get('answer2', version=1), 37)
        self.assertEqual(cache.get('answer2', version=2), 42)
        # v2 add, default version = 2, but manually override version = 1
        caches['v2'].add('answer3', 42, version=1)
        self.assertEqual(cache.get('answer3', version=1), 42)
        self.assertIsNone(cache.get('answer3', version=2))
        caches['v2'].add('answer3', 37, version=1)
        self.assertEqual(cache.get('answer3', version=1), 42)
        self.assertIsNone(cache.get('answer3', version=2))
        caches['v2'].add('answer3', 37)
        self.assertEqual(cache.get('answer3', version=1), 42)
        self.assertEqual(cache.get('answer3', version=2), 37)
    def test_cache_versioning_has_key(self):
        cache.set('answer1', 42)
        # has_key
        self.assertTrue(cache.has_key('answer1'))
        self.assertTrue(cache.has_key('answer1', version=1))
        self.assertFalse(cache.has_key('answer1', version=2))
        self.assertFalse(caches['v2'].has_key('answer1'))
        self.assertTrue(caches['v2'].has_key('answer1', version=1))
        self.assertFalse(caches['v2'].has_key('answer1', version=2))
    def test_cache_versioning_delete(self):
        cache.set('answer1', 37, version=1)
        cache.set('answer1', 42, version=2)
        cache.delete('answer1')
        self.assertIsNone(cache.get('answer1', version=1))
        self.assertEqual(cache.get('answer1', version=2), 42)
        cache.set('answer2', 37, version=1)
        cache.set('answer2', 42, version=2)
        cache.delete('answer2', version=2)
        self.assertEqual(cache.get('answer2', version=1), 37)
        self.assertIsNone(cache.get('answer2', version=2))
        cache.set('answer3', 37, version=1)
        cache.set('answer3', 42, version=2)
        caches['v2'].delete('answer3')
        self.assertEqual(cache.get('answer3', version=1), 37)
        self.assertIsNone(cache.get('answer3', version=2))
        cache.set('answer4', 37, version=1)
        cache.set('answer4', 42, version=2)
        caches['v2'].delete('answer4', version=1)
        self.assertIsNone(cache.get('answer4', version=1))
        self.assertEqual(cache.get('answer4', version=2), 42)
    def test_cache_versioning_incr_decr(self):
        cache.set('answer1', 37, version=1)
        cache.set('answer1', 42, version=2)
        cache.incr('answer1')
        self.assertEqual(cache.get('answer1', version=1), 38)
        self.assertEqual(cache.get('answer1', version=2), 42)
        cache.decr('answer1')
        self.assertEqual(cache.get('answer1', version=1), 37)
        self.assertEqual(cache.get('answer1', version=2), 42)
        cache.set('answer2', 37, version=1)
        cache.set('answer2', 42, version=2)
        cache.incr('answer2', version=2)
        self.assertEqual(cache.get('answer2', version=1), 37)
        self.assertEqual(cache.get('answer2', version=2), 43)
        cache.decr('answer2', version=2)
        self.assertEqual(cache.get('answer2', version=1), 37)
        self.assertEqual(cache.get('answer2', version=2), 42)
        cache.set('answer3', 37, version=1)
        cache.set('answer3', 42, version=2)
        caches['v2'].incr('answer3')
        self.assertEqual(cache.get('answer3', version=1), 37)
        self.assertEqual(cache.get('answer3', version=2), 43)
        caches['v2'].decr('answer3')
        self.assertEqual(cache.get('answer3', version=1), 37)
        self.assertEqual(cache.get('answer3', version=2), 42)
        cache.set('answer4', 37, version=1)
        cache.set('answer4', 42, version=2)
        caches['v2'].incr('answer4', version=1)
        self.assertEqual(cache.get('answer4', version=1), 38)
        self.assertEqual(cache.get('answer4', version=2), 42)
        caches['v2'].decr('answer4', version=1)
        self.assertEqual(cache.get('answer4', version=1), 37)
        self.assertEqual(cache.get('answer4', version=2), 42)
    def test_cache_versioning_get_set_many(self):
        # set, using default version = 1
        cache.set_many({'ford1': 37, 'arthur1': 42})
        self.assertDictEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
        self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
        self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
        self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
        self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
        self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
        # set, default version = 1, but manually override version = 2
        cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
        self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
        self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
        self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
        self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
        self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
        self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
        # v2 set, using default version = 2
        caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
        self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
        self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
        self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
        self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
        self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
        self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
        # v2 set, default version = 2, but manually override version = 1
        caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
        self.assertDictEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
        self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
        self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
        self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
        self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
        self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
    def test_incr_version(self):
        cache.set('answer', 42, version=2)
        self.assertIsNone(cache.get('answer'))
        self.assertIsNone(cache.get('answer', version=1))
        self.assertEqual(cache.get('answer', version=2), 42)
        self.assertIsNone(cache.get('answer', version=3))
        self.assertEqual(cache.incr_version('answer', version=2), 3)
        self.assertIsNone(cache.get('answer'))
        self.assertIsNone(cache.get('answer', version=1))
        self.assertIsNone(cache.get('answer', version=2))
        self.assertEqual(cache.get('answer', version=3), 42)
        caches['v2'].set('answer2', 42)
        self.assertEqual(caches['v2'].get('answer2'), 42)
        self.assertIsNone(caches['v2'].get('answer2', version=1))
        self.assertEqual(caches['v2'].get('answer2', version=2), 42)
        self.assertIsNone(caches['v2'].get('answer2', version=3))
        self.assertEqual(caches['v2'].incr_version('answer2'), 3)
        self.assertIsNone(caches['v2'].get('answer2'))
        self.assertIsNone(caches['v2'].get('answer2', version=1))
        self.assertIsNone(caches['v2'].get('answer2', version=2))
        self.assertEqual(caches['v2'].get('answer2', version=3), 42)
        with self.assertRaises(ValueError):
            cache.incr_version('does_not_exist')
    def test_decr_version(self):
        cache.set('answer', 42, version=2)
        self.assertIsNone(cache.get('answer'))
        self.assertIsNone(cache.get('answer', version=1))
        self.assertEqual(cache.get('answer', version=2), 42)
        self.assertEqual(cache.decr_version('answer', version=2), 1)
        self.assertEqual(cache.get('answer'), 42)
        self.assertEqual(cache.get('answer', version=1), 42)
        self.assertIsNone(cache.get('answer', version=2))
        caches['v2'].set('answer2', 42)
        self.assertEqual(caches['v2'].get('answer2'), 42)
        self.assertIsNone(caches['v2'].get('answer2', version=1))
        self.assertEqual(caches['v2'].get('answer2', version=2), 42)
        self.assertEqual(caches['v2'].decr_version('answer2'), 1)
        self.assertIsNone(caches['v2'].get('answer2'))
        self.assertEqual(caches['v2'].get('answer2', version=1), 42)
        self.assertIsNone(caches['v2'].get('answer2', version=2))
        with self.assertRaises(ValueError):
            cache.decr_version('does_not_exist', version=2)
    def test_custom_key_func(self):
        # Two caches with different key functions aren't visible to each other
        cache.set('answer1', 42)
        self.assertEqual(cache.get('answer1'), 42)
        self.assertIsNone(caches['custom_key'].get('answer1'))
        self.assertIsNone(caches['custom_key2'].get('answer1'))
        caches['custom_key'].set('answer2', 42)
        self.assertIsNone(cache.get('answer2'))
        self.assertEqual(caches['custom_key'].get('answer2'), 42)
        self.assertEqual(caches['custom_key2'].get('answer2'), 42)
    def test_cache_write_unpicklable_object(self):
        # Round-trip a cookie-bearing response through the cache middleware
        # twice; content and cookies must survive both passes.
        update_middleware = UpdateCacheMiddleware()
        update_middleware.cache = cache
        fetch_middleware = FetchFromCacheMiddleware()
        fetch_middleware.cache = cache
        request = self.factory.get('/cache/test')
        request._cache_update_cache = True
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)
        response = HttpResponse()
        content = 'Testing cookie serialization.'
        response.content = content
        response.set_cookie('foo', 'bar')
        update_middleware.process_response(request, response)
        get_cache_data = fetch_middleware.process_request(request)
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(get_cache_data.content, content.encode('utf-8'))
        self.assertEqual(get_cache_data.cookies, response.cookies)
        update_middleware.process_response(request, get_cache_data)
        get_cache_data = fetch_middleware.process_request(request)
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(get_cache_data.content, content.encode('utf-8'))
        self.assertEqual(get_cache_data.cookies, response.cookies)
    def test_add_fail_on_pickleerror(self):
        # Shouldn't fail silently if trying to cache an unpicklable type.
        with self.assertRaises(pickle.PickleError):
            cache.add('unpicklable', Unpicklable())
    def test_set_fail_on_pickleerror(self):
        with self.assertRaises(pickle.PickleError):
            cache.set('unpicklable', Unpicklable())
    def test_get_or_set(self):
        self.assertIsNone(cache.get('projector'))
        self.assertEqual(cache.get_or_set('projector', 42), 42)
        self.assertEqual(cache.get('projector'), 42)
        self.assertEqual(cache.get_or_set('null', None), None)
    def test_get_or_set_callable(self):
        def my_callable():
            return 'value'
        self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
        self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
    def test_get_or_set_version(self):
        msg = "get_or_set() missing 1 required positional argument: 'default'"
        cache.get_or_set('brian', 1979, version=2)
        with self.assertRaisesMessage(TypeError, msg):
            cache.get_or_set('brian')
        with self.assertRaisesMessage(TypeError, msg):
            cache.get_or_set('brian', version=1)
        self.assertIsNone(cache.get('brian', version=1))
        self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
        self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
        self.assertIsNone(cache.get('brian', version=3))
    def test_get_or_set_racing(self):
        with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
            # Simulate cache.add() failing to add a value. In that case, the
            # default value should be returned.
            cache_add.return_value = False
            self.assertEqual(cache.get_or_set('key', 'default'), 'default')
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.db.DatabaseCache',
    # Spaces are used in the table name to ensure quoting/escaping is working
    LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
    # Run the common backend suite against the database cache backend.
    available_apps = ['cache']
    def setUp(self):
        # The super call needs to happen first for the settings override.
        super(DBCacheTests, self).setUp()
        self.create_table()
    def tearDown(self):
        # The super call needs to happen first because it uses the database.
        super(DBCacheTests, self).tearDown()
        self.drop_table()
    def create_table(self):
        # Create the cache table in the (overridden) test database.
        management.call_command('createcachetable', verbosity=0, interactive=False)
    def drop_table(self):
        with connection.cursor() as cursor:
            table_name = connection.ops.quote_name('test cache table')
            cursor.execute('DROP TABLE %s' % table_name)
    def test_zero_cull(self):
        self._perform_cull_test(caches['zero_cull'], 50, 18)
    def test_second_call_doesnt_crash(self):
        out = io.StringIO()
        management.call_command('createcachetable', stdout=out)
        self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
    @override_settings(CACHES=caches_setting_for_tests(
        BACKEND='django.core.cache.backends.db.DatabaseCache',
        # Use another table name to avoid the 'table already exists' message.
        LOCATION='createcachetable_dry_run_mode'
    ))
    def test_createcachetable_dry_run_mode(self):
        out = io.StringIO()
        management.call_command('createcachetable', dry_run=True, stdout=out)
        output = out.getvalue()
        self.assertTrue(output.startswith("CREATE TABLE"))
    def test_createcachetable_with_table_argument(self):
        """
        Delete and recreate cache table with legacy behavior (explicitly
        specifying the table name).
        """
        self.drop_table()
        out = io.StringIO()
        management.call_command(
            'createcachetable',
            'test cache table',
            verbosity=2,
            stdout=out,
        )
        self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
    # Re-run the entire DB cache suite with USE_TZ enabled.
    pass
class DBCacheRouter:
    """A router that puts the cache table on the 'other' database."""
    def _route(self, app_label):
        # django_cache models belong on 'other'; no opinion for anything else.
        return 'other' if app_label == 'django_cache' else None
    def db_for_read(self, model, **hints):
        return self._route(model._meta.app_label)
    def db_for_write(self, model, **hints):
        return self._route(model._meta.app_label)
    def allow_migrate(self, db, app_label, **hints):
        if app_label != 'django_cache':
            return None
        return db == 'other'
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
            'LOCATION': 'my_cache_table',
        },
    },
)
class CreateCacheTableForDBCacheTests(TestCase):
    # Verifies createcachetable respects database routers.
    multi_db = True
    @override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
    def test_createcachetable_observes_database_router(self):
        # cache table should not be created on 'default'
        with self.assertNumQueries(0, using='default'):
            management.call_command('createcachetable',
                                    database='default',
                                    verbosity=0, interactive=False)
        # cache table should be created on 'other'
        # Queries:
        # 1: check table doesn't already exist
        # 2: create savepoint (if transactional DDL is supported)
        # 3: create the table
        # 4: create the index
        # 5: release savepoint (if transactional DDL is supported)
        num = 5 if connections['other'].features.can_rollback_ddl else 3
        with self.assertNumQueries(num, using='other'):
            management.call_command('createcachetable',
                                    database='other',
                                    verbosity=0, interactive=False)
class PicklingSideEffect:
    """Test helper whose pickling records whether the cache's lock was held."""
    def __init__(self, cache):
        self.cache = cache
        self.locked = False
    def __getstate__(self):
        # pickle calls this while the value is serialized; remember whether a
        # writer currently holds the cache lock, then pickle an empty state.
        self.locked = self.locked or bool(self.cache._lock.active_writers)
        return {}
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
    def setUp(self):
        super(LocMemCacheTests, self).setUp()
        # LocMem requires a hack to make the other caches
        # share a data store with the 'normal' cache.
        # (Each LocMem alias normally gets its own private _cache dict and
        # _expire_info dict; the common tests assume a shared store.)
        caches['prefix']._cache = cache._cache
        caches['prefix']._expire_info = cache._expire_info
        caches['v2']._cache = cache._cache
        caches['v2']._expire_info = cache._expire_info
        caches['custom_key']._cache = cache._cache
        caches['custom_key']._expire_info = cache._expire_info
        caches['custom_key2']._cache = cache._cache
        caches['custom_key2']._expire_info = cache._expire_info
    @override_settings(CACHES={
        'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
        'other': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'other'
        },
    })
    def test_multiple_caches(self):
        "Multiple locmem caches are isolated"
        cache.set('value', 42)
        self.assertEqual(caches['default'].get('value'), 42)
        self.assertIsNone(caches['other'].get('value'))
    def test_locking_on_pickle(self):
        """#20613/#18541 -- Ensures pickling is done outside of the lock."""
        bad_obj = PicklingSideEffect(cache)
        cache.set('set', bad_obj)
        self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
        cache.add('add', bad_obj)
        self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
    def test_incr_decr_timeout(self):
        """incr/decr does not modify expiry time (matches memcached behavior)"""
        key = 'value'
        _key = cache.make_key(key)
        cache.set(key, 1, timeout=cache.default_timeout * 10)
        expire = cache._expire_info[_key]
        cache.incr(key)
        self.assertEqual(expire, cache._expire_info[_key])
        cache.decr(key)
        self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
configured_caches = {}
for _cache_params in settings.CACHES.values():
    configured_caches[_cache_params['BACKEND']] = _cache_params
# Params for each client library's backend, or None when not configured.
MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache')
PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache')
# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {'cull', 'zero_cull'}
class BaseMemcachedTests(BaseCacheTests):
    """Tests shared by both memcached backends (python-memcached and pylibmc).

    Subclasses must set ``base_params`` to the backend's settings dict; these
    tests require a reachable memcached server (see module-level discovery).
    """
    # By default it's assumed that the client doesn't clean up connections
    # properly, in which case the backend must do so after each request.
    should_disconnect_on_close = True
    def test_location_multiple_servers(self):
        """Every supported LOCATION spelling yields the same server list."""
        locations = [
            ['server1.tld', 'server2:11211'],
            'server1.tld;server2:11211',
            'server1.tld,server2:11211',
        ]
        for location in locations:
            params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location}
            with self.settings(CACHES={'default': params}):
                self.assertEqual(cache._servers, ['server1.tld', 'server2:11211'])
    def test_invalid_key_characters(self):
        """
        On memcached, we don't introduce a duplicate key validation
        step (for speed reasons), we just let the memcached API
        library raise its own exception on bad keys. Refs #6447.
        In order to be memcached-API-library agnostic, we only assert
        that a generic exception of some kind is raised.
        """
        # memcached does not allow whitespace or control characters in keys
        # when using the ascii protocol.
        with self.assertRaises(Exception):
            cache.set('key with spaces', 'value')
    def test_invalid_key_length(self):
        """Keys over memcached's limit raise some client-library exception."""
        # memcached limits key length to 250
        with self.assertRaises(Exception):
            cache.set('a' * 251, 'value')
    def test_default_never_expiring_timeout(self):
        # Regression test for #22845
        with self.settings(CACHES=caches_setting_for_tests(
                base=self.base_params,
                exclude=memcached_excluded_caches,
                TIMEOUT=None)):
            cache.set('infinite_foo', 'bar')
            self.assertEqual(cache.get('infinite_foo'), 'bar')
    def test_default_far_future_timeout(self):
        # Regression test for #22845
        with self.settings(CACHES=caches_setting_for_tests(
                base=self.base_params,
                exclude=memcached_excluded_caches,
                # 60*60*24*365, 1 year
                TIMEOUT=31536000)):
            cache.set('future_foo', 'bar')
            self.assertEqual(cache.get('future_foo'), 'bar')
    def test_cull(self):
        # culling isn't implemented, memcached deals with it.
        pass
    def test_zero_cull(self):
        # culling isn't implemented, memcached deals with it.
        pass
    def test_memcached_deletes_key_on_failed_set(self):
        # By default memcached allows objects up to 1MB. For the cache_db session
        # backend to always use the current session, memcached needs to delete
        # the old key if it fails to set.
        # pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
        # tell from a quick check of its source code. This is falling back to
        # the default value exposed by python-memcached on my system.
        max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
        cache.set('small_value', 'a')
        self.assertEqual(cache.get('small_value'), 'a')
        large_value = 'a' * (max_value_length + 1)
        try:
            cache.set('small_value', large_value)
        except Exception:
            # Some clients (e.g. pylibmc) raise when the value is too large,
            # while others (e.g. python-memcached) intentionally return True
            # indicating success. This test is primarily checking that the key
            # was deleted, so the return/exception behavior for the set()
            # itself is not important.
            pass
        # small_value should be deleted, or set if configured to accept larger values
        value = cache.get('small_value')
        self.assertTrue(value is None or value == large_value)
    def test_close(self):
        # For clients that don't manage their connections properly, the
        # connection is closed when the request is complete.
        signals.request_finished.disconnect(close_old_connections)
        try:
            with mock.patch.object(cache._lib.Client, 'disconnect_all', autospec=True) as mock_disconnect:
                signals.request_finished.send(self.__class__)
                self.assertIs(mock_disconnect.called, self.should_disconnect_on_close)
        finally:
            signals.request_finished.connect(close_old_connections)
@unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
    base=MemcachedCache_params,
    exclude=memcached_excluded_caches,
))
class MemcachedCacheTests(BaseMemcachedTests, TestCase):
    """Tests run against the python-memcached based backend."""
    base_params = MemcachedCache_params
    def test_memcached_uses_highest_pickle_version(self):
        """Regression test for #19810: every alias pickles with the highest protocol."""
        for alias in settings.CACHES:
            client = caches[alias]._cache
            self.assertEqual(client.pickleProtocol, pickle.HIGHEST_PROTOCOL)
    @override_settings(CACHES=caches_setting_for_tests(
        base=MemcachedCache_params,
        exclude=memcached_excluded_caches,
        OPTIONS={'server_max_value_length': 9999},
    ))
    def test_memcached_options(self):
        """OPTIONS entries are forwarded to the underlying client."""
        self.assertEqual(cache._cache.server_max_value_length, 9999)
@unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
    base=PyLibMCCache_params,
    exclude=memcached_excluded_caches,
))
class PyLibMCCacheTests(BaseMemcachedTests, TestCase):
    """Tests run against the pylibmc based backend."""
    base_params = PyLibMCCache_params
    # libmemcached manages its own connections.
    should_disconnect_on_close = False
    # By default, pylibmc/libmemcached don't verify keys client-side and so
    # this test triggers a server-side bug that causes later tests to fail
    # (#19914). The `verify_keys` behavior option could be set to True (which
    # would avoid triggering the server-side bug), however this test would
    # still fail due to https://github.com/lericson/pylibmc/issues/219.
    @unittest.skip("triggers a memcached-server bug, causing subsequent tests to fail")
    def test_invalid_key_characters(self):
        pass
    @override_settings(CACHES=caches_setting_for_tests(
        base=PyLibMCCache_params,
        exclude=memcached_excluded_caches,
        OPTIONS={
            'binary': True,
            'behaviors': {'tcp_nodelay': True},
        },
    ))
    def test_pylibmc_options(self):
        """Plain options and the `behaviors` dict both reach the client."""
        self.assertTrue(cache._cache.binary)
        self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
    @override_settings(CACHES=caches_setting_for_tests(
        base=PyLibMCCache_params,
        exclude=memcached_excluded_caches,
        OPTIONS={'tcp_nodelay': True},
    ))
    def test_pylibmc_legacy_options(self):
        """Top-level behavior options still work but emit a deprecation warning."""
        expected_message = (
            "Specifying pylibmc cache behaviors as a top-level property "
            "within `OPTIONS` is deprecated. Move `tcp_nodelay` into a dict named "
            "`behaviors` inside `OPTIONS` instead."
        )
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
        self.assertEqual(len(caught), 1)
        self.assertIsInstance(caught[0].message, RemovedInDjango21Warning)
        self.assertEqual(str(caught[0].message), expected_message)
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
    """
    Specific test cases for the file-based cache.

    A fresh temporary directory is used as the cache LOCATION for every test.
    """
    def setUp(self):
        super(FileBasedCacheTests, self).setUp()
        self.dirname = tempfile.mkdtemp()
        # Caches location cannot be modified through override_settings / modify_settings,
        # hence settings are manipulated directly here and the setting_changed signal
        # is triggered manually.
        for cache_params in settings.CACHES.values():
            cache_params.update({'LOCATION': self.dirname})
        setting_changed.send(self.__class__, setting='CACHES', enter=False)
    def tearDown(self):
        super(FileBasedCacheTests, self).tearDown()
        # Call parent first, as cache.clear() may recreate cache base directory
        shutil.rmtree(self.dirname)
    def test_ignores_non_cache_files(self):
        """cache.clear() must leave unrelated files in the cache dir alone."""
        fname = os.path.join(self.dirname, 'not-a-cache-file')
        with open(fname, 'w'):
            os.utime(fname, None)
        cache.clear()
        self.assertTrue(os.path.exists(fname),
                        'Expected cache.clear to ignore non cache files')
        os.remove(fname)
    def test_clear_does_not_remove_cache_dir(self):
        cache.clear()
        self.assertTrue(os.path.exists(self.dirname),
                        'Expected cache.clear to keep the cache dir')
    def test_creates_cache_dir_if_nonexistent(self):
        os.rmdir(self.dirname)
        cache.set('foo', 'bar')
        # Bug fix: the original called os.path.exists() without asserting on
        # the result, so this test could never fail.
        self.assertTrue(os.path.exists(self.dirname))
    def test_get_ignores_enoent(self):
        """A cache file vanishing between list and read is not an error."""
        cache.set('foo', 'bar')
        os.unlink(cache._key_to_file('foo'))
        # Returns the default instead of erroring.
        self.assertEqual(cache.get('foo', 'baz'), 'baz')
    def test_get_does_not_ignore_non_enoent_errno_values(self):
        """Only ENOENT is swallowed; other I/O errors must propagate."""
        with mock.patch('builtins.open', side_effect=IOError):
            with self.assertRaises(IOError):
                cache.get('foo')
@override_settings(CACHES={
    'default': {'BACKEND': 'cache.liberal_backend.CacheClass'},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
    """
    Tests for the ability to mixin a custom ``validate_key`` method to
    a custom cache backend that otherwise inherits from a builtin
    backend, and override the default key validation. Refs #6447.
    """
    def test_custom_key_validation(self):
        # This key is both longer than 250 characters and contains spaces,
        # so the default memcached-style validation would reject it.
        awkward_key = 'some key with spaces' * 15
        stored = 'a value'
        cache.set(awkward_key, stored)
        self.assertEqual(cache.get(awkward_key), stored)
@override_settings(CACHES={
    'default': {'BACKEND': 'cache.closeable_cache.CacheClass'},
})
class CacheClosingTests(SimpleTestCase):
    """The cache backend is closed when the request_finished signal fires."""
    def test_close(self):
        self.assertFalse(cache.closed)
        signals.request_finished.send(self.__class__)
        self.assertTrue(cache.closed)
# Baseline locmem configuration used by DefaultNonExpiringCacheKeyTests.
DEFAULT_MEMORY_CACHES_SETTINGS = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'unique-snowflake',
    }
}
# The same configuration, but with TIMEOUT=None so keys never expire.
NEVER_EXPIRING_CACHES_SETTINGS = {
    alias: dict(params, TIMEOUT=None)
    for alias, params in DEFAULT_MEMORY_CACHES_SETTINGS.items()
}
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
    """
    Cache settings with TIMEOUT=None create caches whose keys never expire,
    while an unset TIMEOUT falls back to the 5 minute default.
    """
    def setUp(self):
        # The 5 minute (300 seconds) default expiration time for keys is
        # defined in the implementation of the initializer method of the
        # BaseCache type.
        self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
    def tearDown(self):
        del self.DEFAULT_TIMEOUT
    def test_default_expiration_time_for_keys_is_5_minutes(self):
        """The default cache-key expiration time is 5 minutes (see
        django.core.cache.backends.base.BaseCache.__init__())."""
        self.assertEqual(300, self.DEFAULT_TIMEOUT)
    def test_caches_with_unset_timeout_has_correct_default_timeout(self):
        """A cache configured without TIMEOUT uses the default 5 minute timeout."""
        cache = caches[DEFAULT_CACHE_ALIAS]
        self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
    @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
        """A cache configured with TIMEOUT=None has a None default timeout,
        meaning "no timeout"."""
        cache = caches[DEFAULT_CACHE_ALIAS]
        self.assertIsNone(cache.default_timeout)
        self.assertIsNone(cache.get_backend_timeout())
    @override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
    def test_caches_with_unset_timeout_set_expiring_key(self):
        """With TIMEOUT unset, stored keys carry the default expiry timestamp."""
        cache = caches[DEFAULT_CACHE_ALIAS]
        cache.set("my-key", "my-value")
        self.assertIsNotNone(cache._expire_info[cache.make_key("my-key")])
    @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
        """With TIMEOUT=None, stored keys carry no expiry timestamp."""
        cache = caches[DEFAULT_CACHE_ALIAS]
        cache.set("another-key", "another-value")
        self.assertIsNone(cache._expire_info[cache.make_key("another-key")])
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
    ALLOWED_HOSTS=['.example.com'],
)
class CacheUtils(SimpleTestCase):
    """TestCase for django.utils.cache functions."""
    def setUp(self):
        self.host = 'www.example.com'
        self.path = '/cache/test/'
        self.factory = RequestFactory(HTTP_HOST=self.host)
    def tearDown(self):
        cache.clear()
    def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
        """Build a request flagged for the cache-update middleware.

        ``update_cache`` defaults to True when omitted. Bug fix: the previous
        expression ``True if not update_cache else update_cache`` coerced an
        explicit ``update_cache=False`` back to True.
        """
        request = self._get_request(self.host, self.path,
            method, query_string=query_string)
        request._cache_update_cache = True if update_cache is None else update_cache
        return request
    def _set_cache(self, request, msg):
        """Store *msg* as the response for *request* via UpdateCacheMiddleware."""
        response = HttpResponse()
        response.content = msg
        return UpdateCacheMiddleware().process_response(request, response)
    def test_patch_vary_headers(self):
        """patch_vary_headers merges new headers case-insensitively, no dupes."""
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            response = HttpResponse()
            if initial_vary is not None:
                response['Vary'] = initial_vary
            patch_vary_headers(response, newheaders)
            self.assertEqual(response['Vary'], resulting_vary)
    def test_get_cache_key(self):
        """learn_cache_key stores the header list; get_cache_key derives keys from it."""
        request = self.factory.get(self.path)
        response = HttpResponse()
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )
        # A specified key_prefix is taken into account.
        key_prefix = 'localprefix'
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(
            get_cache_key(request, key_prefix=key_prefix),
            'views.decorators.cache.cache_page.localprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )
    def test_get_cache_key_with_query(self):
        """The query string participates in the URL hash of the cache key."""
        request = self.factory.get(self.path, {'test': 1})
        response = HttpResponse()
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # The querystring is taken into account.
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
        )
    def test_cache_key_varies_by_url(self):
        """
        get_cache_key keys differ by fully-qualified URL instead of path
        """
        request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
        learn_cache_key(request1, HttpResponse())
        request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
        learn_cache_key(request2, HttpResponse())
        self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
    def test_learn_cache_key(self):
        """The response's Vary header feeds into the learned header list."""
        request = self.factory.head(self.path)
        response = HttpResponse()
        response['Vary'] = 'Pony'
        # Make sure that the Vary header is added to the key hash
        learn_cache_key(request, response)
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )
    def test_patch_cache_control(self):
        """patch_cache_control merges directives; private/public are exclusive."""
        tests = (
            # Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
            (None, {'private': True}, {'private'}),
            ('', {'private': True}, {'private'}),
            # Test whether private/public attributes are mutually exclusive
            ('private', {'private': True}, {'private'}),
            ('private', {'public': True}, {'public'}),
            ('public', {'public': True}, {'public'}),
            ('public', {'private': True}, {'private'}),
            ('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
            ('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
            ('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
        )
        cc_delim_re = re.compile(r'\s*,\s*')
        for initial_cc, newheaders, expected_cc in tests:
            response = HttpResponse()
            if initial_cc is not None:
                response['Cache-Control'] = initial_cc
            patch_cache_control(response, **newheaders)
            parts = set(cc_delim_re.split(response['Cache-Control']))
            self.assertEqual(parts, expected_cc)
@override_settings(CACHES={
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'KEY_PREFIX': 'cacheprefix',
    },
})
class PrefixedCacheUtils(CacheUtils):
    """Re-run the CacheUtils tests with a KEY_PREFIX configured."""
@override_settings(
    CACHE_MIDDLEWARE_SECONDS=60,
    CACHE_MIDDLEWARE_KEY_PREFIX='test',
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
)
class CacheHEADTest(SimpleTestCase):
    """HEAD requests interact correctly with the cache middleware."""
    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()
    def tearDown(self):
        cache.clear()
    def _set_cache(self, request, msg):
        """Store *msg* for *request* via the update-cache middleware."""
        response = HttpResponse()
        response.content = msg
        return UpdateCacheMiddleware().process_response(request, response)
    def test_head_caches_correctly(self):
        """A HEAD response is cached and later served from the cache."""
        test_content = 'test content'
        request = self.factory.head(self.path)
        request._cache_update_cache = True
        self._set_cache(request, test_content)
        request = self.factory.head(self.path)
        request._cache_update_cache = True
        cached = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNotNone(cached)
        self.assertEqual(test_content.encode(), cached.content)
    def test_head_with_cached_get(self):
        """A HEAD request is served from a previously cached GET response."""
        test_content = 'test content'
        get_request = self.factory.get(self.path)
        get_request._cache_update_cache = True
        self._set_cache(get_request, test_content)
        head_request = self.factory.head(self.path)
        cached = FetchFromCacheMiddleware().process_request(head_request)
        self.assertIsNotNone(cached)
        self.assertEqual(test_content.encode(), cached.content)
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    LANGUAGES=[
        ('en', 'English'),
        ('es', 'Spanish'),
    ],
)
class CacheI18nTest(TestCase):
    """Cache keys and the cache middleware must reflect the active
    i18n (language), l10n (formatting) and time-zone state."""
    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()
    def tearDown(self):
        cache.clear()
    @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
    def test_cache_key_i18n_translation(self):
        """With USE_I18N on, the active language appears in the cache key."""
        request = self.factory.get(self.path)
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)
    def check_accept_language_vary(self, accept_language, vary, reference_key):
        # Helper: a request carrying *accept_language*, learned against a
        # response with the given Vary header, must yield *reference_key*.
        request = self.factory.get(self.path)
        request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
        request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
        response = HttpResponse()
        response['Vary'] = vary
        key = learn_cache_key(request, response)
        key2 = get_cache_key(request)
        self.assertEqual(key, reference_key)
        self.assertEqual(key2, reference_key)
    @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
    def test_cache_key_i18n_translation_accept_language(self):
        """Different Accept-Language spellings resolving to the same language,
        and any ordering of the Vary headers, must produce the same key."""
        lang = translation.get_language()
        self.assertEqual(lang, 'en')
        request = self.factory.get(self.path)
        request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
        response = HttpResponse()
        response['Vary'] = 'accept-encoding'
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
        self.check_accept_language_vary(
            'en-us',
            'cookie, accept-language, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'en-US',
            'cookie, accept-encoding, accept-language',
            key
        )
        self.check_accept_language_vary(
            'en-US,en;q=0.8',
            'accept-encoding, accept-language, cookie',
            key
        )
        self.check_accept_language_vary(
            'en-US,en;q=0.8,ko;q=0.6',
            'accept-language, cookie, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
            'accept-encoding, cookie, accept-language',
            key
        )
        self.check_accept_language_vary(
            'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
            'accept-language, accept-encoding, cookie',
            key
        )
        self.check_accept_language_vary(
            'ko;q=1.0,en;q=0.5',
            'cookie, accept-language, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'ko, en',
            'cookie, accept-encoding, accept-language',
            key
        )
        self.check_accept_language_vary(
            'ko-KR, en-US',
            'accept-encoding, accept-language, cookie',
            key
        )
    @override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
    def test_cache_key_i18n_formatting(self):
        """With USE_L10N on, the language still appears in the cache key."""
        request = self.factory.get(self.path)
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)
    @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
    def test_cache_key_i18n_timezone(self):
        """With USE_TZ on, the sanitized time-zone name appears in the key."""
        request = self.factory.get(self.path)
        # This is tightly coupled to the implementation,
        # but it's the most straightforward way to test the key.
        tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
        tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)
    @override_settings(USE_I18N=False, USE_L10N=False)
    def test_cache_key_no_i18n(self):
        """With i18n/l10n off, neither language nor tz leaks into the key."""
        request = self.factory.get(self.path)
        lang = translation.get_language()
        tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
        tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
        self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
    @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
    def test_cache_key_with_non_ascii_tzname(self):
        # Timezone-dependent cache keys should use ASCII characters only
        # (#17476). The implementation here is a bit odd (timezone.utc is an
        # instance, not a class), but it simulates the correct conditions.
        class CustomTzName(timezone.utc):
            pass
        request = self.factory.get(self.path)
        response = HttpResponse()
        with timezone.override(CustomTzName):
            CustomTzName.zone = 'Hora estándar de Argentina'.encode('UTF-8')  # UTF-8 string
            sanitized_name = 'Hora_estndar_de_Argentina'
            self.assertIn(
                sanitized_name, learn_cache_key(request, response),
                "Cache keys should include the time zone name when time zones are active"
            )
            CustomTzName.name = 'Hora estándar de Argentina'  # unicode
            sanitized_name = 'Hora_estndar_de_Argentina'
            self.assertIn(
                sanitized_name, learn_cache_key(request, response),
                "Cache keys should include the time zone name when time zones are active"
            )
    @ignore_warnings(category=RemovedInDjango21Warning)  # USE_ETAGS=True
    @override_settings(
        CACHE_MIDDLEWARE_KEY_PREFIX="test",
        CACHE_MIDDLEWARE_SECONDS=60,
        USE_ETAGS=True,
        USE_I18N=True,
    )
    def test_middleware(self):
        """End to end: entries vary by query string and by active language,
        and ETag emission follows USE_ETAGS."""
        def set_cache(request, lang, msg):
            # Activate *lang*, then store *msg* via the update-cache middleware.
            translation.activate(lang)
            response = HttpResponse()
            response.content = msg
            return UpdateCacheMiddleware().process_response(request, response)
        # cache with non empty request.GET
        request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
        request._cache_update_cache = True
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # first access, cache must return None
        self.assertIsNone(get_cache_data)
        response = HttpResponse()
        content = 'Check for cache with QUERY_STRING'
        response.content = content
        UpdateCacheMiddleware().process_response(request, response)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # cache must return content
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(get_cache_data.content, content.encode())
        # different QUERY_STRING, cache must be empty
        request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
        request._cache_update_cache = True
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)
        # i18n tests
        en_message = "Hello world!"
        es_message = "Hola mundo!"
        request = self.factory.get(self.path)
        request._cache_update_cache = True
        set_cache(request, 'en', en_message)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # The cache can be recovered
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(get_cache_data.content, en_message.encode())
        # ETags are used.
        self.assertTrue(get_cache_data.has_header('ETag'))
        # ETags can be disabled.
        with self.settings(USE_ETAGS=False):
            request._cache_update_cache = True
            set_cache(request, 'en', en_message)
            get_cache_data = FetchFromCacheMiddleware().process_request(request)
            self.assertFalse(get_cache_data.has_header('ETag'))
        # change the session language and set content
        request = self.factory.get(self.path)
        request._cache_update_cache = True
        set_cache(request, 'es', es_message)
        # change again the language
        translation.activate('en')
        # retrieve the content from cache
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, en_message.encode())
        # change again the language
        translation.activate('es')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, es_message.encode())
        # reset the language
        translation.deactivate()
    @override_settings(
        CACHE_MIDDLEWARE_KEY_PREFIX="test",
        CACHE_MIDDLEWARE_SECONDS=60,
        USE_ETAGS=True,
    )
    def test_middleware_doesnt_cache_streaming_response(self):
        """Streaming responses must never be stored by the cache middleware."""
        request = self.factory.get(self.path)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)
        # This test passes on Python < 3.3 even without the corresponding code
        # in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
        # fails (http://bugs.python.org/issue14288). LocMemCache silently
        # swallows the exception and doesn't store the response in cache.
        content = ['Check for cache with streaming content.']
        response = StreamingHttpResponse(content)
        UpdateCacheMiddleware().process_response(request, response)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)
@override_settings(CACHES={
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'KEY_PREFIX': 'cacheprefix',
    },
})
class PrefixedCacheI18nTest(CacheI18nTest):
    """Re-run the CacheI18nTest suite with a KEY_PREFIX configured."""
def hello_world_view(request, value):
    """Trivial view used as a cache_page/middleware target; echoes *value*."""
    body = 'Hello World %s' % value
    return HttpResponse(body)
def csrf_view(request):
    """View whose body is the CSRF token, forcing a CSRF cookie on the response."""
    token = csrf(request)['csrf_token']
    return HttpResponse(token)
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, 'default')
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
    """
    A response that sets a user-specific (and potentially security
    sensitive) cookie in reply to a cookie-less request must not be
    cached by Django.
    """
    cache_mw = CacheMiddleware()
    csrf_mw = CsrfViewMiddleware()
    request = self.factory.get('/view/')
    # Nothing has been cached for this URL yet.
    self.assertIsNone(cache_mw.process_request(request))
    csrf_mw.process_view(request, csrf_view, (), {})
    resp = csrf_mw.process_response(request, csrf_view(request))
    resp = cache_mw.process_response(request, resp)
    # The CSRF cookie set on a cookie-less request prevented caching.
    self.assertIsNone(cache_mw.process_request(request))
def test_304_response_has_http_caching_headers_but_not_cached(self):
    """A 304 response gets caching headers patched on but is never stored."""
    original_view = mock.Mock(return_value=HttpResponseNotModified())
    cached_view = cache_page(2)(original_view)
    request = self.factory.get('/view/')
    cached_view(request).close()
    # The second call must reach the wrapped view again: no cache hit.
    response = cached_view(request)
    response.close()
    self.assertEqual(original_view.call_count, 2)
    self.assertIsInstance(response, HttpResponseNotModified)
    self.assertIn('Cache-Control', response)
    self.assertIn('Expires', response)
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
    """
    Header handling for TemplateResponse objects.

    Mostly overlaps with the plain-HttpResponse tests, but ETag is
    special for a TemplateResponse because it depends on the rendered
    content, which does not necessarily exist yet.
    """
    def setUp(self):
        self.factory = RequestFactory()
        self.path = '/cache/test/'

    def tearDown(self):
        cache.clear()

    def test_patch_vary_headers(self):
        # (initial Vary header, headers to add, expected resulting Vary)
        cases = (
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial, added, expected in cases:
            tmpl = engines['django'].from_string("This is a test")
            response = TemplateResponse(HttpRequest(), tmpl)
            if initial is not None:
                response['Vary'] = initial
            patch_vary_headers(response, added)
            self.assertEqual(response['Vary'], expected)

    def test_get_cache_key(self):
        request = self.factory.get(self.path)
        tmpl = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), tmpl)
        # No headers have been learned yet, so no key can be derived.
        self.assertIsNone(get_cache_key(request))
        learn_cache_key(request, response)
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )
        # A custom key_prefix changes the generated key.
        learn_cache_key(request, response, key_prefix='localprefix')
        self.assertEqual(
            get_cache_key(request, key_prefix='localprefix'),
            'views.decorators.cache.cache_page.localprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_get_cache_key_with_query(self):
        request = self.factory.get(self.path, {'test': 1})
        tmpl = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), tmpl)
        # No headers have been learned yet, so no key can be derived.
        self.assertIsNone(get_cache_key(request))
        learn_cache_key(request, response)
        # The querystring is folded into the key hash.
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
        )

    @override_settings(USE_ETAGS=False)
    def test_without_etag(self):
        tmpl = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), tmpl)
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertFalse(response.has_header('ETag'))

    @ignore_warnings(category=RemovedInDjango21Warning)
    @override_settings(USE_ETAGS=True)
    def test_with_etag(self):
        tmpl = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), tmpl)
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        # The ETag only appears once the response has been rendered.
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(SimpleTestCase):
    """Key generation for {% cache %} template fragments."""

    def test_without_vary_on(self):
        self.assertEqual(
            make_template_fragment_key('a.fragment'),
            'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e',
        )

    def test_with_one_vary_on(self):
        self.assertEqual(
            make_template_fragment_key('foo', ['abc']),
            'template.cache.foo.900150983cd24fb0d6963f7d28e17f72',
        )

    def test_with_many_vary_on(self):
        self.assertEqual(
            make_template_fragment_key('bar', ['abc', 'def']),
            'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88',
        )

    def test_proper_escaping(self):
        self.assertEqual(
            make_template_fragment_key('spam', ['abc:def%']),
            'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469',
        )
class CacheHandlerTest(SimpleTestCase):
    def test_same_instance(self):
        """Looking up the same alias twice yields one shared instance."""
        self.assertIs(caches['default'], caches['default'])

    def test_per_thread(self):
        """Each thread gets its own backend instance for a given alias."""
        seen = []

        def collect():
            seen.append(caches['default'])

        for _ in range(2):
            worker = threading.Thread(target=collect)
            worker.start()
            worker.join()
        self.assertIsNot(seen[0], seen[1])
``` |
[
{
"content": "Repeat the code precisely:\n```python\nfrom rllab.envs.mujoco.mujoco_env import MujocoEnv\nfrom rllab.core.serializable import Serializable\nfrom rllab.envs.base import Step\nfrom rllab.misc.overrides import overrides\nfrom rllab.misc import logger\n\nfrom rllab.envs.mujoco.mujoco_env import q_mul... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\nfrom rllab.envs.mujoco.mujoco_env import MujocoEnv\nfrom rllab.core.serializable import Serializable\nfrom rllab.envs.base import Step\nfrom rllab.misc.overrides import overrides\nfrom rllab.misc import logger\n\nfrom rllab.envs.mujoco.mujoco_... | ```python
from rllab.envs.mujoco.mujoco_env import MujocoEnv
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.misc.overrides import overrides
from rllab.misc import logger
from rllab.envs.mujoco.mujoco_env import q_mult, q_inv
import numpy as np
import math
class AntEnv(MujocoEnv, Serializable):
    """Quadruped locomotion task: reward forward velocity of the torso."""

    FILE = 'ant.xml'
    # Index in qpos where the torso orientation quaternion starts.
    ORI_IND = 3

    def __init__(self, *args, **kwargs):
        super(AntEnv, self).__init__(*args, **kwargs)
        Serializable.__init__(self, *args, **kwargs)

    def get_current_obs(self):
        """Return the flat observation: joint positions/velocities,
        clipped external contact forces, and torso rotation/position."""
        return np.concatenate([
            self.model.data.qpos.flat,
            self.model.data.qvel.flat,
            np.clip(self.model.data.cfrc_ext, -1, 1).flat,
            self.get_body_xmat("torso").flat,
            self.get_body_com("torso"),
        ]).reshape(-1)

    def step(self, action):
        """Advance the simulation one step and return Step(obs, reward, done)."""
        self.forward_dynamics(action)
        comvel = self.get_body_comvel("torso")
        forward_reward = comvel[0]
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
        # Fixed: the original had a stray trailing comma after np.sum(...),
        # which made contact_cost a 1-tuple and silently promoted the
        # reward arithmetic to a NumPy array instead of a scalar.
        contact_cost = 0.5 * 1e-3 * np.sum(
            np.square(np.clip(self.model.data.cfrc_ext, -1, 1)))
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        state = self._state
        # Episode ends when the state diverges or the torso height
        # leaves the [0.2, 1.0] band (fallen over / launched).
        notdone = np.isfinite(state).all() \
            and state[2] >= 0.2 and state[2] <= 1.0
        done = not notdone
        ob = self.get_current_obs()
        return Step(ob, float(reward), done)

    @overrides
    def get_ori(self):
        """Return the torso heading angle (radians) in the x-y plane."""
        ori = [0, 1, 0, 0]
        rot = self.model.data.qpos[self.__class__.ORI_IND:self.__class__.ORI_IND + 4]  # take the quaternion
        ori = q_mult(q_mult(rot, ori), q_inv(rot))[1:3]  # project onto x-y plane
        ori = math.atan2(ori[1], ori[0])
        return ori

    @overrides
    def log_diagnostics(self, paths):
        """Log forward-progress statistics: last x-position of the torso
        minus the first, per rollout path."""
        progs = [
            path["observations"][-1][-3] - path["observations"][0][-3]
            for path in paths
        ]
        logger.record_tabular('AverageForwardProgress', np.mean(progs))
        logger.record_tabular('MaxForwardProgress', np.max(progs))
        logger.record_tabular('MinForwardProgress', np.min(progs))
        logger.record_tabular('StdForwardProgress', np.std(progs))
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.