| text (stringlengths 4 to 1.02M) | meta (dict) |
|---|---|
from __future__ import absolute_import, unicode_literals
import collections
import warnings
from importlib import import_module
from django import forms
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.template.loader import render_to_string
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
# unicode_literals ensures that any render / __str__ methods returning HTML via calls to mark_safe / format_html
# return a SafeText, not SafeBytes; necessary so that it doesn't get re-encoded when the template engine
# calls force_text, which would cause it to lose its 'safe' flag
from wagtail.utils.deprecation import RemovedInWagtail111Warning
from wagtail.wagtailcore.blocks.utils import accepts_kwarg
__all__ = ['BaseBlock', 'Block', 'BoundBlock', 'DeclarativeSubBlocksMetaclass', 'BlockWidget', 'BlockField']
# =========================================
# Top-level superclasses and helper objects
# =========================================
class BaseBlock(type):
def __new__(mcs, name, bases, attrs):
meta_class = attrs.pop('Meta', None)
cls = super(BaseBlock, mcs).__new__(mcs, name, bases, attrs)
# Get all the Meta classes from all the bases
meta_class_bases = [meta_class] + [getattr(base, '_meta_class', None)
for base in bases]
meta_class_bases = tuple(filter(bool, meta_class_bases))
cls._meta_class = type(str(name + 'Meta'), meta_class_bases, {})
return cls
class Block(six.with_metaclass(BaseBlock, object)):
name = ''
creation_counter = 0
TEMPLATE_VAR = 'value'
class Meta(object):
label = None
icon = "placeholder"
classname = None
"""
Setting a 'dependencies' list serves as a shortcut for the common case where a complex block type
(such as struct, list or stream) relies on one or more inner block objects, and needs to ensure that
the responses from the 'media' and 'html_declarations' include the relevant declarations for those inner
blocks, as well as its own. Specifying these inner block objects in a 'dependencies' list means that
the base 'media' and 'html_declarations' methods will return those declarations; the outer block type can
then add its own declarations to the list by overriding those methods and using super().
"""
dependencies = []
def __new__(cls, *args, **kwargs):
# adapted from django.utils.deconstruct.deconstructible; capture the arguments
# so that we can return them in the 'deconstruct' method
obj = super(Block, cls).__new__(cls)
obj._constructor_args = (args, kwargs)
return obj
def all_blocks(self):
"""
Return a list consisting of self and all block objects that are direct or indirect dependencies
of this block
"""
result = [self]
for dep in self.dependencies:
result.extend(dep.all_blocks())
return result
def all_media(self):
media = forms.Media()
for block in self.all_blocks():
media += block.media
return media
def all_html_declarations(self):
declarations = filter(bool, [block.html_declarations() for block in self.all_blocks()])
return mark_safe('\n'.join(declarations))
def __init__(self, **kwargs):
self.meta = self._meta_class()
for attr, value in kwargs.items():
setattr(self.meta, attr, value)
# Increase the creation counter, and save our local copy.
self.creation_counter = Block.creation_counter
Block.creation_counter += 1
self.definition_prefix = 'blockdef-%d' % self.creation_counter
self.label = self.meta.label or ''
def set_name(self, name):
self.name = name
if not self.meta.label:
self.label = capfirst(force_text(name).replace('_', ' '))
@property
def media(self):
return forms.Media()
def html_declarations(self):
"""
Return an HTML fragment to be rendered on the form page once per block definition -
as opposed to once per occurrence of the block. For example, the block definition
ListBlock(CharBlock(label="Product"), label="Shopping list")
needs to output a <script type="text/template"></script> block containing the HTML for
a 'product' text input, so that these can be dynamically added to the list. This
template block must only occur once in the page, even if there are multiple 'shopping list'
blocks on the page.
Any element IDs used in this HTML fragment must begin with definition_prefix.
(More precisely, they must either be definition_prefix itself, or begin with definition_prefix
followed by a '-' character)
"""
return ''
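# --- Illustrative sketch (not part of the original module): a hypothetical block
# overriding html_declarations() as described above. The '-template' id suffix and
# the inner input markup are assumptions; the only requirement taken from the
# docstring is that element IDs begin with definition_prefix.
from django.utils.html import format_html

class ExampleProductListBlock(Block):
    def html_declarations(self):
        # emitted once per block definition, not once per occurrence on the page
        return format_html(
            '<script type="text/template" id="{0}-template">'
            '<input type="text" name="__PREFIX__-product">'
            '</script>',
            self.definition_prefix)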
def js_initializer(self):
"""
Returns a Javascript expression string, or None if this block does not require any
Javascript behaviour. This expression evaluates to an initializer function, a function that
takes the ID prefix and applies JS behaviour to the block instance with that value and prefix.
The parent block of this block (or the top-level page code) must ensure that this
expression is not evaluated more than once. (The resulting initializer function can and will be
called as many times as there are instances of this block, though.)
"""
return None
def render_form(self, value, prefix='', errors=None):
"""
Render the HTML for this block with 'value' as its content.
"""
raise NotImplementedError('%s.render_form' % self.__class__)
def value_from_datadict(self, data, files, prefix):
raise NotImplementedError('%s.value_from_datadict' % self.__class__)
def value_omitted_from_data(self, data, files, name):
"""
Used only for top-level blocks wrapped by BlockWidget (i.e.: typically only StreamBlock)
to inform ModelForm logic on Django >=1.10.2 whether the field is absent from the form
submission (and should therefore revert to the field default).
"""
return name not in data
def bind(self, value, prefix=None, errors=None):
"""
Return a BoundBlock which represents the association of this block definition with a value
and a prefix (and optionally, a ValidationError to be rendered).
BoundBlock primarily exists as a convenience to allow rendering within templates:
bound_block.render() rather than blockdef.render(value, prefix) which can't be called from
within a template.
"""
return BoundBlock(self, value, prefix=prefix, errors=errors)
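# --- Illustrative usage sketch (not part of the original module): binding a block
# definition to a value so it can be rendered, per the docstring above. CharBlock
# and the 'page-body' prefix are assumptions for the example.
def _example_bind_usage():
    # deferred import to avoid a circular import with this module
    from wagtail.wagtailcore.blocks import CharBlock
    block = CharBlock()
    bound = block.bind('hello world', prefix='page-body')
    form_html = bound.render_form()   # editing widget HTML for this value and prefix
    front_end = bound.render()        # front-end rendering of the value
    return form_html, front_end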
def get_default(self):
"""
Return this block's default value (conventionally found in self.meta.default),
converted to the value type expected by this block. This caters for the case
where that value type is not something that can be expressed statically at
model definition time (e.g. something like StructValue which incorporates a
pointer back to the block definition object).
"""
return self.meta.default
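# --- Illustrative sketch (not part of the original module): a hypothetical block
# whose native value should not be shared between instances, so get_default()
# converts meta.default rather than returning it directly (the docstring's
# StructValue case is the real-world analogue).
class ExampleListishBlock(Block):
    class Meta:
        default = []

    def get_default(self):
        # return a fresh copy so separate block instances never share one list
        return list(self.meta.default)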
def prototype_block(self):
"""
Return a BoundBlock that can be used as a basis for new empty block instances to be added on the fly
(new list items, for example). This will have a prefix of '__PREFIX__' (to be dynamically replaced with
a real prefix when it's inserted into the page) and a value equal to the block's default value.
"""
return self.bind(self.get_default(), '__PREFIX__')
def clean(self, value):
"""
Validate value and return a cleaned version of it, or throw a ValidationError if validation fails.
The thrown ValidationError instance will subsequently be passed to render() to display the
error message; the ValidationError must therefore include all detail necessary to perform that
rendering, such as identifying the specific child block(s) with errors, in the case of nested
blocks. (It is suggested that you use the 'params' attribute for this; using error_list /
error_dict is unreliable because Django tends to hack around with these when nested.)
"""
return value
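# --- Illustrative sketch (not part of the original module): a clean() override that
# reports failure detail through 'params', as the docstring suggests. The 255-char
# limit and the block name are assumptions for the example.
from django.core.exceptions import ValidationError

class ExampleLimitedCharBlock(Block):
    def clean(self, value):
        if value is not None and len(value) > 255:
            raise ValidationError(
                'Value is too long',
                params={'max_length': 255, 'actual_length': len(value)})
        return value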
def to_python(self, value):
"""
Convert 'value' from a simple (JSON-serialisable) value to a (possibly complex) Python value to be
used in the rest of the block API and within front-end templates. In simple cases this might be
the value itself; alternatively, it might be a 'smart' version of the value which behaves mostly
like the original value but provides a native HTML rendering when inserted into a template; or it
might be something totally different (e.g. an image chooser will use the image ID as the clean
value, and turn this back into an actual image object here).
"""
return value
def get_prep_value(self, value):
"""
The reverse of to_python; convert the python value into JSON-serialisable form.
"""
return value
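# --- Illustrative sketch (not part of the original module): the to_python /
# get_prep_value round trip for a chooser-style block, matching the image-chooser
# example in the to_python docstring. The get_image_model import path is an
# assumption for this Wagtail version.
class ExampleImageChooserBlock(Block):
    def to_python(self, value):
        # the stored JSON value is an image ID; turn it back into a model instance
        if value is None:
            return None
        from wagtail.wagtailimages.models import get_image_model
        return get_image_model().objects.get(pk=value)

    def get_prep_value(self, value):
        # reverse of to_python: store only the ID
        return value.pk if value is not None else None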
def get_context(self, value, parent_context=None):
"""
Return a dict of context variables (derived from the block value and combined with the parent_context)
to be used as the template context when rendering this value through a template.
"""
context = parent_context or {}
context.update({
'self': value,
self.TEMPLATE_VAR: value,
})
return context
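# --- Illustrative sketch (not part of the original module): extending get_context()
# while keeping the optional parent_context keyword that render() below relies on.
# The extra 'word_count' variable is an assumption for the example; force_text is
# already imported at the top of this module.
class ExampleTextBlock(Block):
    def get_context(self, value, parent_context=None):
        context = super(ExampleTextBlock, self).get_context(
            value, parent_context=parent_context)
        context['word_count'] = len(force_text(value).split())
        return context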
def render(self, value, context=None):
"""
Return a text rendering of 'value', suitable for display on templates. By default, this will
use a template (with the passed context, supplemented by the result of get_context) if a
'template' property is specified on the block, and fall back on render_basic otherwise.
"""
template = getattr(self.meta, 'template', None)
if not template:
return self.render_basic(value, context=context)
if not accepts_kwarg(self.get_context, 'parent_context'):
class_with_render_method = next(
(cls for cls in type(self).__mro__ if 'get_context' in cls.__dict__),
type(self)
)
warnings.warn(
"The get_context method on %s needs to be updated to accept an optional 'parent_context' "
"keyword argument" % class_with_render_method,
category=RemovedInWagtail111Warning
)
new_context = dict(context) if context is not None else {}
new_context.update(self.get_context(value))
return mark_safe(render_to_string(template, new_context))
if context is None:
new_context = self.get_context(value)
else:
new_context = self.get_context(value, parent_context=dict(context))
return mark_safe(render_to_string(template, new_context))
def get_api_representation(self, value, context=None):
"""
Can be used to customise the API response and defaults to the value returned by get_prep_value.
"""
return self.get_prep_value(value)
def render_basic(self, value, context=None):
"""
Return a text rendering of 'value', suitable for display on templates. render() will fall back on
this if the block does not define a 'template' property.
"""
return force_text(value)
def get_searchable_content(self, value):
"""
Returns a list of strings containing text content within this block to be used in a search engine.
"""
return []
def check(self, **kwargs):
"""
Hook for the Django system checks framework -
returns a list of django.core.checks.Error objects indicating validity errors in the block
"""
return []
def _check_name(self, **kwargs):
"""
Helper method called by container blocks as part of the system checks framework,
to validate that this block's name is a valid identifier.
(Not called universally, because not all blocks need names)
"""
errors = []
if not self.name:
errors.append(checks.Error(
"Block name %r is invalid" % self.name,
hint="Block name cannot be empty",
obj=kwargs.get('field', self),
id='wagtailcore.E001',
))
if ' ' in self.name:
errors.append(checks.Error(
"Block name %r is invalid" % self.name,
hint="Block names cannot contain spaces",
obj=kwargs.get('field', self),
id='wagtailcore.E001',
))
if '-' in self.name:
errors.append(checks.Error(
"Block name %r is invalid" % self.name,
"Block names cannot contain dashes",
obj=kwargs.get('field', self),
id='wagtailcore.E001',
))
if self.name and self.name[0].isdigit():
errors.append(checks.Error(
"Block name %r is invalid" % self.name,
"Block names cannot begin with a digit",
obj=kwargs.get('field', self),
id='wagtailcore.E001',
))
return errors
def id_for_label(self, prefix):
"""
Return the ID to be used as the 'for' attribute of <label> elements that refer to this block,
when the given field prefix is in use. Return None if no 'for' attribute should be used.
"""
return None
@property
def required(self):
"""
Flag used to determine whether labels for this block should display a 'required' asterisk.
False by default, since Block does not provide any validation of its own - it's up to subclasses
to define what required-ness means.
"""
return False
def deconstruct(self):
# adapted from django.utils.deconstruct.deconstructible
module_name = self.__module__
name = self.__class__.__name__
# Make sure it's actually there and not an inner class
module = import_module(module_name)
if not hasattr(module, name):
raise ValueError(
"Could not find object %s in %s.\n"
"Please note that you cannot serialize things like inner "
"classes. Please move the object into the main module "
"body to use migrations.\n"
% (name, module_name))
# if the module defines a DECONSTRUCT_ALIASES dictionary, see if the class has an entry in there;
# if so, use that instead of the real path
try:
path = module.DECONSTRUCT_ALIASES[self.__class__]
except (AttributeError, KeyError):
path = '%s.%s' % (module_name, name)
return (
path,
self._constructor_args[0],
self._constructor_args[1],
)
def __eq__(self, other):
"""
The deep_deconstruct method in django.db.migrations.autodetector.MigrationAutodetector does not
recurse into arbitrary lists and dicts. As a result, when it is passed a field such as:
StreamField([
('heading', CharBlock()),
])
the CharBlock object will be left in its constructed form. This causes problems when
MigrationAutodetector compares two separate instances of the StreamField from different project
states: since the CharBlocks are different objects, it will report a change where there isn't one.
To prevent this, we implement the equality operator on Block instances such that the two CharBlocks
are reported as equal. Since block objects are intended to be immutable with the exception of
set_name(), it is sufficient to compare the 'name' property and the constructor args/kwargs of the
two block objects. The 'deconstruct' method provides a convenient way to access the latter.
"""
if not isinstance(other, Block):
# if the other object isn't a block at all, it clearly isn't equal.
return False
# Note that we do not require the two blocks to be of the exact same class. This is because
# we may wish the following blocks to be considered equal:
#
# class FooBlock(StructBlock):
# first_name = CharBlock()
# surname = CharBlock()
#
# class BarBlock(StructBlock):
# first_name = CharBlock()
# surname = CharBlock()
#
# FooBlock() == BarBlock() == StructBlock([('first_name', CharBlock()), ('surname', CharBlock())])
#
# For this to work, StructBlock will need to ensure that 'deconstruct' returns the same signature
# in all of these cases, including reporting StructBlock as the path:
#
# FooBlock().deconstruct() == (
# 'wagtail.wagtailcore.blocks.StructBlock',
# [('first_name', CharBlock()), ('surname', CharBlock())],
# {}
# )
#
# This has the bonus side effect that the StructBlock field definition gets frozen into
# the migration, rather than leaving the migration vulnerable to future changes to FooBlock / BarBlock
# in models.py.
return (self.name == other.name) and (self.deconstruct() == other.deconstruct())
def __ne__(self, other):
return not self.__eq__(other)
# Making block instances hashable in a way that's consistent with __eq__ is non-trivial, because
# self.deconstruct() is liable to contain unhashable data (e.g. lists and dicts). So let's set
# Block to be explicitly unhashable - Python 3 will do this automatically when defining __eq__,
# but Python 2 won't, and we'd like the behaviour to be consistent on both.
__hash__ = None
@python_2_unicode_compatible
class BoundBlock(object):
def __init__(self, block, value, prefix=None, errors=None):
self.block = block
self.value = value
self.prefix = prefix
self.errors = errors
def render_form(self):
return self.block.render_form(self.value, self.prefix, errors=self.errors)
def render(self, context=None):
return self.block.render(self.value, context=context)
def render_as_block(self, context=None):
"""
Alias for render; the include_block tag will specifically check for the presence of a method
with this name. (This is because {% include_block %} is just as likely to be invoked on a bare
value as a BoundBlock. If we looked for a `render` method instead, we'd run the risk of finding
an unrelated method that just happened to have that name - for example, when called on a
PageChooserBlock it could end up calling page.render.)
"""
return self.block.render(self.value, context=context)
def id_for_label(self):
return self.block.id_for_label(self.prefix)
def __str__(self):
"""Render the value according to the block's native rendering"""
return self.block.render(self.value)
class DeclarativeSubBlocksMetaclass(BaseBlock):
"""
Metaclass that collects sub-blocks declared on the base classes.
(cheerfully stolen from https://github.com/django/django/blob/master/django/forms/forms.py)
"""
def __new__(mcs, name, bases, attrs):
# Collect sub-blocks declared on the current class.
# These are available on the class as `declared_blocks`
current_blocks = []
for key, value in list(attrs.items()):
if isinstance(value, Block):
current_blocks.append((key, value))
value.set_name(key)
attrs.pop(key)
current_blocks.sort(key=lambda x: x[1].creation_counter)
attrs['declared_blocks'] = collections.OrderedDict(current_blocks)
new_class = (super(DeclarativeSubBlocksMetaclass, mcs).__new__(
mcs, name, bases, attrs))
# Walk through the MRO, collecting all inherited sub-blocks, to make
# the combined `base_blocks`.
base_blocks = collections.OrderedDict()
for base in reversed(new_class.__mro__):
# Collect sub-blocks from base class.
if hasattr(base, 'declared_blocks'):
base_blocks.update(base.declared_blocks)
# Field shadowing.
for attr, value in base.__dict__.items():
if value is None and attr in base_blocks:
base_blocks.pop(attr)
new_class.base_blocks = base_blocks
return new_class
# ========================
# django.forms integration
# ========================
class BlockWidget(forms.Widget):
"""Wraps a block object as a widget so that it can be incorporated into a Django form"""
# Flag used by Django 1.10.1 (only) to indicate that this widget will not necessarily submit
# a postdata item with a name that matches the field name -
# see https://github.com/django/django/pull/7068, https://github.com/wagtail/wagtail/issues/2994
dont_use_model_field_default_for_empty_data = True
def __init__(self, block_def, attrs=None):
super(BlockWidget, self).__init__(attrs=attrs)
self.block_def = block_def
def render_with_errors(self, name, value, attrs=None, errors=None):
bound_block = self.block_def.bind(value, prefix=name, errors=errors)
js_initializer = self.block_def.js_initializer()
if js_initializer:
js_snippet = """
<script>
$(function() {
var initializer = %s;
initializer('%s');
})
</script>
""" % (js_initializer, name)
else:
js_snippet = ''
return mark_safe(bound_block.render_form() + js_snippet)
def render(self, name, value, attrs=None):
return self.render_with_errors(name, value, attrs=attrs, errors=None)
@property
def media(self):
return self.block_def.all_media()
def value_from_datadict(self, data, files, name):
return self.block_def.value_from_datadict(data, files, name)
def value_omitted_from_data(self, data, files, name):
return self.block_def.value_omitted_from_data(data, files, name)
class BlockField(forms.Field):
"""Wraps a block object as a form field so that it can be incorporated into a Django form"""
def __init__(self, block=None, **kwargs):
if block is None:
raise ImproperlyConfigured("BlockField was not passed a 'block' object")
self.block = block
if 'widget' not in kwargs:
kwargs['widget'] = BlockWidget(block)
super(BlockField, self).__init__(**kwargs)
def clean(self, value):
return self.block.clean(value)
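# --- Illustrative usage sketch (not part of the original module): wiring a block
# into an ordinary Django form via BlockField, per the class docstring. CharBlock
# and the form/field names are assumptions; the import is deferred to avoid a
# circular import with this module.
def _example_block_form():
    from wagtail.wagtailcore.blocks import CharBlock

    class ExampleForm(forms.Form):
        body = BlockField(block=CharBlock(required=True))

    return ExampleForm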
DECONSTRUCT_ALIASES = {
Block: 'wagtail.wagtailcore.blocks.Block',
}
|
{
"content_hash": "98eb4a756eef47c4f2f12bd7d1f39c9a",
"timestamp": "",
"source": "github",
"line_count": 563,
"max_line_length": 114,
"avg_line_length": 41.619893428063946,
"alnum_prop": 0.6264510071696825,
"repo_name": "nutztherookie/wagtail",
"id": "1531762983998db72acfea526fc6468c55204c50",
"size": "23432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtail/wagtailcore/blocks/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "179741"
},
{
"name": "HTML",
"bytes": "316772"
},
{
"name": "JavaScript",
"bytes": "124435"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "2867938"
},
{
"name": "Shell",
"bytes": "7997"
}
],
"symlink_target": ""
}
|
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
infos = GSGlyphsInfo.alloc().init()
font = Glyphs.font
build_glyphs = False
names = []
for info in infos.glyphInfos():
if (
not info.components
or info.name in font.glyphs
or info.name.endswith(".half")
or info.name.endswith(".full")
or info.name.endswith("mod")
or info.name.endswith("-math")
):
continue
component_missing = False
for c in info.components:
if c.name not in font.glyphs:
# print "Skipping %s because of missing component %s..." % (info.name, c.name)
component_missing = True
break
if component_missing:
continue
if build_glyphs:
font.glyphs.append(GSGlyph(info.name))
glyph = font.glyphs[info.name]
for layer in glyph.layers:
for c in info.components:
layer.components.append(GSComponent(c.name))
else:
names.append(info.name)
print(" ".join(names))
|
{
"content_hash": "b4b5078d3edeb58b315cb168ab0b3ca9",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 90,
"avg_line_length": 24.282608695652176,
"alnum_prop": 0.5926589077887198,
"repo_name": "jenskutilek/Glyphs-Scripts",
"id": "09a4bf26edc0edaf66a54951c87d0a095ef274b7",
"size": "1154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Glyphs/Add All Possible Glyphs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78578"
}
],
"symlink_target": ""
}
|
import sys
from conftest import rvo_output, rvo_err
from click.testing import CliRunner
from rvo import cli
def test_add_all_parameters(isatty_true):
options = ['add', '-t', 'test', '-c', 'test', '--content', 'test']
output = ['Document "test" created.']
rvo_output(options,output)
def test_add_tags(isatty_true):
options = ['add', '-t', 'test', '--content', 'test']
output = ['Document "test" created.']
rvo_output(options,output)
def test_add_title_test(isatty_true):
options = ['add', '-t', 'test', '--content', 'THIS IS A TITLE']
output = ['Document "THIS IS A TITLE" created.']
rvo_output(options,output)
def test_add_title_test_gnarf(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-c', 'töstcät', '-x', 'gnarf'])
assert not result.exception
assert result.output.strip().endswith('Document "gnarf" created.')
def test_add_title_test_gnarf_multiline(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-c', 'töstcät', '-x', 'gnarf\nfoo'])
assert not result.exception
assert result.output.strip().endswith('Document "gnarf" created.')
def test_add_title_test_hashtag(isatty_true):
options = ['add', '-t', 'test', '--content', '# THIS IS A TITLE']
output = ['Document "THIS IS A TITLE" created.']
rvo_output(options,output)
def test_add_title_test_hashtag_multiline(isatty_true):
options = ['add', '-t', 'test', '--content', '# THIS IS A TITLE\nmultiline']
output = ['Document "THIS IS A TITLE" created.']
rvo_output(options,output)
def test_add_very_long_title(isatty_true):
options = ['add', '-t', 'test', '--content', '# THIS IS A VERY VERY LONG NEVER ENDING TITLE THAT EXCEEDS LIMITS']
output = ['Document "THIS IS A VERY VERY LONG NEVER ENDING TITLE THAT E" created.']
rvo_output(options,output)
def test_add_no_parameters(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add'])
assert result.output.strip().endswith('Document "TEST" created.')
assert not result.exception
def test_add_one_parameters_tag(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-t', 'testtag'])
assert result.output.strip().endswith('Document "TEST" created.')
assert not result.exception
def test_add_utf8_cat(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-c', 'töstcät'])
assert result.output.strip().endswith('Document "TEST" created.')
assert not result.exception
def test_add_utf8_cat_multi(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-c', 'tüütüü', '-c', 'töstcät'])
assert result.output.strip().endswith('Document "TEST" created.')
assert not result.exception
def test_add_utf8_tag(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-t', 'töstcät'])
assert result.output.strip().endswith('Document "TEST" created.')
assert not result.exception
def test_add_utf8_tag_multi(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-t', 'tüütüü', '-t', 'töstcät'])
assert result.output.strip().endswith('Document "TEST" created.')
assert not result.exception
def test_add_encrypt_by_parameter_wrong_pw(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-e', '-p', 'thispasswordistotallywrong', '-t', 'encryption', '-c', 'test'])
assert result.output.strip().endswith('Invalid Password')
assert result.exception
def test_add_encrypt_by_parameter(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-e', '-p', 'test123', '-t', 'encryption', '-c', 'test'])
assert result.output.strip().endswith('Document "TEST" created.')
assert not result.exception
def test_add_encrypt_by_input(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-e', '-t', 'encryption', '-c', 'test'], input="test123\n")
assert result.output.strip().endswith('Document "TEST" created.')
assert not result.exception
def test_add_encrypt_by_input_with_content(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-e', '-t', 'encryption', '-x', 'TEST', '-c', 'test'], input="test123\n")
assert result.output.strip().endswith('Document "TEST" created.')
assert not result.exception
def test_add_encrypt_by_input_wrong_pw(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-e', '-t', 'encryption', '-c', 'test'], input="test2123\n")
assert result.output.strip().endswith('Invalid Password')
assert result.exception
def test_add_read_from_stdin(isatty_false):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add'], input="Schwifty\nSchwifty..lol\nMorty\n\n")
assert result.output.strip().endswith('Document "Schwifty" created.')
assert not result.exception
def test_add_read_from_stdin_with_cat(isatty_false):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-c', 'test'], input="Schwifty\nSchwifty..lol\nMorty\n\n")
assert result.output.strip().endswith('Document "Schwifty" created.')
assert not result.exception
def test_add_read_from_stdin_with_tag(isatty_false):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-t', 'tag'], input="Schwifty\nSchwifty..lol\nMorty\n\n")
assert not result.exception
assert result.output.strip().endswith('Document "Schwifty" created.')
def test_add_conflicting_stdin_reading(isatty_false):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-e'], input="Schwifty\nSchwifty..lol\nMorty\n\n")
assert result.exception
assert result.output.strip().endswith('Invalid Password')
def test_add_location_germany(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-l', 'Nuremberg', '-c', 'test'])
assert result.output.strip().endswith('Document "TEST" created.')
assert not result.exception
def test_add_location_invalid(isatty_true):
runner = CliRunner()
result = runner.invoke(cli.cli, ['add', '-l', 'DOESNOTEXISTTOWNATLEASTIHOPE', '-c', 'test'])
assert result.exception
|
{
"content_hash": "f3c656fac22c9e8bb6d3154d825ff145",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 120,
"avg_line_length": 42.31972789115646,
"alnum_prop": 0.6651663719659219,
"repo_name": "noqqe/rvo",
"id": "27e97720b7f7607b81cff91683a91c7ab63ddf6f",
"size": "6288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_add.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96671"
},
{
"name": "Shell",
"bytes": "724"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_tatooine_evil_hermit_small2.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "4bc308d0ab322a1d21557aba1d45b8a3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 24.076923076923077,
"alnum_prop": 0.6964856230031949,
"repo_name": "anhstudios/swganh",
"id": "9d70b6ea78e8926f52044988ae1af50496ee769e",
"size": "458",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/building/poi/shared_tatooine_evil_hermit_small2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import mock
import tempfile
from . import resize_image
from digits import test_utils
test_utils.skipIfNotFramework('none')
class TestOutputValidation():
def test_no_filename(self):
assert resize_image.validate_output_file(None), 'All new files should be valid'
@mock.patch('os.access')
def test_not_writable(self, mock_access):
mock_access.return_value = False
with tempfile.NamedTemporaryFile('r') as f:
assert not resize_image.validate_output_file(f.name), 'validation should not pass on unwritable file'
def test_normal(self):
with tempfile.NamedTemporaryFile('r') as f:
assert resize_image.validate_output_file(f.name), 'validation should pass on temporary file'
class TestInputValidation():
def test_does_not_exist(self):
assert not resize_image.validate_input_file(''), 'validation should not pass on missing file'
@mock.patch('os.access')
def test_unreadable_file(self, mock_access):
mock_access.return_value = False
with tempfile.NamedTemporaryFile('r') as f:
assert not resize_image.validate_input_file(f.name), 'validation should not pass on unreadable file'
class TestRangeValidation():
def test_number_none_and_not_allowed(self):
assert not resize_image.validate_range(
None, allow_none=False), 'number=None should not be allowed with allow_none=False'
def test_number_not_float_compatible(self):
value = 'a'
assert not resize_image.validate_range(value), 'number=%s should not be accepted' % value
def test_number_below_min(self):
assert not resize_image.validate_range(0, min_value=1), 'validation should not pass with number < min_value'
def test_number_above_max(self):
assert not resize_image.validate_range(2, max_value=1), 'validation should not pass with number > max_value'
def test_range(self):
assert resize_image.validate_range(
5, min_value=0, max_value=255), 'validation should pass with 5 in range (0, 255)'
|
{
"content_hash": "38ccbd7a88d75d77bd2efb6b3319979a",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 116,
"avg_line_length": 36.24561403508772,
"alnum_prop": 0.6902226524685382,
"repo_name": "Deepomatic/DIGITS",
"id": "1ee383dca6dc2406e708cbecf775bc9159558d53",
"size": "2136",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "digits/tools/test_resize_image.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4325"
},
{
"name": "HTML",
"bytes": "307884"
},
{
"name": "JavaScript",
"bytes": "52712"
},
{
"name": "Lua",
"bytes": "110641"
},
{
"name": "Makefile",
"bytes": "113"
},
{
"name": "Protocol Buffer",
"bytes": "384"
},
{
"name": "Python",
"bytes": "968806"
},
{
"name": "Shell",
"bytes": "13323"
}
],
"symlink_target": ""
}
|
import sys
import os
import os.path
import re
import datetime, time
# local configuration details
BY_ROW = False
TABLE_COLS = 6
# default stylesheet details
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_DIR = os.path.abspath(THIS_DIR + '/templates')
STYLESHEET = os.path.join(TEMPLATE_DIR, 'bcmd.css')
def writeDoc(model, config):
with open(os.path.join(config['outdir'], config['html']), 'w') as f:
printHeader(f, model, config)
printModelDescription(f, model, config)
printInputs(f, model, config)
printOutputs(f, model, config)
printExternals(f, model, config)
printTags(f, model, config)
printDiffs(f, model, config)
printAlgs(f, model, config)
printIntermeds(f, model, config)
printParameters(f, model, config)
printEmbeds(f, model, config)
printFooter(f, model, config)
def printHeader(file, model, config):
print >> file, '<html>'
print >> file, '<head>'
print >> file, '<title>Model %s Generated Documentation</title>' % config['name']
css = config.get('css-src', None)
if css is None:
css = STYLESHEET
if config.get('css-embed', True) and os.path.isfile(css):
print >> file, '<style media="screen" type="text/css">'
with open(css) as f:
for line in f:
file.write(line)
print >> file, '\n</style>'
else:
print >> file, '<link rel="stylesheet" type="text/css" href="%s" />' % css
print >> file, '</head>'
print >> file, '<body>'
print >> file, '<h1>Model information for %s</h1>' % config['name']
def printModelDescription(file, model, config):
print >> file, '<div class="overview">'
print >> file, '<h2>Description</h2>'
print >> file, '<p>'
for line in model['modeldocs']:
if line.startswith('+') or line.startswith('@') or line.startswith('$') or line.startswith('~'):
pass
elif line == '':
print >> file, '</p><p>'
else:
print >> file, escape(line)
print >> file, '</p>'
print >> file, '<div class="summary">'
print >> file, '<div>%d state variables ' % len(model['roots'])
print >> file, '(%d <a href="#section_differential">differential</a>' % len(model['diffs'])
print >> file, '%d <a href="#section_algebraic">algebraic</a>)' % len(model['algs'])
print >> file, '</div>'
print >> file, '<div>%d <a href="#section_intermediate">intermediate</a> variables (%d unused)</div>' % (len(model['intermeds']), len([x for x in model['intermeds'] if x in model['unused']]))
print >> file, '<div>%d <a href="#section_parameters">parameters</a> (%d unused)</div>' % (len(model['params']), len([x for x in model['params'] if x in model['unused']]))
print >> file, '<div>%d declared <a href="#section_inputs">inputs</a>,' % len(model['inputs'])
print >> file, '%d default <a href="#section_outputs">outputs</a></div>' % len(model['outputs'])
print >> file, '<div>%d declared <a href="#section_external">external variables</a></div>' % len(model['extern'])
print >> file, '<div>%d <a href="#section_tags">tags</a></div>' % len(model['tags'])
if model['embeds']:
print >> file, '<div>Model includes <a href="#section_embeds">embedded C code</a></div>'
print >> file, '</div>'
print >> file, '<div class="files">'
print >> file, '<div>Top level source file: <a href="file://%s">%s</a></div>' % (model['sources'][0][0], os.path.basename(model['sources'][0][1]))
if len(model['sources']) > 1:
print >> file, '<div>The following submodels are imported:</div>'
items = [ '<a href="file://%s">%s</a>' % (sub[1], sub[0]) for sub in model['sources'][1:] ]
tabulate(file, items, ncols=TABLE_COLS, byrow=BY_ROW)
else:
print >> file, '<div>No submodels are imported.</div>'
print >> file, '</div>'
def printTags(file, model, config):
print >> file, '<div class="tags">'
print >> file, '<a name="section_tags" />'
print >> file, '<h2>Tags</h2>'
if model['tags']:
for tag in sorted(model['tags'].keys(), key=lambda s: s.lower()):
print >> file, '<div class="tag">'
print >> file, '<h4><a name="_tag_%s">%s</a></h4>' % (tag, tag)
items = [ '<a href="#%s">%s</a>' % (name, name) for name in sorted(model['tags'][tag], key=lambda s: s.lower()) ]
tabulate(file, items, ncols=TABLE_COLS, byrow=BY_ROW)
print >> file, '</div>'
else:
print >> file, '<p>No tags are defined in this model.</p>'
print >> file, '</div>'
def printInputs(file, model, config):
print >> file, '<div class="inputs">'
print >> file, '<a name="section_inputs" />'
print >> file, '<h2>Inputs</h2>'
if model['inputs']:
items = [ '<a href="#%s">%s</a>' % (name, name) for name in sorted(model['inputs'], key=lambda s: s.lower()) ]
tabulate(file, items, ncols=TABLE_COLS, byrow=BY_ROW)
else:
print >> file, '<p>No inputs are declared for this model.</p>'
print >> file, '</div>'
def printOutputs(file, model, config):
print >> file, '<div class="outputs">'
print >> file, '<a name="section_outputs" />'
print >> file, '<h2>Outputs</h2>'
items = [ '<a href="#%s">%s</a>' % (name, name) for name in sorted(model['outputs'], key=lambda s: s.lower()) ]
tabulate(file, items, ncols=TABLE_COLS, byrow=BY_ROW)
print >> file, '</div>'
def printExternals(file, model, config):
print >> file, '<div class="external">'
print >> file, '<a name="section_external" />'
print >> file, '<h2>External Variables</h2>'
print >> file, '<p>External variables are expected to be defined in some other submodel, which has not been'
print >> file, 'imported in the present build. Here they will be treated as parameters and default to 0.</p>'
if model['extern']:
items = [ '<a href="#%s">%s</a>' % (name, name) for name in sorted(model['extern'], key=lambda s: s.lower()) ]
tabulate(file, items, ncols=TABLE_COLS, byrow=BY_ROW)
else:
print >> file, '<p>No external variables are declared for this model.</p>'
print >> file, '</div>'
# write a list of items as an HTML table, with optional 'decorations' on the tags
# note that the apparent number of columns may be less than specified if filling
# by column (because the last column may be empty)
def tabulate(file, items, ncols, table_decor='', row_decor='', cell_decor='', byrow=False):
nrows = len(items) // ncols
leftover = len(items) % ncols
if leftover > 0:
nrows = nrows + 1
print >> file, '<div class="tabular">'
print >> file, '<table %s>' % table_decor
for rr in range(nrows):
print >> file, '<tr %s>' % row_decor
for cc in range(ncols):
print >> file, '<td %s>' % cell_decor
if byrow:
idx = rr * ncols + cc
else:
idx = cc * nrows + rr
if idx < len(items):
print >> file, items[idx]
print >> file, '</td>'
print >> file, '</tr>'
print >> file, '</table>'
print >> file, '</div>'
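# --- Illustrative usage sketch (not part of the original module): with ncols=3 and
# the default byrow=False, six items are laid out column-first, so the first HTML
# row holds 'a c e' and the second 'b d f'; byrow=True gives 'a b c' then 'd e f'.
# sys is already imported at the top of this module.
def _example_tabulate():
    tabulate(sys.stdout, ['a', 'b', 'c', 'd', 'e', 'f'], ncols=3)
    tabulate(sys.stdout, ['a', 'b', 'c', 'd', 'e', 'f'], ncols=3, byrow=True)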
def printDiffs(file, model, config):
print >> file, '<div class="differentials">'
print >> file, '<a name="section_differential" />'
print >> file, '<h2>Differential Variables</h2>'
if model['diffs']:
items = [ '<a href="#%s">%s</a>' % (name, name) for name in sorted(model['diffs'], key=lambda s: s.lower()) ]
tabulate(file, items, ncols=TABLE_COLS, byrow=BY_ROW)
for name in sorted(model['diffs'], key=lambda s: s.lower()):
printVar(name, file, model, config, ['Differential'])
else:
print >> file, '<p>This model includes no differential state variables.</p>'
print >> file, '</div>'
def printAlgs(file, model, config):
print >> file, '<div class="algebraics">'
print >> file, '<a name="section_algebraic" />'
print >> file, '<h2>Algebraic Variables</h2>'
if model['algs']:
items = [ '<a href="#%s">%s</a>' % (name, name) for name in sorted(model['algs'], key=lambda s: s.lower()) ]
tabulate(file, items, ncols=TABLE_COLS, byrow=BY_ROW)
for name in sorted(model['algs'], key=lambda s: s.lower()):
printVar(name, file, model, config, ['Algebraic'])
else:
print >> file, '<p>This model includes no algebraic state variables.</p>'
print >> file, '</div>'
def printIntermeds(file, model, config):
print >> file, '<div class="intermediates">'
print >> file, '<a name="section_intermediate" />'
print >> file, '<h2>Intermediate Variables</h2>'
if model['intermeds']:
for name in sorted(model['intermeds'], key=lambda s: s.lower()):
printVar(name, file, model, config, ['Intermediate'])
else:
print >> file, '<p>This model includes no intermediate variables.</p>'
print >> file, '</div>'
def printParameters(file, model, config):
print >> file, '<div class="parameters">'
print >> file, '<a name="section_parameters" />'
print >> file, '<h2>Parameters</h2>'
if model['params']:
for name in sorted(model['params'], key=lambda s: s.lower()):
printVar(name, file, model, config, ['Parameter'])
else:
print >> file, '<p>This model has no parameters.</p>'
print >> file, '</div>'
def printEmbeds(file, model, config):
if model['embeds']:
print >> file, '<div class="embeds">'
print >> file, '<a name="section_embeds" />'
print >> file, '<h2>Embedded C Code</h2>'
if model['embeds']:
print >> file, '<pre>'
for line in model['embeds']:
print >> file, line
print >> file, '</pre>'
else:
print >> file, '<p>No embedded C is included in this model.</p>'
print >> file, '</div>'
def printFooter(file, model, config):
print >> file, '<div class="footer">'
print >> file, '<p>Generated by <a href="http://tinyurl.com/ucl-bcmd">BCMD</a> module bparser.info</p>'
print >> file, '<p>%s</p>' % datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
print >> file, '</div>'
print >> file, '</body>'
print >> file, '</html>'
def printVar(name, file, model, config, classes):
if name in model['extern']: classes = ['<b>Unsatisfied External</b>']
if name in model['chemicals']: classes.append('Species')
if name in model['inputs']: classes.append('Input')
if name in model['outputs']: classes.append('Output')
if name in model['unused']: classes.append('Unused')
print >> file, '<div class="symbol">'
print >> file, '<a name="%s" />' % name
print >> file, '<h4>%s</h4>' % name
sym = model['symbols'][name]
desc = []
for line in sym['docs']:
if line.startswith('+') or line.startswith('@') or line.startswith('$') or line.startswith('~'):
pass
elif line == '':
desc.append('</p><p>')
else:
desc.append(escape(line))
if desc:
print >> file, '<div class="description">'
print >> file, '<p>'
for line in desc:
print >> file, line
print >> file, '</p>'
print >> file, '</div>'
print >> file, '<div class="classes"><span class="label">Kind:</span> %s</div>' % ', '.join(classes)
if 'units' in sym:
print >> file, '<div class="units"><span class="label">Units:</span> %s</div>' % sym['units']
if sym['diffs']:
lhs = "%s'" % name
deps = set()
for aux in model['auxiliaries'][name]:
mass = aux[0]
if mass < 0:
mass = -mass
op = '-'
else:
op = '+'
if mass == 1:
mstr = ''
else:
mstr = '%s * ' % str(mass)
lhs = "%s %s %s %s'" % (lhs, op, mstr, aux[1])
deps |= set([aux[1]])
expr = '%s = %s' % (lhs, sym['diffs'][0]['expr'])
deps |= sym['diffs'][0]['depends']
elif sym['algs']:
expr = '%s = 0' % sym['algs'][0]['expr']
deps = sym['algs'][0]['depends']
else:
exprs = [x for x in sym['assigns'] if not x['init']]
if exprs:
expr = exprs[0]['expr']
deps = exprs[0]['depends']
else:
expr = ''
deps = []
if expr:
print >> file, '<div><span class="label">Expression:</span> %s</div>' % escape(expr)
deps = [ '<a href="#%s">%s</a>' % (x,x) for x in sorted(deps, key=lambda s: s.lower()) ]
print >> file, '<div><span class="label">Dependencies:</span> %s</div>' % ', '.join(deps)
init = [x for x in sym['assigns'] if x['init']]
if init:
print >> file, '<div><span class="label">Initialiser:</span> %s</div>' % init[0]['expr']
if init[0]['depends']:
ideps = [ '<a href="#%s">%s</a>' % (x,x) for x in sorted(init[0]['depends'], key=lambda s: s.lower()) ]
print >> file, '<div><span class="label">Initialiser Dependencies:</span> %s</div>' % ', '.join(ideps)
else:
print >> file, '<div><span class="label">Initialiser:</span> Not specified, defaults to 0</div>'
if sym['tags']:
tags = [ '<a href="#_tag_%s">%s</a>' % (x,x) for x in sorted(sym['tags'], key=lambda s: s.lower()) ]
print >> file, '<div><span class="label">Tags:</span> %s</div>' % ', '.join(tags)
print >> file, '</div>' # symbol
# currently very crude, to be expanded...
def escape(text):
text = text.replace('&', '&')
text = text.replace('<', '<')
text = text.replace('>', '>')
return text
|
{
"content_hash": "76b6a1a8d66d6fcbc8cb6deaf8c595ce",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 195,
"avg_line_length": 38.825,
"alnum_prop": 0.5441797238320097,
"repo_name": "buck06191/bcmd-web",
"id": "9236ddfd1106ed4450b3e1eb5835cb8419023919",
"size": "14014",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bparser/doc_html.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "66609"
},
{
"name": "CSS",
"bytes": "2658"
},
{
"name": "Fortran",
"bytes": "504385"
},
{
"name": "HTML",
"bytes": "41845"
},
{
"name": "JavaScript",
"bytes": "49641"
},
{
"name": "Makefile",
"bytes": "12251"
},
{
"name": "Python",
"bytes": "451973"
},
{
"name": "Shell",
"bytes": "15231"
},
{
"name": "TeX",
"bytes": "388"
}
],
"symlink_target": ""
}
|
from keystoneclient import base
from keystoneclient import exceptions
from keystoneclient.v3.contrib.fiware_roles.utils import ROLES_PATH
class RoleAssignment(base.Resource):
pass
class RoleAssignmentManager(base.CrudManager):
"""Manager class for manipulating user and organization roles assignments."""
resource_class = RoleAssignment
collection_key = 'role_assignments'
key = 'role_assignment'
base_url = ROLES_PATH
def list_user_role_assignments(self, user=None, organization=None,
application=None, default_organization=False):
"""Lists role assignments for users.
If no arguments are provided, all role assignments in the
system will be listed.
:param user: User to be used as query filter. (optional)
:param organization: Organization to be used as query filter.
(optional)
:param application: Application to be used as query
filter. (optional)
:param default_organization: If set to true, the endpoint will filter role assignments
only in the default_project_id and the organization param is ignored. (optional)
"""
query_params = {}
if user:
query_params['user_id'] = base.getid(user)
if organization:
query_params['organization_id'] = base.getid(organization)
if application:
query_params['application_id'] = base.getid(application)
if default_organization:
query_params['default_organization'] = True
base_url = self.base_url + '/users'
return super(RoleAssignmentManager, self).list(base_url=base_url,
**query_params)
def list_organization_role_assignments(self, organization=None,
application=None):
"""Lists role assignments for organizations.
If no arguments are provided, all role assignments in the
system will be listed.
:param organization: Organization to be used as query filter.
(optional)
:param application: Application to be used as query
filter. (optional)
"""
query_params = {}
if organization:
query_params['organization_id'] = base.getid(organization)
if application:
query_params['application_id'] = base.getid(application)
base_url = self.base_url + '/organizations'
return super(RoleAssignmentManager, self).list(base_url=base_url,
**query_params)
def create(self, **kwargs):
raise exceptions.MethodNotImplemented('Create not supported for'
' role assignments')
def update(self, **kwargs):
raise exceptions.MethodNotImplemented('Update not supported for'
' role assignments')
def get(self, **kwargs):
raise exceptions.MethodNotImplemented('Get not supported for'
' role assignments')
def find(self, **kwargs):
raise exceptions.MethodNotImplemented('Find not supported for'
' role assignments')
def put(self, **kwargs):
raise exceptions.MethodNotImplemented('Put not supported for'
' role assignments')
def delete(self, **kwargs):
raise exceptions.MethodNotImplemented('Delete not supported for'
' role assignments')
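# --- Illustrative usage sketch (not part of the original module): the query filters
# described in the docstrings above, expressed as calls. 'manager', 'some_user',
# 'some_org' and 'some_app' are assumed to already exist; constructing the client
# and attaching the manager to it is outside the scope of this file.
def _example_queries(manager, some_user, some_org, some_app):
    # all assignments for one user within one application
    by_user = manager.list_user_role_assignments(user=some_user,
                                                 application=some_app)
    # assignments in each user's default organization only
    by_default_org = manager.list_user_role_assignments(default_organization=True)
    # organization-level assignments for one organization
    by_org = manager.list_organization_role_assignments(organization=some_org)
    return by_user, by_default_org, by_org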
|
{
"content_hash": "0e5750b555540debf8c65028a4b9bd72",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 94,
"avg_line_length": 39.130434782608695,
"alnum_prop": 0.5966666666666667,
"repo_name": "ging/python-keystoneclient",
"id": "2289d563f0f94c32e31ffd0749d9bd7ea77cacda",
"size": "4774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystoneclient/v3/contrib/fiware_roles/role_assignments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1480821"
},
{
"name": "Shell",
"bytes": "7148"
}
],
"symlink_target": ""
}
|
"""Retriever script for Forest Inventory and Analysis
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import os
from retriever.lib.templates import Script
from retriever.lib.models import Table
from pkg_resources import parse_version
try:
from retriever.lib.defaults import VERSION
from retriever.lib.scripts import open_fr, open_fw
except ImportError:
from retriever import open_fr, open_fw, VERSION
class main(Script):
def __init__(self, **kwargs):
Script.__init__(self, **kwargs)
self.title = "Forest Inventory and Analysis"
self.name = "forest-inventory-analysis"
self.retriever_minimum_version = '2.0.dev'
self.version = '1.4.1'
self.ref = "http://fia.fs.fed.us/"
self.urls = {"main": "https://apps.fs.usda.gov/fia/datamart/CSV/",
'species': 'https://apps.fs.usda.gov/fia/datamart/CSV/REF_SPECIES.csv'}
self.keywords = ["plants", "continental-scale", "observational"]
self.citation = "DATEOFDOWNLOAD. Forest Inventory and Analysis Database, St. Paul, MN: U.S. Department of Agriculture, Forest Service, Northern Research Station. [Available only on internet: http://apps.fs.fed.us/fiadb-downloads/datamart.html]"
self.description = """WARNING: This dataset requires downloading many large files and will probably take several hours to finish installing."""
self.addendum = """This dataset requires downloading many large files - please be patient."""
if parse_version(VERSION) <= parse_version("2.0.0"):
self.shortname = self.name
self.name = self.title
self.tags = self.keywords
def download(self, engine=None, debug=False):
Script.download(self, engine, debug)
engine = self.engine
# download and create species table
table = Table('species')
self.engine.auto_create_table(table, url=self.urls['species'])
self.engine.insert_data_from_url(self.urls['species'])
# State abbreviations with the year annual inventory began for that state
stateslist = [('AL', 2001), ('AK', 2004), ('AZ', 2001), ('AR', 2000),
('CA', 2001), ('CO', 2002), ('CT', 2003), ('DE', 2004),
('FL', 2003), ('GA', 1998), ('ID', 2004), ('IL', 2001),
('IN', 1999), ('IA', 1999), ('KS', 2001), ('KY', 1999),
('LA', 2001), ('ME', 1999), ('MD', 2004), ('MA', 2003),
('MI', 2000), ('MN', 1999), ('MO', 1999), ('MS', 2006),
('MT', 2003), ('NE', 2001), ('NV', 2004), ('NH', 2002),
('NJ', 2004), ('NM', 1999), ('NY', 2002), ('NC', 2003),
('ND', 2001), ('OH', 2001), ('OK', 2008), ('OR', 2001),
('PA', 2000), ('RI', 2003), ('SC', 1999), ('SD', 2001),
('TN', 2000), ('TX', 2001), ('UT', 2000), ('VT', 2003),
('VA', 1998), ('WA', 2002), ('WV', 2004), ('WI', 2000),
('WY', 2000), ('PR', 2001)]
tablelist = ["SURVEY", "PLOT", "COND", "SUBPLOT", "SUBP_COND", "TREE", "SEEDLING"]
for table in tablelist:
for state, year in stateslist:
engine.download_files_from_archive(self.urls["main"] + state + "_" + table + ".ZIP",
[state + "_" + table + ".csv"])
for table in tablelist:
print("Scanning data for table %s..." % table)
prep_file_name = "%s.csv" % table
prep_file = open_fw(engine.format_filename(prep_file_name))
this_file = open_fr(engine.format_filename(stateslist[0][0] + "_" + table + ".csv"))
col_names = this_file.readline()
prep_file.write(col_names)
column_names = [col.strip('"') for col in col_names.split(',')]
year_column = column_names.index("INVYR")
this_file.close()
for state, year in stateslist:
this_file = open_fr(engine.format_filename(state + "_" + table + ".csv"))
this_file.readline()
for line in this_file:
values = line.split(',')
this_year = values[year_column]
if int(this_year) >= year:
prep_file.write(line)
prep_file.close()
engine.auto_create_table(Table(table), filename=prep_file_name)
engine.insert_data_from_file(engine.format_filename(prep_file_name))
try:
os.remove(engine.format_filename(prep_file_name))
except OSError:
pass
return engine
SCRIPT = main()
|
{
"content_hash": "1cc26624eeee64b28ae15b269680a8ee",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 252,
"avg_line_length": 46.56310679611651,
"alnum_prop": 0.542535446205171,
"repo_name": "goelakash/retriever",
"id": "bc383327d5d95e09ea0d6ca7612847da4ce00720",
"size": "4807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/forest_inventory_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Inno Setup",
"bytes": "8838"
},
{
"name": "Python",
"bytes": "387910"
},
{
"name": "Shell",
"bytes": "511"
},
{
"name": "TeX",
"bytes": "557"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name='lektor-gae',
description='Publishes your Lektor site to Google App Engine.',
url='https://github.com/isotherm/lektor-gae/',
version='0.1',
author=u'Kirk Meyer',
author_email='kirk.meyer@alpaxo.com',
license='MIT',
platforms='any',
py_modules=['lektor_gae'],
entry_points={
'lektor.plugins': [
'gae = lektor_gae:GaePlugin',
]
},
install_requires=[
'Lektor',
'PyYAML',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP :: Site Management',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
{
"content_hash": "0190a046bf5711b485ba110c4f399435",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 70,
"avg_line_length": 30.34375,
"alnum_prop": 0.557157569515963,
"repo_name": "isotherm/lektor-gae",
"id": "28a855fec68f3389703aa429f816a64f3827b3e1",
"size": "971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5500"
}
],
"symlink_target": ""
}
|
def permutationCipher(password, key):
table = str.maketrans(''.join(map(chr, range(ord('a'), ord('z') + 1))), key)
return password.translate(table)
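# --- Illustrative usage sketch (not part of the original solution): 'key' is a
# 26-character permutation of the lowercase alphabet, and each letter of 'password'
# is replaced by the key character at the same alphabet position.
print(permutationCipher('abc', 'zyxwvutsrqponmlkjihgfedcba'))  # -> 'zyx'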
|
{
"content_hash": "e7f7feffa5a09a7475d9286118c0aba7",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 80,
"avg_line_length": 52,
"alnum_prop": 0.6602564102564102,
"repo_name": "RevansChen/online-judge",
"id": "2837588194636ac783736279be7ef674196f8761",
"size": "191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Codefights/arcade/python-arcade/level-2/17.Permutation-Cipher/Python/solution1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Brainfuck",
"bytes": "102"
},
{
"name": "C",
"bytes": "6829"
},
{
"name": "C#",
"bytes": "19758"
},
{
"name": "C++",
"bytes": "9439"
},
{
"name": "Clojure",
"bytes": "75"
},
{
"name": "CoffeeScript",
"bytes": "903"
},
{
"name": "Crystal",
"bytes": "52"
},
{
"name": "Dart",
"bytes": "182"
},
{
"name": "Elixir",
"bytes": "1027"
},
{
"name": "Erlang",
"bytes": "132"
},
{
"name": "F#",
"bytes": "40"
},
{
"name": "Go",
"bytes": "83"
},
{
"name": "Haskell",
"bytes": "102"
},
{
"name": "Java",
"bytes": "11057"
},
{
"name": "JavaScript",
"bytes": "44773"
},
{
"name": "Kotlin",
"bytes": "82"
},
{
"name": "Lua",
"bytes": "93"
},
{
"name": "PHP",
"bytes": "2875"
},
{
"name": "Python",
"bytes": "563400"
},
{
"name": "R",
"bytes": "265"
},
{
"name": "Ruby",
"bytes": "7171"
},
{
"name": "Rust",
"bytes": "74"
},
{
"name": "Scala",
"bytes": "84"
},
{
"name": "Shell",
"bytes": "438"
},
{
"name": "Swift",
"bytes": "6597"
},
{
"name": "TSQL",
"bytes": "3531"
},
{
"name": "TypeScript",
"bytes": "5744"
}
],
"symlink_target": ""
}
|
import json
import threading
import time
from six import moves
from oslo import messaging
from oslo.messaging._drivers import base
class FakeIncomingMessage(base.IncomingMessage):
def __init__(self, listener, ctxt, message, reply_q, requeue):
super(FakeIncomingMessage, self).__init__(listener, ctxt, message)
self.requeue_callback = requeue
self._reply_q = reply_q
def reply(self, reply=None, failure=None, log_failure=True):
if self._reply_q:
failure = failure[1] if failure else None
self._reply_q.put((reply, failure))
def requeue(self):
self.requeue_callback()
class FakeListener(base.Listener):
def __init__(self, driver, exchange_manager, targets):
super(FakeListener, self).__init__(driver)
self._exchange_manager = exchange_manager
self._targets = targets
# NOTE(sileht): Ensure that all needed queues exist even if the listener
# has not been polled yet
for target in self._targets:
exchange = self._exchange_manager.get_exchange(target.exchange)
exchange.ensure_queue(target)
def poll(self, timeout=None):
if timeout is not None:
deadline = time.time() + timeout
else:
deadline = None
while True:
for target in self._targets:
exchange = self._exchange_manager.get_exchange(target.exchange)
(ctxt, message, reply_q, requeue) = exchange.poll(target)
if message is not None:
message = FakeIncomingMessage(self, ctxt, message,
reply_q, requeue)
return message
if deadline is not None:
pause = deadline - time.time()
if pause < 0:
break
pause = min(pause, 0.050)
else:
pause = 0.050
time.sleep(pause)
return None
class FakeExchange(object):
def __init__(self, name):
self.name = name
self._queues_lock = threading.RLock()
self._topic_queues = {}
self._server_queues = {}
def ensure_queue(self, target):
with self._queues_lock:
if target.server:
self._get_server_queue(target.topic, target.server)
else:
self._get_topic_queue(target.topic)
def _get_topic_queue(self, topic):
return self._topic_queues.setdefault(topic, [])
def _get_server_queue(self, topic, server):
return self._server_queues.setdefault((topic, server), [])
def deliver_message(self, topic, ctxt, message,
server=None, fanout=False, reply_q=None):
with self._queues_lock:
if fanout:
queues = [q for t, q in self._server_queues.items()
if t[0] == topic]
elif server is not None:
queues = [self._get_server_queue(topic, server)]
else:
queues = [self._get_topic_queue(topic)]
def requeue():
self.deliver_message(topic, ctxt, message, server=server,
fanout=fanout, reply_q=reply_q)
for queue in queues:
queue.append((ctxt, message, reply_q, requeue))
def poll(self, target):
with self._queues_lock:
if target.server:
queue = self._get_server_queue(target.topic, target.server)
else:
queue = self._get_topic_queue(target.topic)
return queue.pop(0) if queue else (None, None, None, None)
class FakeExchangeManager(object):
def __init__(self, default_exchange):
self._default_exchange = default_exchange
self._exchanges_lock = threading.Lock()
self._exchanges = {}
def get_exchange(self, name):
if name is None:
name = self._default_exchange
with self._exchanges_lock:
return self._exchanges.setdefault(name, FakeExchange(name))
class FakeDriver(base.BaseDriver):
def __init__(self, conf, url, default_exchange=None,
allowed_remote_exmods=None):
super(FakeDriver, self).__init__(conf, url, default_exchange,
allowed_remote_exmods)
self._exchange_manager = FakeExchangeManager(default_exchange)
def require_features(self, requeue=True):
pass
@staticmethod
def _check_serialize(message):
"""Make sure a message intended for rpc can be serialized.
We specifically want to use json, not our own jsonutils because
jsonutils has some extra logic to automatically convert objects to
primitive types so that they can be serialized. We want to catch all
cases where non-primitive types make it into this code and treat it as
an error.
"""
json.dumps(message)
def _send(self, target, ctxt, message, wait_for_reply=None, timeout=None):
self._check_serialize(message)
exchange = self._exchange_manager.get_exchange(target.exchange)
reply_q = None
if wait_for_reply:
reply_q = moves.queue.Queue()
exchange.deliver_message(target.topic, ctxt, message,
server=target.server,
fanout=target.fanout,
reply_q=reply_q)
if wait_for_reply:
try:
reply, failure = reply_q.get(timeout=timeout)
if failure:
raise failure
else:
return reply
except moves.queue.Empty:
raise messaging.MessagingTimeout(
'No reply on topic %s' % target.topic)
return None
def send(self, target, ctxt, message, wait_for_reply=None, timeout=None,
retry=None):
# NOTE(sileht): retry doesn't need to be implemented, the fake
# transport always works
return self._send(target, ctxt, message, wait_for_reply, timeout)
def send_notification(self, target, ctxt, message, version, retry=None):
# NOTE(sileht): retry doesn't need to be implemented, the fake
# transport always works
self._send(target, ctxt, message)
def listen(self, target):
exchange = target.exchange or self._default_exchange
listener = FakeListener(self, self._exchange_manager,
[messaging.Target(topic=target.topic,
server=target.server,
exchange=exchange),
messaging.Target(topic=target.topic,
exchange=exchange)])
return listener
def listen_for_notifications(self, targets_and_priorities):
targets = [messaging.Target(topic='%s.%s' % (target.topic, priority),
exchange=target.exchange)
for target, priority in targets_and_priorities]
listener = FakeListener(self, self._exchange_manager, targets)
return listener
def cleanup(self):
pass
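# A rough usage sketch (hedged: the conf/url placeholders and the Target class
# from oslo.messaging are assumptions; nothing below is exercised in this
# module):
#
#   driver = FakeDriver(conf=None, url=None, default_exchange='test')
#   target = messaging.Target(topic='demo', server='server-1')
#   listener = driver.listen(target)
#   driver.send(target, ctxt={}, message={'method': 'ping', 'args': {}})
#   incoming = listener.poll(timeout=1)   # FakeIncomingMessage or None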
|
{
"content_hash": "ccceaaa0f52588f7c61e343c39ba1810",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 79,
"avg_line_length": 35.863414634146345,
"alnum_prop": 0.5622959738846572,
"repo_name": "redhat-openstack/oslo.messaging",
"id": "dfce5a4a8d2d111280f4700dbcb2bec3bdc7921b",
"size": "7996",
"binary": false,
"copies": "2",
"ref": "refs/heads/juno-patches",
"path": "oslo/messaging/_drivers/impl_fake.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "954096"
},
{
"name": "Shell",
"bytes": "2805"
}
],
"symlink_target": ""
}
|
''' A scatter plot of a smooth periodic oscillation. This example demonstrates red
circle scatter markers with black outlines, using the low-level ``bokeh.models``
API.
.. bokeh-example-metadata::
:apis: bokeh.models.Circle, bokeh.models.Plot, bokeh.models.ColumnDataSource, bokeh.models.LinearAxis, bokeh.models.PanTool, bokeh.models.WheelZoomTool
:refs: :ref:`ug_basic_scatters_markers`
:keywords: circle, figure, scatter
'''
from numpy import arange, pi, sin
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models import (Circle, ColumnDataSource, LinearAxis,
PanTool, Plot, WheelZoomTool)
from bokeh.resources import INLINE
from bokeh.util.browser import view
x = arange(-2*pi, 2*pi, 0.1)
y = sin(x)
source = ColumnDataSource(
data=dict(x=x, y=y)
)
plot = Plot(min_border=80)
circle = Circle(x="x", y="y", fill_color="red", size=5, line_color="black")
plot.add_glyph(source, circle)
plot.add_layout(LinearAxis(), 'below')
plot.add_layout(LinearAxis(), 'left')
plot.add_tools(PanTool(), WheelZoomTool())
doc = Document()
doc.add_root(plot)
if __name__ == "__main__":
doc.validate()
filename = "basic_plot.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Basic Glyph Plot"))
print("Wrote %s" % filename)
view(filename)
|
{
"content_hash": "2ca50417b33c98cbb90facedef1ca7e7",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 155,
"avg_line_length": 29.456521739130434,
"alnum_prop": 0.6959409594095941,
"repo_name": "bokeh/bokeh",
"id": "31d3e88d5fafe2dd7c64fbee7afbc04fbd093986",
"size": "1355",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-3.1",
"path": "examples/models/basic_plot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1884"
},
{
"name": "Dockerfile",
"bytes": "1924"
},
{
"name": "GLSL",
"bytes": "44696"
},
{
"name": "HTML",
"bytes": "53475"
},
{
"name": "JavaScript",
"bytes": "20301"
},
{
"name": "Less",
"bytes": "46376"
},
{
"name": "Python",
"bytes": "4475226"
},
{
"name": "Shell",
"bytes": "7673"
},
{
"name": "TypeScript",
"bytes": "3652153"
}
],
"symlink_target": ""
}
|
import requests
from django import forms
from django.conf import settings
from django_comments.forms import CommentForm
from django_markdown.widgets import MarkdownWidget
from core.utils import get_client_ip
from error_posts.models import ErrorPost
class ErrorPostForm(forms.ModelForm):
recaptcha = forms.CharField()
class Meta:
model = ErrorPost
fields = ['exception_type', 'error_message', 'traceback',
'how_to_reproduce', 'django_version']
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super().__init__(*args, **kwargs)
initial = kwargs.get('initial')
if initial:
for field in initial:
if field in self.fields:
self.fields[field].widget.attrs['readonly'] = True
if settings.DEBUG:
self.fields['recaptcha'].required = False
def clean_recaptcha(self):
code = self.cleaned_data['recaptcha']
if settings.RECAPTCHA_SECRET_KEY:
ip_address = get_client_ip(self.request)
response = requests.post('https://www.google.com/recaptcha/api/siteverify',
data={'secret': settings.RECAPTCHA_SECRET_KEY,
'response': code,
'remoteip': ip_address})
res = response.json()
if not res['success']:
raise forms.ValidationError('Invalid Recaptcha')
return code
def save(self, data_came_from, commit=True):
instance = super(ErrorPostForm, self).save(commit=False)
instance.data_came_from = data_came_from
if commit:
instance.save()
return instance
class CommentFormWithMarkDown(CommentForm):
comment = forms.CharField(widget=MarkdownWidget())
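# A hedged sketch of how a view might drive this form (the view itself and the
# 'web' marker are placeholders, not part of this module):
#
#   form = ErrorPostForm(request.POST, request=request)
#   if form.is_valid():
#       post = form.save(data_came_from='web')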
|
{
"content_hash": "59e5adc07ecb09b6e08e39ae036b4aa3",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 87,
"avg_line_length": 34.14545454545455,
"alnum_prop": 0.5958466453674122,
"repo_name": "fixmydjango/fixmydjango",
"id": "863909adc567868fbb5fc8540e0fa08e249b23b2",
"size": "1878",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "error_posts/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "228501"
},
{
"name": "HTML",
"bytes": "26609"
},
{
"name": "JavaScript",
"bytes": "68"
},
{
"name": "Makefile",
"bytes": "396"
},
{
"name": "Python",
"bytes": "56744"
}
],
"symlink_target": ""
}
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
import factory
from mock import Mock, patch
from django.http import HttpRequest
from django.test import TestCase
from seahub.base.accounts import User
from seahub.wiki.models import PersonalWiki
from seahub.views.wiki import personal_wiki
def setup():
global repo_id, mock_content, mock_repo, mock_dirent, request
repo_id = '181150f2-3df0-4ab3-9ecd-1e1ec8e14def'
mock_content = 'fake content'
mock_repo = Mock()
mock_repo.id = repo_id
mock_dirent = Mock()
mock_dirent.obj_name = 'home.md'
request = FakeRequestFactory()
def render_to_response_echo(*args, **kwargs):
"""mocked render_to_response that just returns what was passed in,
also puts the template name into the results dict
"""
context = args[1]
context.update(dict(template_name=args[0]))
return context
patch('seahub.views.wiki.render_to_response',
      render_to_response_echo).start()
class PersonalWikiTest(TestCase):
@patch('seahub.views.wiki.seafile_api.get_owned_repo_list')
def test_wiki_does_not_exist(self, mock_get_owned_repo_list):
res = personal_wiki(request)
self.assertFalse(res.get('wiki_exists'))
self.assertEqual('wiki/personal_wiki.html', res.get('template_name'))
@patch('seahub.wiki.utils.seaserv.get_repo')
@patch('seahub.wiki.utils.seaserv.get_commits')
@patch('seahub.views.wiki.seaserv.post_empty_file')
def test_wiki_page_missing(self, mock_post_empty_file,
mock_get_commits, mock_get_repo):
"""
"""
# setup personal wiki
PersonalWiki.objects.create(username=request.user.username,
repo_id=repo_id)
mock_get_repo.return_value = mock_repo
mock_get_commits.return_value = [None]
mock_post_empty_file.return_value = True
res = personal_wiki(request)
self.assertEqual('/home/wiki/home/', res['Location'])
@patch('seahub.views.wiki.get_personal_wiki_page')
@patch('seahub.base.models.FileContributors.objects.get_file_contributors')
def test_wiki_found(self, mock_get_file_contributors,
mock_get_personal_wiki_page):
mock_get_personal_wiki_page.return_value = (mock_content,
mock_repo,
mock_dirent)
mock_get_file_contributors.return_value = ([request.user.username],
None, None)
res = personal_wiki(request)
self.assertEqual('fake content', res.get('content'))
self.assertEqual('fake content', res.get('index_content'))
self.assertEqual('home', res.get('page'))
self.assertEqual('wiki/personal_wiki.html', res.get('template_name'))
########## Helper functions and classes
def FakeRequestFactory(*args, **kwargs):
''' FakeRequestFactory, FakeMessages and FakeRequestContext are good for
mocking out django views; they are MUCH faster than the Django test client.
'''
user = UserFactory()
if kwargs.get('authenticated'):
user.is_authenticated = lambda: True
request = HttpRequest()
request.user = user
request.cloud_mode = False
request._messages = FakeMessages()
request.session = kwargs.get('session', {})
if kwargs.get('POST'):
request.method = 'POST'
request.POST = kwargs.get('POST')
else:
request.method = 'GET'
        request.GET = kwargs.get('GET', {})
return request
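# A brief usage sketch (illustrative only; some_view is a placeholder):
#
#   request = FakeRequestFactory(authenticated=True, POST={'title': 'hello'})
#   response = some_view(request)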
class UserFactory(factory.Factory):
''' using the excellent factory_boy library '''
class Meta:
model = User
@classmethod
def _setup_next_sequence(cls):
# Instead of defaulting to starting with 0, start with 1.
return 1
    email = factory.Sequence(lambda n: 'user%d@example.com' % n)
class FakeMessages:
''' mocks the Django message framework, makes it easier to get
the messages out '''
messages = []
def add(self, level, message, extra_tags):
self.messages.append(str(message))
@property
def pop(self):
return self.messages.pop()
|
{
"content_hash": "0b5532db68dfc3914d53e4d2f1af4d50",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 79,
"avg_line_length": 34.03875968992248,
"alnum_prop": 0.6331131860624004,
"repo_name": "saukrIppl/seahub",
"id": "4458ea94791c108c2c0e1408760120526e9c354f",
"size": "4391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seahub/views/tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "329387"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "722728"
},
{
"name": "Java",
"bytes": "307193"
},
{
"name": "JavaScript",
"bytes": "7293422"
},
{
"name": "Makefile",
"bytes": "1097"
},
{
"name": "PLpgSQL",
"bytes": "19598"
},
{
"name": "Python",
"bytes": "9050702"
},
{
"name": "Shell",
"bytes": "9695"
}
],
"symlink_target": ""
}
|
"""
I look at the unknown HADS table and see if any of these stations exist
in the mesosite database; if so, I set online to true!
Run from RUN_2AM.sh
"""
from pandas import read_sql
from pyiem.util import get_dbconn, get_dbconnstr, logger
LOG = logger()
def review_iemaccess():
"""Go find stations that have summary entries, but marked offline."""
df = read_sql(
"select s.iemid, t.id, t.network from "
"summary s JOIN stations t on (s.iemid = t.iemid) "
"where day = 'YESTERDAY' and not online",
get_dbconnstr("iem"),
)
if df.empty:
return
LOG.info("Found %s stations offline, but having data", len(df.index))
pgconn = get_dbconn("mesosite")
cursor = pgconn.cursor()
cursor.execute(
"UPDATE stations SET online = 't' where iemid in %s",
(tuple(df["iemid"].to_list()),),
)
cursor.close()
pgconn.commit()
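# NOTE (added for clarity): the "in %s" parameterization above works because
# psycopg2 adapts a Python tuple to a parenthesized SQL list, e.g.
#   cursor.execute("... where iemid in %s", ((1, 2, 3),))
# is rendered as "... where iemid in (1, 2, 3)".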
def main():
"""Go Main Go"""
hads = get_dbconn("hads")
mesosite = get_dbconn("mesosite")
hcursor = hads.cursor()
hcursor2 = hads.cursor()
mcursor = mesosite.cursor()
# look for unknown
hcursor.execute(
"SELECT nwsli, network, max(product) from unknown "
"WHERE length(nwsli) = 5 GROUP by nwsli, network ORDER by nwsli ASC"
)
for row in hcursor:
nwsli = row[0]
network = row[1]
mcursor.execute("SELECT online from stations where id = %s", (nwsli,))
row2 = mcursor.fetchone()
if row2 is None:
continue
if not row2[0]:
print(
("Site %s [%s] %s was unknown, but is in mesosite")
% (nwsli, network, row[2])
)
mcursor.execute(
"update stations SET online = 't' where id = %s "
"and online = 'f'",
(nwsli,),
)
else:
print(
("Site %s [%s] %s was unknown, but online in DB?")
% (nwsli, network, row[2])
)
hcursor2.execute("DELETE from unknown where nwsli = %s", (nwsli,))
hcursor2.close()
hads.commit()
mcursor.close()
mesosite.commit()
if __name__ == "__main__":
main()
review_iemaccess()
|
{
"content_hash": "5a01eb754f49e045ea42311d1991cb11",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 78,
"avg_line_length": 28.025,
"alnum_prop": 0.5526315789473685,
"repo_name": "akrherz/iem",
"id": "037b6f59e3472d68c5b7dee163b92a1f3fab8965",
"size": "2242",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/dbutil/clean_unknown_hads.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16912"
},
{
"name": "HTML",
"bytes": "1092923"
},
{
"name": "Hack",
"bytes": "7078"
},
{
"name": "JavaScript",
"bytes": "244253"
},
{
"name": "PHP",
"bytes": "3492474"
},
{
"name": "Python",
"bytes": "3279270"
},
{
"name": "Rich Text Format",
"bytes": "30075"
},
{
"name": "Shell",
"bytes": "72284"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from models import Profile
from django.contrib.auth.models import Group
# Groups are not used, in favor of per-user flags.
# The Crates use case involves just a handful of users.
admin.site.unregister(Group)
# We're extending this to include profile elements
admin.site.unregister(User)
class ProfileInlineAdmin(admin.StackedInline):
model = Profile
can_delete = False
verbose_name_plural = 'API Access'
readonly_fields = ('bytes_inbound','bytes_outbound','object_count','bytes_available','bytes_total','objects_common')
@admin.register(User)
class CratesUserAdmin(UserAdmin):
inlines = (ProfileInlineAdmin, )
list_display = ('__unicode__','is_superuser')#,'profile_has_api_access','can_upload')
search_fields = list_display
# override this to remove useless (to crates) group/permissions
fieldsets = (
('Credentials', {'fields': ('username', 'password')}),
('Personal info', {'fields': ('first_name', 'last_name', 'email')}),
('General Permissions', {'fields': ('is_active', 'is_staff', 'is_superuser',)}),
('Important dates', {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username','first_name','last_name','email','password1', 'password2','is_superuser'),
}),
)
|
{
"content_hash": "6645ab5244ad93353c4bdbad4978f127",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 120,
"avg_line_length": 37.3,
"alnum_prop": 0.6548257372654156,
"repo_name": "naggie/crates",
"id": "3be18a13891b047772fbd1b8adf8db8d4aad2aea",
"size": "1492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "network/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9951"
},
{
"name": "HTML",
"bytes": "5776"
},
{
"name": "JavaScript",
"bytes": "28459"
},
{
"name": "Nginx",
"bytes": "2366"
},
{
"name": "Python",
"bytes": "80500"
}
],
"symlink_target": ""
}
|
import os
import argparse
from ffissh.ssh import Connection
def _ssh(args):
conn = Connection(host=args.host, port=args.port, username=args.username)
conn.privkey = args.privkey
conn.pubkey = args.pubkey
conn.passphrase = args.passphrase
buf = ''
with conn:
chan = conn.request_portforward(args.forwardport)
while True:
conn.waitsocket()
buf += chan.read_nonblocking()
if buf.endswith('\r\n\r\n'):
print buf
break
parser = argparse.ArgumentParser()
parser.add_argument('forwardport', type=int)
parser.add_argument('--host', required=True)
parser.add_argument('--port', type=int, default=22)
parser.add_argument('--pubkey',
default=os.path.expanduser('~/.ssh/id_rsa.pub'))
parser.add_argument('--privkey', default=os.path.expanduser('~/.ssh/id_rsa'))
parser.add_argument('--username', required=True)
parser.add_argument('--known-hosts',
default=os.path.expanduser('~/.ssh/known_hosts'))
parser.add_argument('--passphrase', default='')
if __name__ == '__main__':
args = parser.parse_args()
_ssh(args)
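# Example invocation (illustrative; the host and username are placeholders):
#
#   python run_pfw.py 8080 --host example.com --username deploy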
|
{
"content_hash": "d8e011342edbcd1b18399ac1fe53fcc5",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 31.37837837837838,
"alnum_prop": 0.6296296296296297,
"repo_name": "tehasdf/ffissh",
"id": "5226c4a3c3b025d6263f3a2e8893f0019aee6c5e",
"size": "1385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_pfw.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17626"
}
],
"symlink_target": ""
}
|
'''
blockr.io
'''
import logging
from lib import config, util, util_jetcoin
def get_host():
if config.BLOCKCHAIN_SERVICE_CONNECT:
return config.BLOCKCHAIN_SERVICE_CONNECT
else:
return 'http://tjet.blockr.io' if config.TESTNET else 'http://jet.blockr.io'
def check():
pass
def getinfo():
result = util.get_url(get_host() + '/api/v1/coin/info', abort_on_error=True)
if 'status' in result and result['status'] == 'success':
return {
"info": {
"blocks": result['data']['last_block']['nb']
}
}
return None
def listunspent(address):
result = util.get_url(get_host() + '/api/v1/address/unspent/{}/'.format(address), abort_on_error=True)
if 'status' in result and result['status'] == 'success':
utxo = []
for txo in result['data']['unspent']:
newtxo = {
'address': address,
'txid': txo['tx'],
'vout': txo['n'],
'ts': 0,
'scriptPubKey': txo['script'],
'amount': float(txo['amount']),
'confirmations': txo['confirmations'],
'confirmationsFromCache': False
}
utxo.append(newtxo)
return utxo
return None
def getaddressinfo(address):
infos = util.get_url(get_host() + '/api/v1/address/info/{}'.format(address), abort_on_error=True)
if 'status' in infos and infos['status'] == 'success':
txs = util.get_url(get_host() + '/api/v1/address/txs/{}'.format(address), abort_on_error=True)
if 'status' in txs and txs['status'] == 'success':
transactions = []
for tx in txs['data']['txs']:
transactions.append(tx['tx'])
return {
'addrStr': address,
'balance': infos['data']['balance'],
'balanceSat': infos['data']['balance'] * config.UNIT,
'totalReceived': infos['data']['totalreceived'],
'totalReceivedSat': infos['data']['totalreceived'] * config.UNIT,
'unconfirmedBalance': 0,
'unconfirmedBalanceSat': 0,
'unconfirmedTxApperances': 0,
'txApperances': txs['data']['nb_txs'],
'transactions': transactions
}
return None
def gettransaction(tx_hash):
url = get_host() + '/api/v1/tx/raw/{}'.format(tx_hash)
tx = util.get_url(url, abort_on_error=False)
assert tx and tx.get('status') and tx.get('code')
if tx['code'] == 404:
return None
elif tx['code'] != 200:
raise Exception("Invalid result (code %s), body: %s" % (tx['code'], tx))
if 'status' in tx and tx['status'] == 'success':
valueOut = 0
for vout in tx['data']['tx']['vout']:
valueOut += vout['value']
return {
'txid': tx_hash,
'version': tx['data']['tx']['version'],
'locktime': tx['data']['tx']['locktime'],
'blockhash': tx['data']['tx'].get('blockhash', None), #will be None if not confirmed yet...
'confirmations': tx['data']['tx'].get('confirmations', None),
'time': tx['data']['tx'].get('time', None),
'blocktime': tx['data']['tx'].get('blocktime', None),
'valueOut': valueOut,
'vin': tx['data']['tx']['vin'],
'vout': tx['data']['tx']['vout']
}
return None
def get_pubkey_for_address(address):
#first, get a list of transactions for the address
address_info = getaddressinfo(address)
#if no transactions, we can't get the pubkey
if not address_info['transactions']:
return None
#for each transaction we got back, extract the vin, pubkey, go through, convert it to binary, and see if it reduces down to the given address
for tx_id in address_info['transactions']:
#parse the pubkey out of the first sent transaction
tx = gettransaction(tx_id)
pubkey_hex = tx['vin'][0]['scriptSig']['asm'].split(' ')[1]
if util_jetcoin.pubkey_to_address(pubkey_hex) == address:
return pubkey_hex
return None
|
{
"content_hash": "dd4641374e278ae60eb73cf4ef602f4a",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 145,
"avg_line_length": 36.833333333333336,
"alnum_prop": 0.5415575136937366,
"repo_name": "paytokens-beta/payblockd",
"id": "1a58409e7e26010a29264c1076a38c89fd8e4b57",
"size": "4199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/blockchain/blockr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "305240"
}
],
"symlink_target": ""
}
|
import gettext
import httplib
import logging
import logging.handlers
import re
import time
import XenAPI
translations = gettext.translation('nova', fallback=True)
_ = translations.ugettext
##### Logging setup
def configure_logging(name):
log = logging.getLogger()
log.setLevel(logging.DEBUG)
sysh = logging.handlers.SysLogHandler('/dev/log')
sysh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name)
sysh.setFormatter(formatter)
log.addHandler(sysh)
##### Exceptions
class PluginError(Exception):
"""Base Exception class for all plugin errors."""
def __init__(self, *args):
Exception.__init__(self, *args)
class ArgumentError(PluginError):
"""Raised when required arguments are missing, argument values are invalid,
or incompatible arguments are given.
"""
def __init__(self, *args):
PluginError.__init__(self, *args)
##### Helpers
def ignore_failure(func, *args, **kwargs):
try:
return func(*args, **kwargs)
except XenAPI.Failure, e:
logging.error(_('Ignoring XenAPI.Failure %s'), e)
return None
##### Argument validation
ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$')
def validate_exists(args, key, default=None):
"""Validates that a string argument to a RPC method call is given, and
matches the shell-safe regex, with an optional default value in case it
does not exist.
Returns the string.
"""
if key in args:
if len(args[key]) == 0:
raise ArgumentError(_('Argument %(key)s value %(value)s is too '
'short.') %
{'key': key,
'value': args[key]})
if not ARGUMENT_PATTERN.match(args[key]):
raise ArgumentError(_('Argument %(key)s value %(value)s contains '
'invalid characters.') %
{'key': key,
'value': args[key]})
if args[key][0] == '-':
raise ArgumentError(_('Argument %(key)s value %(value)s starts '
'with a hyphen.') %
{'key': key,
'value': args[key]})
return args[key]
elif default is not None:
return default
else:
raise ArgumentError(_('Argument %s is required.') % key)
def validate_bool(args, key, default=None):
"""Validates that a string argument to a RPC method call is a boolean
string, with an optional default value in case it does not exist.
Returns the python boolean value.
"""
value = validate_exists(args, key, default)
if value.lower() == 'true':
return True
elif value.lower() == 'false':
return False
else:
raise ArgumentError(_("Argument %(key)s may not take value %(value)s. "
"Valid values are ['true', 'false'].")
% {'key': key,
'value': value})
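# Illustrative behaviour of the validators above (added as a sketch, not part
# of the original plugin):
#
#   validate_bool({'live': 'True'}, 'live')        # -> True
#   validate_bool({}, 'live', default='false')     # -> False
#   validate_bool({}, 'live')                      # raises ArgumentError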
def exists(args, key):
"""Validates that a freeform string argument to a RPC method call is given.
Returns the string.
"""
if key in args:
return args[key]
else:
raise ArgumentError(_('Argument %s is required.') % key)
def optional(args, key):
"""If the given key is in args, return the corresponding value, otherwise
return None"""
return key in args and args[key] or None
def get_this_host(session):
return session.xenapi.session.get_this_host(session.handle)
def get_domain_0(session):
this_host_ref = get_this_host(session)
expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"'
expr = expr % this_host_ref
return session.xenapi.VM.get_all_records_where(expr).keys()[0]
def create_vdi(session, sr_ref, name_label, virtual_size, read_only):
vdi_ref = session.xenapi.VDI.create(
{'name_label': name_label,
'name_description': '',
'SR': sr_ref,
'virtual_size': str(virtual_size),
'type': 'User',
'sharable': False,
'read_only': read_only,
'xenstore_data': {},
'other_config': {},
'sm_config': {},
'tags': []})
logging.debug(_('Created VDI %(vdi_ref)s (%(label)s, %(size)s, '
'%(read_only)s) on %(sr_ref)s.') %
{'vdi_ref': vdi_ref,
'label': name_label,
'size': virtual_size,
'read_only': read_only,
'sr_ref': sr_ref})
return vdi_ref
def with_vdi_in_dom0(session, vdi, read_only, f):
dom0 = get_domain_0(session)
vbd_rec = {}
vbd_rec['VM'] = dom0
vbd_rec['VDI'] = vdi
vbd_rec['userdevice'] = 'autodetect'
vbd_rec['bootable'] = False
vbd_rec['mode'] = read_only and 'RO' or 'RW'
vbd_rec['type'] = 'disk'
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
logging.debug(_('Creating VBD for VDI %s ... '), vdi)
vbd = session.xenapi.VBD.create(vbd_rec)
logging.debug(_('Creating VBD for VDI %s done.'), vdi)
try:
logging.debug(_('Plugging VBD %s ... '), vbd)
session.xenapi.VBD.plug(vbd)
logging.debug(_('Plugging VBD %s done.'), vbd)
return f(session.xenapi.VBD.get_device(vbd))
finally:
logging.debug(_('Destroying VBD for VDI %s ... '), vdi)
vbd_unplug_with_retry(session, vbd)
ignore_failure(session.xenapi.VBD.destroy, vbd)
logging.debug(_('Destroying VBD for VDI %s done.'), vdi)
def vbd_unplug_with_retry(session, vbd):
"""Call VBD.unplug on the given VBD, with a retry if we get
DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
seeing the device still in use, even when all processes using the device
should be dead."""
while True:
try:
session.xenapi.VBD.unplug(vbd)
logging.debug(_('VBD.unplug successful first time.'))
return
except XenAPI.Failure, e:
if (len(e.details) > 0 and
e.details[0] == 'DEVICE_DETACH_REJECTED'):
logging.debug(_('VBD.unplug rejected: retrying...'))
time.sleep(1)
elif (len(e.details) > 0 and
e.details[0] == 'DEVICE_ALREADY_DETACHED'):
logging.debug(_('VBD.unplug successful eventually.'))
return
else:
logging.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'),
e)
return
def with_http_connection(proto, netloc, f):
conn = (proto == 'https' and
httplib.HTTPSConnection(netloc) or
httplib.HTTPConnection(netloc))
try:
return f(conn)
finally:
conn.close()
def with_file(dest_path, mode, f):
dest = open(dest_path, mode)
try:
return f(dest)
finally:
dest.close()
|
{
"content_hash": "16801373d8b209275af02a281c04fc50",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 79,
"avg_line_length": 32.022222222222226,
"alnum_prop": 0.558501040943789,
"repo_name": "plumgrid/plumgrid-nova",
"id": "72d499664642ce4b840acec8b0ec6812b5be9878",
"size": "8060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11944269"
},
{
"name": "Shell",
"bytes": "17148"
}
],
"symlink_target": ""
}
|
import os
import sys
from contextlib import contextmanager
from cStringIO import StringIO
import time
from .iterator import consume
@contextmanager
def ignored(*exceptions):
try:
yield
except exceptions:
pass
@contextmanager
def consuming(iterator):
try:
yield iterator
finally:
consume(iterator, None)
@contextmanager
def calling(callable, *args, **kwargs):
try:
yield
finally:
callable(*args, **kwargs)
@contextmanager
def change_directory(path):
original_path = os.getcwdu()
os.chdir(path)
try:
yield
finally:
os.chdir(original_path)
@contextmanager
def redirect_stdout(stream=None):
if stream is None:
stream = StringIO()
original_stdout = sys.stdout
sys.stdout = stream
try:
yield stream
finally:
sys.stdout = original_stdout
@contextmanager
def redirect_stderr(stream=None):
if stream is None:
stream = StringIO()
original_stderr = sys.stderr
sys.stderr = stream
try:
yield stream
finally:
sys.stderr = original_stderr
class Timer(object):
def __init__(self):
self._start_time = None
self._end_time = None
def __enter__(self):
self._start_time = time.clock()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._end_time = time.clock()
@property
def running(self):
return self._end_time is None and self._start_time is not None
@property
def terminated(self):
return self._end_time is not None
@property
def elapsed(self):
if self._end_time is None:
return time.clock() - self._start_time
return self._end_time - self._start_time
def __repr__(self):
return "Timer(elapsed={}, running={})".format(self.elapsed, self.running)
|
{
"content_hash": "813bef151de54e02531f988878531cfd",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 81,
"avg_line_length": 18.066666666666666,
"alnum_prop": 0.6146547179757512,
"repo_name": "tmr232/awesomelib",
"id": "fc6ab7e5c329a0b51b6c9494a4fb581e2bd436b8",
"size": "1897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awesome/context.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8389"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django import http
import dmp_packs
from dmp_packs.actions.exceptions import ParameterProcessingException
def coerce_to_expected_type(http_raw_input, expected_type):
if expected_type not in ["string", "integer"]:
raise ValueError
if expected_type == "string":
return str(http_raw_input)
if expected_type == "integer":
return int(http_raw_input)
def get_pack_action_from_slugs(pack_slug, action_slug):
pack_action_tuple = None
for pack_name, pack in dmp_packs.packs.iteritems():
if pack.slug != pack_slug:
continue
for action_name, action in pack.actions.iteritems():
if action.slug != action_slug:
continue
pack_action_tuple = pack, action
if pack_action_tuple is None:
raise http.Http404
pack, action = pack_action_tuple
if not action.enabled:
raise http.Http404
return pack_action_tuple
def process_parameters(action, data_querydict):
cleaned_parameters = {}
for expected_param, param_metadata in action.parameters.iteritems():
if param_metadata.get("required", False):
if not param_metadata.get("immutable", False):
if expected_param not in data_querydict:
raise ParameterProcessingException
if not param_metadata.get("immutable", False):
if expected_param not in data_querydict:
if "default" in param_metadata:
http_raw_input = param_metadata.get("default")
else:
continue
else:
http_raw_input = data_querydict.get(expected_param)
else:
http_raw_input = param_metadata.get("default")
try:
coerce_input_value = \
coerce_to_expected_type(http_raw_input,
param_metadata.get("type"))
except ValueError, value_error_exception:
raise ParameterProcessingException(value_error_exception.message)
cleaned_parameters[expected_param] = coerce_input_value
return cleaned_parameters
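# A hedged example of the shapes process_parameters() expects (the metadata
# keys -- "type", "required", "default", "immutable" -- mirror the lookups
# above; the `action` object itself is a placeholder):
#
#   action.parameters = {
#       'count': {'type': 'integer', 'required': True},
#       'label': {'type': 'string', 'default': 'none'},
#   }
#   process_parameters(action, {'count': '3'})
#   # -> {'count': 3, 'label': 'none'}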
|
{
"content_hash": "efefb112712e54067b56f7dfc13949c6",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 77,
"avg_line_length": 31.458333333333332,
"alnum_prop": 0.5947019867549669,
"repo_name": "rjusher/docker-container-manager",
"id": "29843fc983854428c22137b7ace3288906384e03",
"size": "2290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/dmp_packs/actions/utilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "73"
},
{
"name": "Python",
"bytes": "42312"
}
],
"symlink_target": ""
}
|
"""Open-source TensorFlow Inception v3 Example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
import inception_preprocessing
import vgg_preprocessing
from tensorflow.contrib import summary
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.slim.nets import inception
from tensorflow.contrib.training.python.training import evaluation
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'tpu', default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
# Model specific parameters
flags.DEFINE_string(
'data_dir', '',
'Directory where input data is stored')
flags.DEFINE_string(
'model_dir', None,
'Directory where model output is stored')
flags.DEFINE_string(
'export_dir',
default=None,
help=('The directory where the exported SavedModel will be stored.'))
flags.DEFINE_integer(
'num_shards', 8,
'Number of shards (workers).')
flags.DEFINE_integer(
'iterations', 100,
'Number of iterations per TPU training loop.')
flags.DEFINE_bool(
'skip_host_call', default=True,
help=('Skip the host call which is executed every training step. This is'
' generally used for generating training summaries (train loss,'
' learning rate, etc...). When --skip_host_call=false, there could'
' be a performance drop if host_call function is slow and cannot'
' keep up with the computation running on the TPU.'))
flags.DEFINE_integer(
'train_batch_size', 1024,
'Global (not per-shard) batch size for training')
flags.DEFINE_integer(
'eval_total_size', 0,
'Total batch size for evaluation, use the entire validation set if 0')
flags.DEFINE_integer(
'eval_batch_size', 1024,
'Global (not per-shard) batch size for evaluation')
flags.DEFINE_integer(
'train_steps', 213000,
'Number of steps use for training.')
flags.DEFINE_integer(
'train_steps_per_eval', 2000,
'Number of training steps to run between evaluations.')
flags.DEFINE_string(
'mode', 'train_and_eval',
'Mode to run: train, eval, train_and_eval')
flags.DEFINE_integer(
'min_eval_interval', 180,
'Minimum number of seconds between evaluations')
flags.DEFINE_integer(
'eval_timeout', None,
'Evaluation timeout: Maximum number of seconds that '
'may elapse while no new checkpoints are observed')
flags.DEFINE_bool(
'use_tpu', True,
'Use TPUs rather than plain CPUs')
flags.DEFINE_boolean(
'per_host_input_for_training', True,
'If true, input_fn is invoked per host rather than per shard.')
flags.DEFINE_string(
'use_data', 'real',
'One of "fake","real"')
flags.DEFINE_float(
'learning_rate', 0.165,
'Learning rate.')
flags.DEFINE_string(
'optimizer', 'RMS',
'Optimizer (one of sgd, RMS, momentum)')
flags.DEFINE_integer(
'num_classes', 1001,
'Number of classes to distinguish')
flags.DEFINE_integer(
'width', 299,
'Width of input image')
flags.DEFINE_integer(
'height', 299,
'Height of input image')
flags.DEFINE_bool(
'transpose_enabled', False,
'Boolean to enable/disable explicit I/O transpose')
flags.DEFINE_bool(
'log_device_placement', False,
'Boolean to enable/disable log device placement')
flags.DEFINE_integer(
'save_summary_steps', 100,
'Number of steps which must have run before showing summaries.')
flags.DEFINE_integer(
'save_checkpoints_secs', 1000,
'Interval (in seconds) at which the model data '
'should be checkpointed. Set to 0 to disable.')
flags.DEFINE_bool(
'moving_average', True,
'Whether to enable moving average computation on variables')
flags.DEFINE_string(
'preprocessing', 'inception',
'Preprocessing stage to use: one of inception or vgg')
flags.DEFINE_bool(
'use_annotated_bbox', False,
'If true, use annotated bounding box as input to cropping function, '
'else use full image size')
flags.DEFINE_float(
'learning_rate_decay', 0.94,
'Exponential decay rate used in learning rate adjustment')
flags.DEFINE_integer(
'learning_rate_decay_epochs', 3,
'Exponential decay epochs used in learning rate adjustment')
flags.DEFINE_bool(
'display_tensors', False,
'Whether to dump prediction tensors for comparison')
flags.DEFINE_bool(
'clear_update_collections', True,
'Set batchnorm update_collections to None if true, else use default value')
flags.DEFINE_integer(
'cold_epochs', 2,
'Number of epochs using cold learning rate')
flags.DEFINE_integer(
'warmup_epochs', 7,
'Number of epochs using linearly increasing learning rate')
flags.DEFINE_bool(
'use_learning_rate_warmup', False,
'Apply learning rate warmup if true')
# Dataset specific parameters
flags.DEFINE_bool(
'prefetch_enabled', True,
'Boolean to enable/disable prefetching')
flags.DEFINE_integer(
'prefetch_dataset_buffer_size', 8*1024*1024,
'Number of bytes in read buffer. 0 means no buffering.')
flags.DEFINE_integer(
'num_files_infeed', 8,
'Number of training files to read in parallel.')
flags.DEFINE_integer(
'num_parallel_calls', 64,
'Number of elements to process in parallel (by mapper)')
flags.DEFINE_integer(
'initial_shuffle_buffer_size', 1024,
'Number of elements from dataset that shuffler will sample from. '
'This shuffling is done before any other operations. '
'Set to 0 to disable')
flags.DEFINE_integer(
'followup_shuffle_buffer_size', 1000,
'Number of elements from dataset that shuffler will sample from. '
'This shuffling is done after prefetching is done. '
'Set to 0 to disable')
flags.DEFINE_string(
'precision', 'float32',
help=('Precision to use; one of: {bfloat16, float32}'))
FLAGS = flags.FLAGS
# Dataset constants
_NUM_TRAIN_IMAGES = 1281167
_NUM_EVAL_IMAGES = 50000
# Random cropping constants
_RESIZE_SIDE_MIN = 300
_RESIZE_SIDE_MAX = 600
# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9 # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.
# Constants dictating moving average.
MOVING_AVERAGE_DECAY = 0.995
# Batchnorm moving mean/variance parameters
BATCH_NORM_DECAY = 0.996
BATCH_NORM_EPSILON = 1e-3
WEIGHT_DECAY = 0.00004
def preprocess_raw_bytes(image_bytes, is_training=False, bbox=None):
"""Preprocesses a raw JPEG image.
This implementation is shared in common between train/eval pipelines,
and when serving the model.
Args:
image_bytes: A string Tensor, containing the encoded JPEG.
is_training: Whether or not to preprocess for training.
bbox: In inception preprocessing, this bbox can be used for cropping.
Returns:
A 3-Tensor [height, width, RGB channels] of type float32.
"""
image = tf.image.decode_jpeg(image_bytes, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if FLAGS.preprocessing == 'vgg':
image = vgg_preprocessing.preprocess_image(
image=image,
output_height=FLAGS.height,
output_width=FLAGS.width,
is_training=is_training,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX)
elif FLAGS.preprocessing == 'inception':
image = inception_preprocessing.preprocess_image(
image=image,
output_height=FLAGS.height,
output_width=FLAGS.width,
is_training=is_training,
bbox=bbox)
else:
assert False, 'Unknown preprocessing type: %s' % FLAGS.preprocessing
return image
class InputPipeline(object):
"""Generates ImageNet input_fn for training or evaluation.
The training data is assumed to be in TFRecord format with keys as specified
in the dataset_parser below, sharded across 1024 files, named sequentially:
train-00000-of-01024
train-00001-of-01024
...
train-01023-of-01024
The validation data is in the same format but sharded in 128 files.
The format of the data required is created by the script at:
https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py
Args:
is_training: `bool` for whether the input is for training
"""
def __init__(self, is_training, data_dir, use_bfloat16):
self.is_training = is_training
self.data_dir = data_dir
self.use_bfloat16 = use_bfloat16
def dataset_parser(self, serialized_proto):
"""Parse an Imagenet record from value."""
keys_to_features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/class/label':
tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
'image/class/text':
tf.FixedLenFeature([], dtype=tf.string, default_value=''),
'image/object/bbox/xmin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax':
tf.VarLenFeature(dtype=tf.float32),
'image/object/class/label':
tf.VarLenFeature(dtype=tf.int64),
}
features = tf.parse_single_example(serialized_proto, keys_to_features)
bbox = None
if FLAGS.use_annotated_bbox:
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
# Note that we impose an ordering of (y, x) just to make life difficult.
bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
image = features['image/encoded']
image = preprocess_raw_bytes(image, is_training=self.is_training, bbox=bbox)
label = tf.cast(
tf.reshape(features['image/class/label'], shape=[]), dtype=tf.int32)
if self.use_bfloat16:
image = tf.cast(image, tf.bfloat16)
return image, label
def dataset_iterator(self, batch_size, shuffle):
"""Constructs a real-data iterator over batches for train or eval.
Args:
batch_size: The effective batch size.
shuffle: Whether or not to shuffle the data.
Returns:
A tf.data iterator.
"""
file_pattern = os.path.join(self.data_dir, 'train-*'
if self.is_training else 'validation-*')
dataset = tf.data.Dataset.list_files(file_pattern, shuffle=self.is_training)
if self.is_training:
dataset = dataset.repeat()
def prefetch_dataset(filename):
dataset = tf.data.TFRecordDataset(
filename, buffer_size=FLAGS.prefetch_dataset_buffer_size)
return dataset
dataset = dataset.apply(
tf.contrib.data.parallel_interleave(
prefetch_dataset, cycle_length=FLAGS.num_files_infeed, sloppy=True))
if shuffle and FLAGS.followup_shuffle_buffer_size > 0:
dataset = dataset.shuffle(buffer_size=FLAGS.followup_shuffle_buffer_size)
dataset = dataset.map(
self.dataset_parser, num_parallel_calls=FLAGS.num_parallel_calls)
dataset = dataset.prefetch(batch_size)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(2) # Prefetch overlaps in-feed with training
return dataset.make_one_shot_iterator()
def input_fn(self, params):
"""Input function which provides a single batch for train or eval.
Args:
params: `dict` of parameters passed from the `TPUEstimator`.
`params['batch_size']` is always provided and should be used as the
effective batch size.
Returns:
A (images, labels) tuple of `Tensor`s for a batch of samples.
"""
batch_size = params['batch_size']
if FLAGS.use_data == 'real':
images, labels = self.dataset_iterator(batch_size,
self.is_training).get_next()
else:
images = tf.random_uniform(
[batch_size, FLAGS.height, FLAGS.width, 3], minval=-1, maxval=1)
labels = tf.random_uniform(
[batch_size], minval=0, maxval=999, dtype=tf.int32)
images = tensor_transform_fn(images, params['output_perm'])
return images, labels
def image_serving_input_fn():
"""Serving input fn for raw images.
This function is consumed when exporting a SavedModel.
Returns:
    A ServingInputReceiver capable of serving Inception predictions.
"""
image_bytes_list = tf.placeholder(
shape=[None],
dtype=tf.string,
)
images = tf.map_fn(
preprocess_raw_bytes, image_bytes_list, back_prop=False, dtype=tf.float32)
return tf.estimator.export.ServingInputReceiver(
images, {'image_bytes': image_bytes_list})
def tensor_transform_fn(data, perm):
"""Transpose function.
This function is used to transpose an image tensor on the host and then
perform an inverse transpose on the TPU. The transpose on the TPU gets
effectively elided thus voiding any associated computational cost.
NOTE: Eventually the compiler will be able to detect when this kind of
operation may prove beneficial and perform these types of transformations
implicitly, voiding the need for user intervention
Args:
data: Tensor to be transposed
perm: New ordering of dimensions
Returns:
Transposed tensor
"""
if FLAGS.transpose_enabled:
return tf.transpose(data, perm)
return data
def inception_model_fn(features, labels, mode, params):
"""Inception v3 model using Estimator API."""
num_classes = FLAGS.num_classes
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
is_eval = (mode == tf.estimator.ModeKeys.EVAL)
if isinstance(features, dict):
features = features['feature']
features = tensor_transform_fn(features, params['input_perm'])
# This nested function allows us to avoid duplicating the logic which
# builds the network, for different values of --precision.
def build_network():
if FLAGS.precision == 'bfloat16':
with tf.contrib.tpu.bfloat16_scope():
logits, end_points = inception.inception_v3(
features,
num_classes,
is_training=is_training)
logits = tf.cast(logits, tf.float32)
elif FLAGS.precision == 'float32':
logits, end_points = inception.inception_v3(
features,
num_classes,
is_training=is_training)
return logits, end_points
if FLAGS.clear_update_collections:
# updates_collections must be set to None in order to use fused batchnorm
with arg_scope(inception.inception_v3_arg_scope(
weight_decay=0.0,
batch_norm_decay=BATCH_NORM_DECAY,
batch_norm_epsilon=BATCH_NORM_EPSILON,
updates_collections=None)):
logits, end_points = build_network()
else:
with arg_scope(inception.inception_v3_arg_scope(
batch_norm_decay=BATCH_NORM_DECAY,
batch_norm_epsilon=BATCH_NORM_EPSILON)):
logits, end_points = build_network()
predictions = {
'classes': tf.argmax(input=logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'classify': tf.estimator.export.PredictOutput(predictions)
})
if mode == tf.estimator.ModeKeys.EVAL and FLAGS.display_tensors and (
not FLAGS.use_tpu):
with tf.control_dependencies([
tf.Print(
predictions['classes'], [predictions['classes']],
summarize=FLAGS.eval_batch_size,
message='prediction: ')
]):
labels = tf.Print(
labels, [labels], summarize=FLAGS.eval_batch_size, message='label: ')
one_hot_labels = tf.one_hot(labels, FLAGS.num_classes, dtype=tf.int32)
if 'AuxLogits' in end_points:
tf.losses.softmax_cross_entropy(
onehot_labels=one_hot_labels,
logits=tf.cast(end_points['AuxLogits'], tf.float32),
weights=0.4,
label_smoothing=0.1,
scope='aux_loss')
tf.losses.softmax_cross_entropy(
onehot_labels=one_hot_labels,
logits=logits,
weights=1.0,
label_smoothing=0.1)
losses = tf.add_n(tf.losses.get_losses())
l2_loss = []
for v in tf.trainable_variables():
if 'BatchNorm' not in v.name and 'weights' in v.name:
l2_loss.append(tf.nn.l2_loss(v))
loss = losses + WEIGHT_DECAY * tf.add_n(l2_loss)
initial_learning_rate = FLAGS.learning_rate * FLAGS.train_batch_size / 256
if FLAGS.use_learning_rate_warmup:
# Adjust initial learning rate to match final warmup rate
warmup_decay = FLAGS.learning_rate_decay**(
(FLAGS.warmup_epochs + FLAGS.cold_epochs) /
FLAGS.learning_rate_decay_epochs)
adj_initial_learning_rate = initial_learning_rate * warmup_decay
final_learning_rate = 0.0001 * initial_learning_rate
host_call = None
train_op = None
if is_training:
batches_per_epoch = _NUM_TRAIN_IMAGES / FLAGS.train_batch_size
global_step = tf.train.get_or_create_global_step()
current_epoch = tf.cast(
(tf.cast(global_step, tf.float32) / batches_per_epoch), tf.int32)
learning_rate = tf.train.exponential_decay(
learning_rate=initial_learning_rate,
global_step=global_step,
decay_steps=int(FLAGS.learning_rate_decay_epochs * batches_per_epoch),
decay_rate=FLAGS.learning_rate_decay,
staircase=True)
if FLAGS.use_learning_rate_warmup:
wlr = 0.1 * adj_initial_learning_rate
wlr_height = tf.cast(
0.9 * adj_initial_learning_rate /
(FLAGS.warmup_epochs + FLAGS.learning_rate_decay_epochs - 1),
tf.float32)
epoch_offset = tf.cast(FLAGS.cold_epochs - 1, tf.int32)
exp_decay_start = (FLAGS.warmup_epochs + FLAGS.cold_epochs +
FLAGS.learning_rate_decay_epochs)
lin_inc_lr = tf.add(
wlr, tf.multiply(
tf.cast(tf.subtract(current_epoch, epoch_offset), tf.float32),
wlr_height))
learning_rate = tf.where(
tf.greater_equal(current_epoch, FLAGS.cold_epochs),
(tf.where(tf.greater_equal(current_epoch, exp_decay_start),
learning_rate, lin_inc_lr)),
wlr)
# Set a minimum boundary for the learning rate.
learning_rate = tf.maximum(
learning_rate, final_learning_rate, name='learning_rate')
if FLAGS.optimizer == 'sgd':
tf.logging.info('Using SGD optimizer')
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=learning_rate)
elif FLAGS.optimizer == 'momentum':
tf.logging.info('Using Momentum optimizer')
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=0.9)
elif FLAGS.optimizer == 'RMS':
tf.logging.info('Using RMS optimizer')
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
RMSPROP_DECAY,
momentum=RMSPROP_MOMENTUM,
epsilon=RMSPROP_EPSILON)
else:
tf.logging.fatal('Unknown optimizer:', FLAGS.optimizer)
if FLAGS.use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step=global_step)
if FLAGS.moving_average:
ema = tf.train.ExponentialMovingAverage(
decay=MOVING_AVERAGE_DECAY, num_updates=global_step)
variables_to_average = (
tf.trainable_variables() + tf.moving_average_variables())
with tf.control_dependencies([train_op]), tf.name_scope('moving_average'):
train_op = ema.apply(variables_to_average)
# To log the loss, current learning rate, and epoch for Tensorboard, the
# summary op needs to be run on the host CPU via host_call. host_call
# expects [batch_size, ...] Tensors, thus reshape to introduce a batch
# dimension. These Tensors are implicitly concatenated to
# [params['batch_size']].
gs_t = tf.reshape(global_step, [1])
loss_t = tf.reshape(loss, [1])
lr_t = tf.reshape(learning_rate, [1])
ce_t = tf.reshape(current_epoch, [1])
if not FLAGS.skip_host_call:
def host_call_fn(gs, loss, lr, ce):
"""Training host call. Creates scalar summaries for training metrics.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the
model to the `metric_fn`, provide them as part of the `host_call`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `host_call`.
Args:
      gs: `Tensor` with shape `[batch]` for the global_step.
loss: `Tensor` with shape `[batch]` for the training loss.
lr: `Tensor` with shape `[batch]` for the learning_rate.
ce: `Tensor` with shape `[batch]` for the current_epoch.
Returns:
List of summary ops to run on the CPU host.
"""
gs = gs[0]
with summary.create_file_writer(FLAGS.model_dir).as_default():
with summary.always_record_summaries():
summary.scalar('loss', tf.reduce_mean(loss), step=gs)
summary.scalar('learning_rate', tf.reduce_mean(lr), step=gs)
summary.scalar('current_epoch', tf.reduce_mean(ce), step=gs)
return summary.all_summary_ops()
host_call = (host_call_fn, [gs_t, loss_t, lr_t, ce_t])
eval_metrics = None
if is_eval:
def metric_fn(labels, logits):
"""Evaluation metric function. Evaluates accuracy.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the model
to the `metric_fn`, provide as part of the `eval_metrics`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `eval_metrics`.
Args:
labels: `Tensor` with shape `[batch, ]`.
logits: `Tensor` with shape `[batch, num_classes]`.
Returns:
A dict of the metrics to return from evaluation.
"""
predictions = tf.argmax(logits, axis=1)
top_1_accuracy = tf.metrics.accuracy(labels, predictions)
in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
top_5_accuracy = tf.metrics.mean(in_top_5)
return {
'accuracy': top_1_accuracy,
'accuracy@5': top_5_accuracy,
}
eval_metrics = (metric_fn, [labels, logits])
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
host_call=host_call,
eval_metrics=eval_metrics)
class LoadEMAHook(tf.train.SessionRunHook):
"""Hook to load exponential moving averages into corresponding variables."""
def __init__(self, model_dir):
super(LoadEMAHook, self).__init__()
self._model_dir = model_dir
def begin(self):
ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
variables_to_restore = ema.variables_to_restore()
self._load_ema = tf.contrib.framework.assign_from_checkpoint_fn(
tf.train.latest_checkpoint(self._model_dir), variables_to_restore)
def after_create_session(self, sess, coord):
tf.logging.info('Reloading EMA...')
self._load_ema(sess)
def main(unused_argv):
del unused_argv # Unused
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu,
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project)
assert FLAGS.precision == 'bfloat16' or FLAGS.precision == 'float32', (
'Invalid value for --precision flag; must be bfloat16 or float32.')
tf.logging.info('Precision: %s', FLAGS.precision)
params = {
'input_perm': [0, 1, 2, 3],
'output_perm': [0, 1, 2, 3],
}
batch_axis = 0
if FLAGS.transpose_enabled:
params['input_perm'] = [3, 0, 1, 2]
params['output_perm'] = [1, 2, 3, 0]
batch_axis = 3
if FLAGS.eval_total_size > 0:
eval_size = FLAGS.eval_total_size
else:
eval_size = _NUM_EVAL_IMAGES
eval_steps = eval_size // FLAGS.eval_batch_size
iterations = (eval_steps if FLAGS.mode == 'eval' else
FLAGS.iterations)
eval_batch_size = (None if FLAGS.mode == 'train' else
FLAGS.eval_batch_size)
per_host_input_for_training = (
FLAGS.num_shards <= 8 if FLAGS.mode == 'train' else True)
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_secs=FLAGS.save_checkpoints_secs,
save_summary_steps=FLAGS.save_summary_steps,
session_config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement),
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=iterations,
num_shards=FLAGS.num_shards,
per_host_input_for_training=per_host_input_for_training))
inception_classifier = tf.contrib.tpu.TPUEstimator(
model_fn=inception_model_fn,
use_tpu=FLAGS.use_tpu,
config=run_config,
params=params,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=eval_batch_size,
batch_axis=(batch_axis, 0))
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
use_bfloat16 = FLAGS.precision == 'bfloat16'
imagenet_train = InputPipeline(
is_training=True,
data_dir=FLAGS.data_dir,
use_bfloat16=use_bfloat16)
imagenet_eval = InputPipeline(
is_training=False,
data_dir=FLAGS.data_dir,
use_bfloat16=use_bfloat16)
if FLAGS.moving_average:
eval_hooks = [LoadEMAHook(FLAGS.model_dir)]
else:
eval_hooks = []
if FLAGS.mode == 'eval':
# Run evaluation when there is a new checkpoint
for checkpoint in evaluation.checkpoints_iterator(
FLAGS.model_dir, timeout=FLAGS.eval_timeout):
tf.logging.info('Starting to evaluate.')
try:
start_timestamp = time.time() # Includes compilation time
eval_results = inception_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=eval_steps,
hooks=eval_hooks,
checkpoint_path=checkpoint)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info(
'Eval results: %s. Elapsed seconds: %d', eval_results, elapsed_time)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(checkpoint).split('-')[1])
if current_step >= FLAGS.train_steps:
tf.logging.info(
'Evaluation finished after training step %d', current_step)
break
except tf.errors.NotFoundError:
# Since the coordinator is on a different job than the TPU worker,
# sometimes the TPU worker does not finish initializing until long after
# the CPU job tells it to start evaluating. In this case, the checkpoint
# file could have been deleted already.
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint', checkpoint)
elif FLAGS.mode == 'train_and_eval':
for cycle in range(FLAGS.train_steps // FLAGS.train_steps_per_eval):
tf.logging.info('Starting training cycle %d.' % cycle)
inception_classifier.train(
input_fn=imagenet_train.input_fn, steps=FLAGS.train_steps_per_eval)
      tf.logging.info('Starting evaluation cycle %d.' % cycle)
eval_results = inception_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
else:
tf.logging.info('Starting training ...')
inception_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=FLAGS.train_steps)
if FLAGS.export_dir is not None:
tf.logging.info('Starting to export model.')
inception_classifier.export_saved_model(
export_dir_base=FLAGS.export_dir,
serving_input_receiver_fn=image_serving_input_fn)
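# Editor's hedged sketch (not part of the original file): `host_call` and
# `eval_metrics` share one TPUEstimatorSpec contract -- a (fn, [tensors])
# tuple in which every tensor gains a leading shard dimension before fn runs
# on the CPU host, which is why host_call_fn above starts with `gs = gs[0]`.
# The helper below restates that wrapping in isolation; names are illustrative.
def _example_host_call(global_step, loss):
  """Return a (fn, args) tuple in the TPUEstimatorSpec host_call format."""
  gs_t = tf.reshape(global_step, [1])  # add the leading per-shard dimension
  loss_t = tf.reshape(loss, [1])
  def _host_call_fn(gs, loss):
    gs = gs[0]  # strip the leading dimension again on the host
    with summary.create_file_writer(FLAGS.model_dir).as_default():
      with summary.always_record_summaries():
        summary.scalar('example_loss', tf.reduce_mean(loss), step=gs)
        return summary.all_summary_ops()
  return (_host_call_fn, [gs_t, loss_t])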
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
|
{
"content_hash": "69c025380343683cf56b041980d33e52",
"timestamp": "",
"source": "github",
"line_count": 871,
"max_line_length": 85,
"avg_line_length": 34.01607347876005,
"alnum_prop": 0.6672066963683002,
"repo_name": "mlperf/training_results_v0.5",
"id": "fb2c85a4264b025824dd7ce643b8fc09b59925f8",
"size": "30318",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "v0.5.0/google/cloud_v3.8/resnet-tpuv3-8/code/resnet/model/tpu/models/experimental/inception/inception_v3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5720"
},
{
"name": "C++",
"bytes": "1288180"
},
{
"name": "CMake",
"bytes": "40880"
},
{
"name": "CSS",
"bytes": "32420"
},
{
"name": "Cuda",
"bytes": "1362093"
},
{
"name": "Dockerfile",
"bytes": "19488"
},
{
"name": "Go",
"bytes": "1088660"
},
{
"name": "HTML",
"bytes": "19756888"
},
{
"name": "Java",
"bytes": "45405"
},
{
"name": "JavaScript",
"bytes": "302838"
},
{
"name": "Jupyter Notebook",
"bytes": "9104667"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "Makefile",
"bytes": "3652"
},
{
"name": "Python",
"bytes": "31508548"
},
{
"name": "Scala",
"bytes": "106211"
},
{
"name": "Shell",
"bytes": "409745"
}
],
"symlink_target": ""
}
|
""" Functions dealing with invalid commands """
def print_invalid_command(command, *args, **kwargs):
print('Invalid command: {}'.format(command))
|
{
"content_hash": "89121bfbf7f7d0b5d0e0d38baa3f215a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 52,
"avg_line_length": 30.4,
"alnum_prop": 0.6973684210526315,
"repo_name": "robobrobro/foe",
"id": "6cc5dc8ff1ea0564cc963ffe8df2a4e255a70795",
"size": "152",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "foe/command/invalid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62138"
},
{
"name": "Shell",
"bytes": "106"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
# Import astronomical modules
from astropy.units import Unit
from astropy import constants
# Import the relevant PTS classes and modules
from ..tools.logging import log
from ..tools import filesystem as fs
from ..basics.filter import Filter
from ...magic.core.image import Image
from ...magic.core.frame import Frame
from ...magic.basics.coordinatesystem import CoordinateSystem
from ..tools.special import remote_filter_convolution, remote_convolution_frame
# -----------------------------------------------------------------
# The speed of light
speed_of_light = constants.c
# -----------------------------------------------------------------
class ObservedImageMaker(object):
"""
This class ...
"""
def __init__(self):
"""
The constructor ...
:return:
"""
# Call the constructor of the base class
super(ObservedImageMaker, self).__init__()
# -- Attributes --
# The simulation prefix
self.simulation_prefix = None
# The paths to the 'total' FITS files produced by SKIRT
self.fits_paths = None
# The wavelengths of the simulation
self.wavelengths = None
# Filter names
self.filter_names = ["FUV", "NUV", "u", "g", "r", "i", "z", "H", "J", "Ks", "I1", "I2", "I3", "I4", "W1", "W2",
"W3", "W4", "Pacs 70", "Pacs 100", "Pacs 160", "SPIRE 250", "SPIRE 350", "SPIRE 500"]
# The instrument names
self.instrument_names = None
# The filters for which the images should be created
self.filters = dict()
# The dictionary containing the images for various SKIRT output datacubes
self.images = dict()
# The reference WCS
self.wcs = None
# -----------------------------------------------------------------
def run(self, simulation, output_path=None, filter_names=None, instrument_names=None, wcs_path=None, kernel_paths=None, unit=None, host_id=None):
"""
This function ...
:param simulation:
:param output_path:
:param filter_names:
:param instrument_names:
:param wcs_path:
:param kernel_paths:
:param unit:
:param host_id:
:return:
"""
# Obtain the paths to the 'total' FITS files created by the simulation
self.fits_paths = simulation.totalfitspaths()
# Get the list of wavelengths for the simulation
self.wavelengths = simulation.wavelengths()
# Get the simulation prefix
self.simulation_prefix = simulation.prefix()
# Set the filter names
if filter_names is not None: self.filter_names = filter_names
# Set the instrument names
self.instrument_names = instrument_names
# Create the filters
self.create_filters()
# Make the observed images
self.make_images(host_id)
# Set the WCS of the created images
if wcs_path is not None: self.set_wcs(wcs_path)
# Convolve the image with a given convolution kernel
if kernel_paths is not None:
# Check whether the WCS for the image is defined. If not, show a warning and skip the convolution
if wcs_path is None: log.warning("WCS of the image is not defined, so convolution cannot be performed (the pixelscale is undefined)")
else: self.convolve(kernel_paths, host_id)
# Convert the units (WCS has to be loaded!)
if unit is not None: self.convert_units(unit)
# Write the results
if output_path is not None: self.write(output_path)
# -----------------------------------------------------------------
def create_filters(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Constructing the filter objects ...")
# Loop over the different filter names
for filter_name in self.filter_names:
# Debugging
log.debug("Constructing the " + filter_name + " filter ...")
# Create the filter
fltr = Filter.from_string(filter_name)
# Add the filter to the list
self.filters[filter_name] = fltr
# -----------------------------------------------------------------
def make_images(self, host_id=None):
"""
This function ...
:param host_id:
:return:
"""
# Inform the user
log.info("Making the observed images (this may take a while) ...")
# Loop over the different simulated images
for path in self.fits_paths:
# Get the name of the instrument
instr_name = instrument_name(path, self.simulation_prefix)
            # If a list of instruments is defined and this instrument is not in this list, skip it
if self.instrument_names is not None and instr_name not in self.instrument_names: continue
# Get the name of the datacube (as given by SKIRT)
datacube_name = fs.strip_extension(fs.name(path))
# Debugging
log.debug("Making the observed images for " + datacube_name + ".fits ...")
# Create a dictionary to contain the observed images for this FITS file
images = dict()
# The filter convolution is performed remotely
if host_id is not None:
# Upload the datacube, wavelength grid and filter properties, perform the convolution on the remote and get the resulting image frames back (as a dictionary where the keys are the filter names)
frames = remote_filter_convolution(host_id, path, self.wavelengths, self.filters)
# Add the resulting image frames to the dictionary
for filter_name in frames:
# Add the observed image to the dictionary
images[filter_name] = frames[filter_name]
# The calculation is performed locally
else:
# Load the simulated image
datacube = Image.from_file(path, always_call_first_primary=False)
# Convert the frames from neutral surface brightness to wavelength surface brightness
for l in range(len(self.wavelengths)):
# Get the wavelength
wavelength = self.wavelengths[l]
# Determine the name of the frame in the datacube
frame_name = "frame" + str(l)
# Divide this frame by the wavelength in micron
datacube.frames[frame_name] /= wavelength
# Set the new unit
datacube.frames[frame_name].unit = "W / (m2 * arcsec2 * micron)"
# Convert the datacube to a numpy array where wavelength is the third dimension
fluxdensities = datacube.asarray()
# Loop over the different filters
for filter_name in self.filters:
fltr = self.filters[filter_name]
# Debugging
log.debug("Making the observed image for the " + str(fltr) + " filter ...")
# Calculate the observed image frame
data = fltr.convolve(self.wavelengths, fluxdensities)
frame = Frame(data)
# Set the unit of the frame
frame.unit = "W/(m2 * arcsec2 * micron)"
# Add the observed image to the dictionary
images[filter_name] = frame
# Add the dictionary of images of the current datacube to the complete images dictionary (with the datacube name as a key)
self.images[datacube_name] = images
# -----------------------------------------------------------------
def set_wcs(self, wcs_path):
"""
This function ...
:param wcs_path:
:return:
"""
# TODO: allow multiple paths (in a dictionary) for the different datacubes (so that for certain instruments the WCS should not be set on the simulated images)
# Inform the user
log.info("Setting the WCS of the simulated images ...")
# Debugging
log.debug("Loading the coordinate system from '" + wcs_path + "' ...")
# Load the WCS
self.wcs = CoordinateSystem.from_file(wcs_path)
# Loop over the different images and set the WCS
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Debugging
log.debug("Setting the coordinate system of the " + filter_name + " image of the '" + datacube_name + "' instrument ...")
# Set the coordinate system for this frame
self.images[datacube_name][filter_name].wcs = self.wcs
# -----------------------------------------------------------------
def convolve(self, kernel_paths, host_id=None):
"""
This function ...
:param kernel_paths:
:param host_id:
:return:
"""
# Inform the user
log.info("Convolving the images ...")
# If the convolutions must be performed remotely
if host_id is not None:
# Loop over the images
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Check if the name of the image filter is a key in the 'kernel_paths' dictionary. If not, don't convolve.
if filter_name not in kernel_paths or kernel_paths[filter_name] is None: continue
# Determine the kernel path for this image
kernel_path = kernel_paths[filter_name]
# Perform the remote convolution
self.images[datacube_name][filter_name] = remote_convolution_frame(self.images[datacube_name][filter_name], kernel_path, host_id)
# The convolution is performed locally
else:
# Loop over the images
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Check if the name of the image filter is a key in the 'kernel_paths' dictionary. If not, don't convolve.
if filter_name not in kernel_paths or kernel_paths[filter_name] is None: continue
# Load the kernel
kernel = Frame.from_file(kernel_paths[filter_name])
# Debugging
log.debug("Convolving the '" + filter_name + "' image of the '" + datacube_name + "' instrument ...")
# Convolve this image frame
self.images[datacube_name][filter_name].convolve(kernel)
# -----------------------------------------------------------------
def convert_units(self, unit):
"""
This function ...
:param unit:
:return:
"""
# TODO: right now, this is just an implementation of the conversion from W / (m2 * arcsec2 * micron) to MJy/sr
# 1 Jy = 1e-26 * W / (m2 * Hz)
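        # Editor's worked example (hedged; the 70 micron pivot is purely
        # illustrative): (70 micron)**2 / c = 4900 micron**2 / 2.998e14
        # micron/s ~= 1.63e-11 micron/Hz, and the angular/flux factor below
        # is 1e26 * 1e-6 * (1 sr / 1 arcsec2) = 1e20 * 4.2545e10 ~= 4.25e30,
        # which together take W / (m2 * arcsec2 * micron) to MJy / sr.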
# Inform the user
log.info("Converting the units of the images to " + str(unit) + " ...")
# Get the pixelscale
#pixelscale = self.wcs.average_pixelscale.to("arcsec/pix").value # in arcsec**2 / pixel
# Loop over the images
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Debugging
log.debug("Converting the unit of the " + filter_name + " image of the '" + datacube_name + "' instrument ...")
# Get the pivot wavelength of the filter
fltr = self.filters[filter_name]
pivot = fltr.pivotwavelength() * Unit("micron")
# Determine the conversion factor
conversion_factor = 1.0
# From surface brightness to flux density (no)
#conversion_factor *=
# From W / (m2 * arcsec2 * micron) to W / (m2 * arcsec2 * Hz)
conversion_factor *= (pivot ** 2 / speed_of_light).to("micron/Hz").value
# From W / (m2 * arcsec2 * Hz) to MJy / sr
#conversion_factor *= (Unit("W/(m2 * arcsec2 * Hz)") / Unit("MJy/sr")).to("")
conversion_factor *= 1e26 * 1e-6 * (Unit("sr") / Unit("arcsec2")).to("")
# Convert
self.images[datacube_name][filter_name] *= conversion_factor
self.images[datacube_name][filter_name].unit = "MJy/sr"
# -----------------------------------------------------------------
def write(self, output_path):
"""
This function ...
:param output_path:
:return:
"""
# Inform the user
log.info("Writing the images ...")
# Loop over the different images (self.images is a nested dictionary of dictionaries)
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Determine the path to the output FITS file
path = fs.join(output_path, datacube_name + "__" + filter_name + ".fits")
# Save the image
self.images[datacube_name][filter_name].save(path)
# -----------------------------------------------------------------
def instrument_name(datacube_path, prefix):
"""
This function ...
:param datacube_path:
:param prefix:
:return:
"""
return fs.name(datacube_path).split("_total.fits")[0].split(prefix + "_")[1]
# -----------------------------------------------------------------
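def _example_make_observed_images(simulation):
    """
    Editor's hedged usage sketch: drive ObservedImageMaker end-to-end.
    `simulation` is any SKIRT simulation wrapper exposing totalfitspaths(),
    wavelengths() and prefix(); the paths below are hypothetical.
    """
    maker = ObservedImageMaker()
    maker.run(simulation, output_path="observed/", filter_names=["FUV", "NUV"],
              wcs_path="reference.fits", unit="MJy/sr")
# -----------------------------------------------------------------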
|
{
"content_hash": "91b0651c161cbf3462bea417a13c362a",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 209,
"avg_line_length": 35.04556962025316,
"alnum_prop": 0.5420067904355992,
"repo_name": "Stargrazer82301/CAAPR",
"id": "60c1a81157e41324b28041d302b03e92ee199d40",
"size": "14336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CAAPR/CAAPR_AstroMagic/PTS/pts/core/misc/images.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "CSS",
"bytes": "21972"
},
{
"name": "HTML",
"bytes": "2408"
},
{
"name": "Prolog",
"bytes": "16433"
},
{
"name": "Python",
"bytes": "4465217"
},
{
"name": "Shell",
"bytes": "3793"
}
],
"symlink_target": ""
}
|
"""benchmarking through py.test"""
import py
from py.__.test.item import Item
from py.__.test.terminal.terminal import TerminalSession
from math import ceil as _ceil, floor as _floor, log10
import timeit
from inspect import getsource
# from IPython.Magic.magic_timeit
#units = ["s", "ms", "\xc2\xb5s", "ns"]
units = ["s", "ms", "us", "ns"]
scaling = [1, 1e3, 1e6, 1e9]
unitn = dict((s,i) for i,s in enumerate(units))
precision = 3
# like py.test Directory but scan for 'bench_<smth>.py'
class Directory(py.test.collect.Directory):
def filefilter(self, path):
b = path.purebasename
ext = path.ext
return b.startswith('bench_') and ext == '.py'
# like py.test Module but scan for 'bench_<smth>' and 'timeit_<smth>'
class Module(py.test.collect.Module):
def funcnamefilter(self, name):
return name.startswith('bench_') or name.startswith('timeit_')
# Function level benchmarking driver
class Timer(timeit.Timer):
def __init__(self, stmt, setup='pass', timer=timeit.default_timer, globals=globals()):
# copy of timeit.Timer.__init__
# similarity index 95%
self.timer = timer
stmt = timeit.reindent(stmt, 8)
setup = timeit.reindent(setup, 4)
src = timeit.template % {'stmt': stmt, 'setup': setup}
self.src = src # Save for traceback display
code = compile(src, timeit.dummy_src_name, "exec")
ns = {}
#exec code in globals(), ns -- original timeit code
exec code in globals, ns # -- we use caller-provided globals instead
self.inner = ns["inner"]
class Function(py.__.test.item.Function):
def __init__(self, *args, **kw):
super(Function, self).__init__(*args, **kw)
self.benchtime = None
self.benchtitle = None
def execute(self, target, *args):
# get func source without first 'def func(...):' line
src = getsource(target)
src = '\n'.join( src.splitlines()[1:] )
# extract benchmark title
if target.func_doc is not None:
self.benchtitle = target.func_doc
else:
self.benchtitle = src.splitlines()[0].strip()
# XXX we ignore args
timer = Timer(src, globals=target.func_globals)
if self.name.startswith('timeit_'):
# from IPython.Magic.magic_timeit
repeat = 3
number = 1
for i in range(1,10):
t = timer.timeit(number)
if t >= 0.2:
number *= (0.2 / t)
number = int(_ceil(number))
break
if t <= 0.02:
                    # not close enough to 0.2s yet
number *= 10
else:
                    # since we are very close to 0.2s, adjust number
                    # so that the timing run does not take too long
number *= (0.2 / t)
number = int(_ceil(number))
break
self.benchtime = min(timer.repeat(repeat, number)) / number
# 'bench_<smth>'
else:
self.benchtime = timer.timeit(1)
class BenchSession(TerminalSession):
def header(self, colitems):
#self.out.sep("-", "benchmarking starts")
super(BenchSession, self).header(colitems)
def footer(self, colitems):
super(BenchSession, self).footer(colitems)
#self.out.sep("-", "benchmarking ends")
self.out.write('\n')
self.print_bench_results()
def print_bench_results(self):
self.out.write('==============================\n')
self.out.write(' *** BENCHMARKING RESULTS *** \n')
self.out.write('==============================\n')
self.out.write('\n')
# benchname, time, benchtitle
results = []
for item, outcome in self._memo:
if isinstance(item, Item):
best = item.benchtime
if best is None:
# skipped or failed benchmarks
tstr = '---'
else:
# from IPython.Magic.magic_timeit
if best > 0.0:
order = min(-int(_floor(log10(best)) // 3), 3)
else:
order = 3
tstr = "%.*g %s" % (precision, best * scaling[order], units[order])
results.append( [item.name, tstr, item.benchtitle] )
# dot/unit align second column
# FIXME simpler? this is crappy -- shame on me...
wm = [0]*len(units)
we = [0]*len(units)
for s in results:
            tstr = s[1]
            if tstr == '---':   # skipped/failed benchmarks carry no number
                continue
            n,u = tstr.split()
# unit n
un = unitn[u]
try:
m,e = n.split('.')
except ValueError:
m,e = n,''
wm[un] = max(len(m), wm[un])
we[un] = max(len(e), we[un])
for s in results:
            tstr = s[1]
            if tstr == '---':   # skipped/failed benchmarks carry no number
                continue
            n,u = tstr.split()
un = unitn[u]
try:
m,e = n.split('.')
except ValueError:
m,e = n,''
m = m.rjust(wm[un])
e = e.ljust(we[un])
if e.strip():
n = '.'.join((m,e))
else:
n = ' '.join((m,e))
# let's put the number into the right place
txt = ''
for i in range(len(units)):
if i == un:
txt += n
else:
txt += ' '*(wm[i]+we[i]+1)
s[1] = '%s %s' % (txt, u)
# align all columns besides the last one
for i in range(2):
w = max(len(s[i]) for s in results)
for s in results:
s[i] = s[i].ljust(w)
# show results
for s in results:
self.out.write('%s | %s | %s\n' % tuple(s))
def main(args=None):
# hook our Directory/Module/Function as defaults
from py.__.test import defaultconftest
defaultconftest.Directory = Directory
defaultconftest.Module = Module
defaultconftest.Function = Function
# hook BenchSession as py.test session
config = py.test.config
config._getsessionclass = lambda: BenchSession
py.test.cmdline.main(args)
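# Editor's hedged sketch: the %timeit-style autoscaling loop above, restated
# against the stdlib timeit.Timer so the heuristic can be read in isolation.
# It is not used by the benchmarking driver itself.
def _example_autoscale_number(stmt='pass', setup='pass', target=0.2):
    """Grow `number` until a single timing run takes roughly `target` seconds."""
    t = timeit.Timer(stmt, setup)
    number = 1
    for i in range(1, 10):
        elapsed = t.timeit(number)
        if elapsed >= target:
            # overshot: scale down so one run takes about `target` seconds
            return int(_ceil(number * (target / elapsed)))
        if elapsed <= target / 10.0:
            number *= 10    # far below target: jump an order of magnitude
        else:
            # close to target: fine-tune and stop
            return int(_ceil(number * (target / elapsed)))
    return number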
|
{
"content_hash": "17a20198da2a439d740d5a635429aeb3",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 90,
"avg_line_length": 27.721739130434784,
"alnum_prop": 0.5006273525721455,
"repo_name": "srjoglekar246/sympy",
"id": "b6c6b27077d22c864fa6717850f020f3e0d3d0f1",
"size": "6376",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/utilities/benchmarking.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10283965"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "TeX",
"bytes": "8789"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
"""
celery.security
~~~~~~~~~~~~~~~
Module implementing the signing message serializer.
"""
from __future__ import absolute_import
from kombu.serialization import (
registry, disable_insecure_serializers as _disable_insecure_serializers,
)
from celery.exceptions import ImproperlyConfigured
from .serialization import register_auth
SSL_NOT_INSTALLED = """\
You need to install the pyOpenSSL library to use the auth serializer.
Please install by:
$ pip install pyOpenSSL
"""
SETTING_MISSING = """\
Sorry, but you have to configure the
* CELERY_SECURITY_KEY
* CELERY_SECURITY_CERTIFICATE, and the
* CELERY_SECURITY_CERT_STORE
configuration settings to use the auth serializer.
Please see the configuration reference for more information.
"""
__all__ = ['setup_security']
def setup_security(allowed_serializers=None, key=None, cert=None, store=None,
digest='sha1', serializer='json', app=None):
"""See :meth:`@Celery.setup_security`."""
if app is None:
from celery import current_app
app = current_app._get_current_object()
_disable_insecure_serializers(allowed_serializers)
conf = app.conf
if conf.CELERY_TASK_SERIALIZER != 'auth':
return
try:
from OpenSSL import crypto # noqa
except ImportError:
raise ImproperlyConfigured(SSL_NOT_INSTALLED)
key = key or conf.CELERY_SECURITY_KEY
cert = cert or conf.CELERY_SECURITY_CERTIFICATE
store = store or conf.CELERY_SECURITY_CERT_STORE
if not (key and cert and store):
raise ImproperlyConfigured(SETTING_MISSING)
with open(key) as kf:
with open(cert) as cf:
register_auth(kf.read(), cf.read(), store, digest, serializer)
registry._set_default_serializer('auth')
def disable_untrusted_serializers(whitelist=None):
_disable_insecure_serializers(allowed=whitelist)
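# Editor's hedged usage sketch: the configuration that setup_security()
# checks for, with hypothetical file paths; CELERY_TASK_SERIALIZER must be
# 'auth' or the call returns without registering anything.
def _example_enable_auth_serializer(app):
    app.conf.update(
        CELERY_TASK_SERIALIZER='auth',
        CELERY_SECURITY_KEY='/etc/ssl/private/worker.key',
        CELERY_SECURITY_CERTIFICATE='/etc/ssl/certs/worker.pem',
        CELERY_SECURITY_CERT_STORE='/etc/ssl/certs/*.pem',
    )
    setup_security(app=app)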
|
{
"content_hash": "8449a83183853b2717d6885ba941825b",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 77,
"avg_line_length": 27.12857142857143,
"alnum_prop": 0.6929963138493944,
"repo_name": "johankaito/fufuka",
"id": "352d400cfcec4f6a6c2b435ebb8e18f5378f44c6",
"size": "1923",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "microblog/flask/venv/lib/python2.7/site-packages/celery/security/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "70167"
},
{
"name": "C",
"bytes": "993849"
},
{
"name": "C++",
"bytes": "4924114"
},
{
"name": "CSS",
"bytes": "57195"
},
{
"name": "Fortran",
"bytes": "10375"
},
{
"name": "HTML",
"bytes": "3832217"
},
{
"name": "Java",
"bytes": "608432"
},
{
"name": "JavaScript",
"bytes": "48304"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "575902"
},
{
"name": "Python",
"bytes": "41068291"
},
{
"name": "Shell",
"bytes": "952977"
},
{
"name": "XSLT",
"bytes": "46584"
}
],
"symlink_target": ""
}
|
import re
import random
import base64
import logging
class RandomProxy(object):
def __init__(self, settings):
self.proxy_list = settings.get('PROXY_LIST')
fin = open(self.proxy_list)
self.proxies = {}
for line in fin.readlines():
            parts = re.match(r'(\w+://)(\w+:\w+@)?(.+)', line)
# Cut trailing @
if parts.group(2):
user_pass = parts.group(2)[:-1]
else:
user_pass = ''
self.proxies[parts.group(1) + parts.group(3)] = user_pass
fin.close()
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def process_request(self, request, spider):
# Don't overwrite with a random one (server-side state for IP)
if 'proxy' in request.meta:
return
proxy_address = random.choice(self.proxies.keys())
proxy_user_pass = self.proxies[proxy_address]
request.meta['proxy'] = proxy_address
if proxy_user_pass:
            basic_auth = 'Basic ' + base64.encodestring(proxy_user_pass).strip()  # encodestring appends '\n'
request.headers['Proxy-Authorization'] = basic_auth
def process_exception(self, request, exception, spider):
proxy = request.meta['proxy']
logging.debug('Removing failed proxy <%s>, %d proxies left' % (
proxy, len(self.proxies)))
try:
del self.proxies[proxy]
        except KeyError:  # deleting a missing dict key raises KeyError, not ValueError
pass
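# Editor's hedged usage sketch: wiring the middleware into a Scrapy project.
# The project module path and proxy file are hypothetical; the proxy list
# holds one 'scheme://user:pass@host:port' entry per line, matching the
# regex parsed in __init__ above.
EXAMPLE_SETTINGS = {
    'PROXY_LIST': '/path/to/proxy_list.txt',
    'DOWNLOADER_MIDDLEWARES': {
        'myproject.middlewares.RandomProxy': 100,
        'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 110,
    },
}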
|
{
"content_hash": "276456e51a5397487268e61dba578d1f",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 72,
"avg_line_length": 29.68,
"alnum_prop": 0.5633423180592992,
"repo_name": "trujunzhang/djzhang-targets",
"id": "1cdc82fb5ca4a115fe7f523edcbc24aea2b7ac54",
"size": "2608",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cwgooglelinkedin/cwgooglelinkedin/extensions/randomproxy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7418804"
},
{
"name": "JavaScript",
"bytes": "936547"
},
{
"name": "PHP",
"bytes": "94539"
},
{
"name": "Python",
"bytes": "564898"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
}
|
from django import forms
from django.contrib.auth.forms import PasswordResetForm, UserCreationForm, AuthenticationForm
from django.utils.translation import gettext_lazy as _
from django.contrib.auth import get_user_model
from captcha.fields import ReCaptchaField
from django.conf import settings
class CaptchaPasswordResetForm(PasswordResetForm):
captcha = (
ReCaptchaField()
if settings.RECAPTCHA_PUBLIC_KEY != '' and settings.RECAPTCHA_PRIVATE_KEY != ''
else None
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['email'].widget.attrs.update({'autofocus': 'autofocus'})
def get_users(self, email):
# removed check verifying if password is unusable
user_model = get_user_model()
active_users = user_model._default_manager.filter(**{
'%s__iexact' % user_model.get_email_field_name(): email,
'is_active': True,
})
return active_users
class UsernameOrEmailAuthenticationForm(AuthenticationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['username'].label = _("Username / Email")
class UserCreationForm(UserCreationForm):
"""
A UserCreationForm with optional password inputs.
"""
def __init__(self, *args, **kwargs):
super(UserCreationForm, self).__init__(*args, **kwargs)
self.fields['password1'].required = False
self.fields['password2'].required = False
        # If one field gets autocompleted but not the other, our 'neither
        # or both passwords' validation below would be triggered.
self.fields['password1'].widget.attrs['autocomplete'] = 'off'
self.fields['password2'].widget.attrs['autocomplete'] = 'off'
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
if bool(password1) ^ bool(password2):
raise forms.ValidationError("Fill out both fields")
return password2
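# Editor's hedged sketch: the XOR test used in clean_password2() above,
# restated as a standalone predicate. It is true for a valid pair, i.e.
# both fields filled or both left empty.
def _example_both_or_neither(password1, password2):
    return not (bool(password1) ^ bool(password2))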
|
{
"content_hash": "e22417837dc136a5938a5b8cf58d1d2e",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 93,
"avg_line_length": 36.59016393442623,
"alnum_prop": 0.6671146953405018,
"repo_name": "wallysalami/gamified-education",
"id": "cafa255de8b10ebf0c25c4d9f42ad4daa90adb03",
"size": "2232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "course/forms/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14546"
},
{
"name": "Dockerfile",
"bytes": "196"
},
{
"name": "HTML",
"bytes": "29374"
},
{
"name": "JavaScript",
"bytes": "3937"
},
{
"name": "Python",
"bytes": "84126"
}
],
"symlink_target": ""
}
|
"""
This plugin does not perform ANY test: The aim is to visit all URLs grabbed so far and build
the transaction log to feed data to other plugins
NOTE: This is an active plugin because it may visit URLs retrieved by vulnerability scanner spiders
which may be considered sensitive or include vulnerability probing
"""
import logging
from owtf.requester.base import requester
from owtf.managers.url import get_urls_to_visit
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Visit URLs found by other tools, some could be sensitive: need permission"
def run(PluginInfo):
urls = get_urls_to_visit()
for url in urls: # This will return only unvisited urls
requester.get_transaction(True, url) # Use cache if possible
Content = "{} URLs were visited".format(str(len(urls)))
logging.info(Content)
return plugin_helper.HtmlString(Content)
|
{
"content_hash": "76ca22d22afb1b10e6b2da68b9dcd766",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 99,
"avg_line_length": 39.81818181818182,
"alnum_prop": 0.7625570776255708,
"repo_name": "owtf/owtf",
"id": "2cedad59b10aa0fe10ea1da3ac0153b03988dee5",
"size": "876",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "owtf/plugins/web/active/Visit_URLs@OWTF-WSP-001.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "146"
},
{
"name": "Dockerfile",
"bytes": "2160"
},
{
"name": "HTML",
"bytes": "1989"
},
{
"name": "JavaScript",
"bytes": "487381"
},
{
"name": "Makefile",
"bytes": "4814"
},
{
"name": "Python",
"bytes": "690934"
},
{
"name": "SCSS",
"bytes": "19170"
},
{
"name": "Shell",
"bytes": "52067"
},
{
"name": "TypeScript",
"bytes": "261109"
}
],
"symlink_target": ""
}
|
"""
Base class every component used. A component is handled as a plugin.
The features of a plugin is defined by interfaces. Those interfaces will
force a component to resolve queries that come from the framework.
"""
from glob import glob
import imp
import pkg_resources
from pkg_resources import working_set, DistributionNotFound, VersionConflict, \
UnknownExtra
import os
import sys
from concurrent.core.util.texttransforms import exception_to_unicode
# Only export 'load_components', since it is the sole entry point of this module
__all__ = ['load_components']
def _enable_component(env, module):
"""Enable the given component by adding an entry to the configuration.
TODO: This depends on how I'll finally create the config behavior of our environment :D
"""
if module + '.*' not in env.config['components']:
env.config['components'].set(module + '.*', 'enabled')
def load_eggs(entry_point_name):
"""Loader that loads any eggs on the search path and `sys.path`."""
def _load_eggs(env, search_path, auto_enable=None):
# Note that the following doesn't seem to support unicode search_path
distributions, errors = working_set.find_plugins(
pkg_resources.Environment(search_path)
)
for dist in distributions:
env.log.debug('Adding plugin %s from %s', dist, dist.location)
working_set.add(dist)
def _log_error(item, e):
ue = exception_to_unicode(e)
if isinstance(e, DistributionNotFound):
env.log.debug('Skipping "%s": ("%s" not found)', item, ue)
elif isinstance(e, VersionConflict):
env.log.error('Skipping "%s": (version conflict "%s")',
item, ue)
elif isinstance(e, UnknownExtra):
env.log.error('Skipping "%s": (unknown extra "%s")', item, ue)
elif isinstance(e, ImportError):
#env.log.error('Skipping "%s": (can\'t import "%s")', item, ue)
print('Skipping "%s": (can\'t import "%s")' % ( item, ue) )
else:
env.log.error('Skipping "%s": (error "%s")', item, ue)
for dist, e in errors.iteritems():
_log_error(dist, e)
for entry in working_set.iter_entry_points(entry_point_name):
env.log.debug('Loading %s from %s', entry.name,
entry.dist.location)
try:
entry.load(require=True)
except (ImportError, DistributionNotFound, VersionConflict,
UnknownExtra) as e:
print(entry.module_name)
_log_error(entry, e)
else:
if os.path.dirname(entry.dist.location) == auto_enable:
_enable_component(env, entry.module_name)
return _load_eggs
def load_py_files():
"""Loader that look for Python source files in the plugins directories,
which simply get imported, thereby registering them with the component
manager if they define any components.
"""
def _load_py_files(env, search_path, auto_enable=None):
for path in search_path:
plugin_files = glob(os.path.join(path, '*.py'))
for plugin_file in plugin_files:
try:
plugin_name = os.path.basename(plugin_file[:-3])
env.log.debug('Loading file plugin %s from %s' % \
(plugin_name, plugin_file))
if plugin_name not in sys.modules:
module = imp.load_source(plugin_name, plugin_file)
if path == auto_enable:
_enable_component(env, plugin_name)
except Exception as e:
env.log.error('Failed to load plugin from %s: %s',
plugin_file,
exception_to_unicode(e, traceback=True))
return _load_py_files
def load_components(env, extra_path=None, loaders=(load_eggs('concurrent.components'),
load_py_files())):
"""Load all components found on the given search path."""
plugins_dir = os.path.normcase(os.path.realpath(
env.get_plugins_dir()
))
search_path = [plugins_dir]
# add paths to our framework folders
framework_dir = os.path.normcase(os.path.realpath(
env.get_plugins_dir()
))
# if we got any extra path to be added, do this now!
if extra_path:
search_path += list(extra_path)
# Load all component we can
for loadfunc in loaders:
loadfunc(env, search_path, auto_enable=plugins_dir)
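# Editor's hedged usage sketch: `env` is any environment object exposing
# .log, .config['components'] and .get_plugins_dir(); the extra search path
# is hypothetical. Eggs are tried first, then plain .py plugin files.
def _example_load_all(env):
    load_components(env, extra_path=['/opt/concurrent/extra-plugins'])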
|
{
"content_hash": "1a810bccbd33b1c0416f9145bdadc518",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 91,
"avg_line_length": 43.10909090909091,
"alnum_prop": 0.5782370307886967,
"repo_name": "moritz-wundke/Concurrent",
"id": "45942cc2aeae60490d0f6c797b95f308f46ca653",
"size": "4766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "concurrent/core/components/componentloader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "93243"
},
{
"name": "Python",
"bytes": "511887"
},
{
"name": "Shell",
"bytes": "7035"
}
],
"symlink_target": ""
}
|
import itertools
import json
import os
from urllib.parse import unquote
from django.apps import apps
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.template import Context, Engine
from django.urls import translate_url
from django.utils.encoding import force_text
from django.utils.formats import get_format
from django.utils.http import is_safe_url
from django.utils.translation import (
LANGUAGE_SESSION_KEY, check_for_language, get_language,
)
from django.utils.translation.trans_real import DjangoTranslation
from django.views.generic import View
LANGUAGE_QUERY_PARAMETER = 'language'
def set_language(request):
"""
Redirect to a given URL while setting the chosen language in the session or
cookie. The URL and the language code need to be specified in the request
parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.POST.get('next', request.GET.get('next'))
if ((next or not request.is_ajax()) and
not is_safe_url(url=next, allowed_hosts={request.get_host()}, require_https=request.is_secure())):
next = request.META.get('HTTP_REFERER')
if next:
next = unquote(next) # HTTP_REFERER may be encoded.
if not is_safe_url(url=next, allowed_hosts={request.get_host()}, require_https=request.is_secure()):
next = '/'
response = HttpResponseRedirect(next) if next else HttpResponse(status=204)
if request.method == 'POST':
lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)
if lang_code and check_for_language(lang_code):
if next:
next_trans = translate_url(next, lang_code)
if next_trans != next:
response = HttpResponseRedirect(next_trans)
if hasattr(request, 'session'):
request.session[LANGUAGE_SESSION_KEY] = lang_code
else:
response.set_cookie(
settings.LANGUAGE_COOKIE_NAME, lang_code,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN,
)
return response
def get_formats():
"""Return all formats strings required for i18n to work."""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for attr in FORMAT_SETTINGS:
result[attr] = get_format(attr)
formats = {}
for k, v in result.items():
if isinstance(v, (int, str)):
formats[k] = force_text(v)
elif isinstance(v, (tuple, list)):
formats[k] = [force_text(value) for value in v]
return formats
js_catalog_template = r"""
{% autoescape off %}
(function(globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function(n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function(count) { return (count == 1) ? 0 : 1; };
{% endif %}
/* gettext library */
django.catalog = django.catalog || {};
{% if catalog_str %}
var newcatalog = {{ catalog_str }};
for (var key in newcatalog) {
django.catalog[key] = newcatalog[key];
}
{% endif %}
if (!django.jsi18n_initialized) {
django.gettext = function(msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function(singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function(msgid) { return msgid; };
django.pgettext = function(context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function(context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
django.interpolate = function(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function(format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
django.jsi18n_initialized = true;
}
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
template = Engine().from_string(js_catalog_template)
def indent(s):
return s.replace('\n', '\n ')
context = Context({
'catalog_str': indent(json.dumps(
catalog, sort_keys=True, indent=2)) if catalog else None,
'formats_str': indent(json.dumps(
get_formats(), sort_keys=True, indent=2)),
'plural': plural,
})
return HttpResponse(template.render(context), 'text/javascript')
def null_javascript_catalog(request, domain=None, packages=None):
"""
Return "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
return render_javascript_catalog()
class JavaScriptCatalog(View):
"""
Return the selected language catalog as a JavaScript library.
Receive the list of packages to check for translations in the `packages`
kwarg either from the extra dictionary passed to the url() function or as a
plus-sign delimited string from the request. Default is 'django.conf'.
You can override the gettext domain for this view, but usually you don't
want to do that as JavaScript messages go to the djangojs domain. This
might be needed if you deliver your JavaScript source from Django templates.
"""
domain = 'djangojs'
packages = None
def get(self, request, *args, **kwargs):
locale = get_language()
domain = kwargs.get('domain', self.domain)
# If packages are not provided, default to all installed packages, as
# DjangoTranslation without localedirs harvests them all.
packages = kwargs.get('packages', '')
packages = packages.split('+') if packages else self.packages
paths = self.get_paths(packages) if packages else None
self.translation = DjangoTranslation(locale, domain=domain, localedirs=paths)
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
def get_paths(self, packages):
allowable_packages = dict((app_config.name, app_config) for app_config in apps.get_app_configs())
app_configs = [allowable_packages[p] for p in packages if p in allowable_packages]
# paths of requested packages
return [os.path.join(app.path, 'locale') for app in app_configs]
def get_plural(self):
plural = None
if '' in self.translation._catalog:
for line in self.translation._catalog[''].split('\n'):
if line.startswith('Plural-Forms:'):
plural = line.split(':', 1)[1].strip()
if plural is not None:
# This should be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 :
# n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
return plural
def get_catalog(self):
pdict = {}
maxcnts = {}
catalog = {}
trans_cat = self.translation._catalog
trans_fallback_cat = self.translation._fallback._catalog if self.translation._fallback else {}
for key, value in itertools.chain(iter(trans_cat.items()), iter(trans_fallback_cat.items())):
if key == '' or key in catalog:
continue
if isinstance(key, str):
catalog[key] = value
elif isinstance(key, tuple):
msgid = key[0]
cnt = key[1]
maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
pdict.setdefault(msgid, {})[cnt] = value
else:
raise TypeError(key)
for k, v in pdict.items():
catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
return catalog
def get_context_data(self, **kwargs):
return {
'catalog': self.get_catalog(),
'formats': get_formats(),
'plural': self.get_plural(),
}
def render_to_response(self, context, **response_kwargs):
def indent(s):
return s.replace('\n', '\n ')
template = Engine().from_string(js_catalog_template)
context['catalog_str'] = indent(
json.dumps(context['catalog'], sort_keys=True, indent=2)
) if context['catalog'] else None
context['formats_str'] = indent(json.dumps(context['formats'], sort_keys=True, indent=2))
return HttpResponse(template.render(Context(context)), 'text/javascript')
class JSONCatalog(JavaScriptCatalog):
"""
Return the selected language catalog as a JSON object.
Receive the same parameters as JavaScriptCatalog and return a response
with a JSON object of the following format:
{
"catalog": {
# Translations catalog
},
"formats": {
# Language formats for date, time, etc.
},
"plural": '...' # Expression for plural forms, or null.
}
"""
def render_to_response(self, context, **response_kwargs):
return JsonResponse(context)
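# Editor's hedged usage sketch: conventional URL wiring for the two catalog
# views. The route names follow common Django usage; `path` assumes
# Django >= 2.0.
def _example_urlpatterns():
    from django.urls import path
    return [
        path('jsi18n/', JavaScriptCatalog.as_view(), name='javascript-catalog'),
        path('jsoni18n/', JSONCatalog.as_view(), name='json-catalog'),
    ]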
|
{
"content_hash": "a431fd8ad8cc9d36a93ae1690826e6f6",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 117,
"avg_line_length": 35.18125,
"alnum_prop": 0.6093444661573992,
"repo_name": "harisibrahimkv/django",
"id": "ce78742fd8d41193e1295d1df8bb481744be6471",
"size": "11258",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "django/views/i18n.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "203052"
},
{
"name": "JavaScript",
"bytes": "252653"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11843337"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from six.moves import urllib
from nova import exception
from nova.i18n import _
from nova import utils
class SecurityGroupBase(object):
def parse_cidr(self, cidr):
if cidr:
try:
                cidr = urllib.parse.unquote(cidr)  # already text; .decode() would fail on Python 3
except Exception as e:
self.raise_invalid_cidr(cidr, e)
if not utils.is_valid_cidr(cidr):
self.raise_invalid_cidr(cidr)
return cidr
else:
return '0.0.0.0/0'
@staticmethod
def new_group_ingress_rule(grantee_group_id, protocol, from_port,
to_port):
return SecurityGroupBase._new_ingress_rule(
protocol, from_port, to_port, group_id=grantee_group_id)
@staticmethod
def new_cidr_ingress_rule(grantee_cidr, protocol, from_port, to_port):
return SecurityGroupBase._new_ingress_rule(
protocol, from_port, to_port, cidr=grantee_cidr)
@staticmethod
def _new_ingress_rule(ip_protocol, from_port, to_port,
group_id=None, cidr=None):
values = {}
if group_id:
values['group_id'] = group_id
# Open everything if an explicit port range or type/code are not
# specified, but only if a source group was specified.
ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
if (ip_proto_upper == 'ICMP' and
from_port is None and to_port is None):
from_port = -1
to_port = -1
elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
and to_port is None):
from_port = 1
to_port = 65535
elif cidr:
values['cidr'] = cidr
if ip_protocol and from_port is not None and to_port is not None:
ip_protocol = str(ip_protocol)
try:
# Verify integer conversions
from_port = int(from_port)
to_port = int(to_port)
except ValueError:
if ip_protocol.upper() == 'ICMP':
raise exception.InvalidInput(reason=_("Type and"
" Code must be integers for ICMP protocol type"))
else:
raise exception.InvalidInput(reason=_("To and From ports "
"must be integers"))
if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
raise exception.InvalidIpProtocol(protocol=ip_protocol)
# Verify that from_port must always be less than
# or equal to to_port
if (ip_protocol.upper() in ['TCP', 'UDP'] and
(from_port > to_port)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="Former value cannot"
" be greater than the later")
# Verify valid TCP, UDP port ranges
if (ip_protocol.upper() in ['TCP', 'UDP'] and
(from_port < 1 or to_port > 65535)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="Valid %s ports should"
" be between 1-65535"
% ip_protocol.upper())
# Verify ICMP type and code
if (ip_protocol.upper() == "ICMP" and
(from_port < -1 or from_port > 255 or
to_port < -1 or to_port > 255)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="For ICMP, the"
" type:code must be valid")
values['protocol'] = ip_protocol
values['from_port'] = from_port
values['to_port'] = to_port
else:
# If cidr based filtering, protocol and ports are mandatory
if cidr:
return None
return values
def create_security_group_rule(self, context, security_group, new_rule):
if self.rule_exists(security_group, new_rule):
msg = (_('This rule already exists in group %s') %
new_rule['parent_group_id'])
self.raise_group_already_exists(msg)
return self.add_rules(context, new_rule['parent_group_id'],
security_group['name'],
[new_rule])[0]
def rule_exists(self, security_group, new_rule):
"""Indicates whether the specified rule is already
defined in the given security group.
"""
for rule in security_group['rules']:
keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != new_rule.get(key):
break
else:
return rule.get('id') or True
return False
def validate_property(self, value, property, allowed):
pass
def ensure_default(self, context):
pass
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
pass
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
pass
def populate_security_groups(self, security_groups):
"""Called when populating the database for an instances
security groups.
"""
raise NotImplementedError()
def create_security_group(self, context, name, description):
raise NotImplementedError()
def update_security_group(self, context, security_group,
name, description):
raise NotImplementedError()
def get(self, context, name=None, id=None, map_exception=False):
raise NotImplementedError()
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
raise NotImplementedError()
def destroy(self, context, security_group):
raise NotImplementedError()
def add_rules(self, context, id, name, vals):
raise NotImplementedError()
def remove_rules(self, context, security_group, rule_ids):
raise NotImplementedError()
def get_rule(self, context, id):
raise NotImplementedError()
def get_instance_security_groups(self, context, instance, detailed=False):
raise NotImplementedError()
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param security_group_name: security group name to add
"""
raise NotImplementedError()
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param security_group_name: security group name to remove
"""
raise NotImplementedError()
@staticmethod
def raise_invalid_property(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_group_already_exists(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_invalid_group(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
raise exception.InvalidCidr(cidr=cidr)
@staticmethod
def raise_over_quota(msg):
raise exception.SecurityGroupLimitExceeded(msg)
@staticmethod
def raise_not_found(msg):
raise exception.SecurityGroupNotFound(msg)
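# Editor's hedged sketch: the for/else comparison used by rule_exists(),
# restated as a standalone predicate. The else branch runs only when the
# loop finishes without hitting `break`, i.e. when every compared key agrees.
def _example_rule_matches(rule, new_rule):
    for key in ('group_id', 'cidr', 'from_port', 'to_port', 'protocol'):
        if rule.get(key) != new_rule.get(key):
            break  # first mismatch: not the same rule
    else:
        return True  # no break: all compared keys agree
    return False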
|
{
"content_hash": "872d0abc337289702487e8c5c32c49a5",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 78,
"avg_line_length": 35.724444444444444,
"alnum_prop": 0.5666832545409306,
"repo_name": "alaski/nova",
"id": "66244ef9c815a0533a0f6789a0407114e62c573c",
"size": "8876",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/network/security_group/security_group_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16744610"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "351433"
}
],
"symlink_target": ""
}
|
"""
####################################################################################################
TITLE : HPE XP7 Migration, Migrate
DESCRIPTION : Migrate the data to the new server
AUTHOR : Koen Schets / StorageTeam
VERSION : Based on previous ODR framework
1.0 Initial version
CONFIG : xpmig.ini
LOG : xpmig_migrate.log
TODO :
Check CaJ replication is > 90%
Wait for the source host to logout
Remove hba_wwns to prevent re-login
Wait for the syncrate to be 100%
Request operator confirmation before split
Stop CaJ replication
Detach external storage raidgroups
Show the overview of the migration in a foot-window
####################################################################################################
"""
import curses
|
{
"content_hash": "32a41d63c86353b76ce853ae48111798",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 100,
"avg_line_length": 27.24137931034483,
"alnum_prop": 0.5379746835443038,
"repo_name": "kschets/XP_migrator",
"id": "5637b918bfe12546d684d4eda20abbd8a7a22808",
"size": "808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xpmig_migrate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "141968"
}
],
"symlink_target": ""
}
|
import sys
from subprocess import call, Popen, PIPE
from devlib.utils.misc import escape_double_quotes
from wa import Command
from wa.framework import pluginloader
from wa.framework.configuration.core import MetaConfiguration, RunConfiguration
from wa.framework.exception import NotFoundError
from wa.framework.target.descriptor import list_target_descriptions
from wa.utils.types import caseless_string, identifier
from wa.utils.doc import (strip_inlined_text, get_rst_from_plugin,
get_params_rst, underline)
from wa.utils.misc import which
class ShowCommand(Command):
name = 'show'
description = 'Display documentation for the specified plugin (workload, instrument, etc.).'
def initialize(self, context):
self.parser.add_argument('plugin', metavar='PLUGIN',
help='The name of the plugin to display documentation for.')
def execute(self, state, args):
name = identifier(args.plugin)
rst_output = None
if name == caseless_string('settings'):
rst_output = get_rst_for_global_config()
rst_output += get_rst_for_envars()
plugin_name = name.lower()
kind = 'global:'
else:
try:
plugin = pluginloader.get_plugin_class(name)
except NotFoundError:
plugin = None
if plugin:
rst_output = get_rst_from_plugin(plugin)
plugin_name = plugin.name
kind = '{}:'.format(plugin.kind)
else:
target = get_target_description(name)
if target:
rst_output = get_rst_from_target(target)
plugin_name = target.name
kind = 'target:'
if not rst_output:
raise NotFoundError('Could not find plugin or alias "{}"'.format(name))
if which('pandoc'):
p = Popen(['pandoc', '-f', 'rst', '-t', 'man'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
if sys.version_info[0] == 3:
output, _ = p.communicate(rst_output.encode(sys.stdin.encoding))
output = output.decode(sys.stdout.encoding)
else:
output, _ = p.communicate(rst_output)
# Make sure to double escape back slashes
output = output.replace('\\', '\\\\\\')
# Correctly format the title and page number of the man page
title, body = output.split('\n', 1)
title = '.TH {}{} 7'.format(kind, plugin_name)
output = '\n'.join([title, body])
call('echo "{}" | man -l -'.format(escape_double_quotes(output)), shell=True)
else:
print(rst_output) # pylint: disable=superfluous-parens
def get_target_description(name):
targets = list_target_descriptions()
for target in targets:
if name == identifier(target.name):
return target
def get_rst_from_target(target):
text = underline(target.name, '~')
if hasattr(target, 'description'):
desc = strip_inlined_text(target.description or '')
text += desc
text += underline('Device Parameters:', '-')
text += get_params_rst(target.conn_params)
text += get_params_rst(target.platform_params)
text += get_params_rst(target.target_params)
text += get_params_rst(target.assistant_params)
    text += '.. note:: For available runtime parameters, please see the documentation.'
return text + '\n'
def get_rst_for_global_config():
text = underline('Global Configuration')
text += 'These parameters control the behaviour of WA/run as a whole, they ' \
'should be set inside a config file (either located in ' \
'$WA_USER_DIRECTORY/config.yaml or one which is specified with -c), ' \
'or into config/global section of the agenda.\n\n'
cfg_points = MetaConfiguration.config_points + RunConfiguration.config_points
text += get_params_rst(cfg_points)
return text
def get_rst_for_envars():
text = underline('Environment Variables')
text += '''WA_USER_DIRECTORY: str
This is the location WA will look for config.yaml, plugins, dependencies,
and it will also be used for local caches, etc. If this variable is not set,
the default location is ``~/.workload_automation`` (this is created when WA
is installed).
    .. note:: This location must be writable by the user who runs WA.'''
return text
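# Hedged sketch (not part of this module): the pandoc pipeline used in
# ShowCommand.execute above, reduced to a standalone helper. Assumes pandoc is
# installed and on PATH; the helper name is illustrative.
def _rst_to_man(rst_text):
    from subprocess import Popen, PIPE
    p = Popen(['pandoc', '-f', 'rst', '-t', 'man'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, _ = p.communicate(rst_text.encode('utf-8'))
    return output.decode('utf-8')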
|
{
"content_hash": "b7a6d5a6659b0c7869118d1f0771d166",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 97,
"avg_line_length": 38.46153846153846,
"alnum_prop": 0.6137777777777778,
"repo_name": "setrofim/workload-automation",
"id": "45531a4c4d38b733a018a83c294fedcdb9a30754",
"size": "5303",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "wa/commands/show.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "84102"
},
{
"name": "Dockerfile",
"bytes": "3757"
},
{
"name": "Java",
"bytes": "241190"
},
{
"name": "Makefile",
"bytes": "1123"
},
{
"name": "Python",
"bytes": "1103558"
},
{
"name": "Shell",
"bytes": "39855"
},
{
"name": "TSQL",
"bytes": "6384"
},
{
"name": "Vim script",
"bytes": "901"
}
],
"symlink_target": ""
}
|
"""Support for the ZHA platform."""
import functools
import time
from homeassistant.components.device_tracker import DOMAIN, SOURCE_TYPE_ROUTER
from homeassistant.components.device_tracker.config_entry import ScannerEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .core import discovery
from .core.const import (
CHANNEL_POWER_CONFIGURATION,
DATA_ZHA,
DATA_ZHA_DISPATCHERS,
SIGNAL_ADD_ENTITIES,
SIGNAL_ATTR_UPDATED,
)
from .core.registries import ZHA_ENTITIES
from .entity import ZhaEntity
from .sensor import Battery
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, DOMAIN)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Zigbee Home Automation device tracker from config entry."""
entities_to_create = hass.data[DATA_ZHA][DOMAIN]
unsub = async_dispatcher_connect(
hass,
SIGNAL_ADD_ENTITIES,
functools.partial(
discovery.async_add_entities, async_add_entities, entities_to_create
),
)
hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsub)
@STRICT_MATCH(channel_names=CHANNEL_POWER_CONFIGURATION)
class ZHADeviceScannerEntity(ScannerEntity, ZhaEntity):
"""Represent a tracked device."""
def __init__(self, unique_id, zha_device, channels, **kwargs):
"""Initialize the ZHA device tracker."""
super().__init__(unique_id, zha_device, channels, **kwargs)
self._battery_channel = self.cluster_channels.get(CHANNEL_POWER_CONFIGURATION)
self._connected = False
self._keepalive_interval = 60
self._should_poll = True
self._battery_level = None
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
await super().async_added_to_hass()
if self._battery_channel:
self.async_accept_signal(
self._battery_channel,
SIGNAL_ATTR_UPDATED,
self.async_battery_percentage_remaining_updated,
)
async def async_update(self):
"""Handle polling."""
if self.zha_device.last_seen is None:
self._connected = False
else:
difference = time.time() - self.zha_device.last_seen
if difference > self._keepalive_interval:
self._connected = False
else:
self._connected = True
@property
def is_connected(self):
"""Return true if the device is connected to the network."""
return self._connected
@property
def source_type(self):
"""Return the source type, eg gps or router, of the device."""
return SOURCE_TYPE_ROUTER
@callback
def async_battery_percentage_remaining_updated(self, attr_id, attr_name, value):
"""Handle tracking."""
if attr_name != "battery_percentage_remaining":
return
self.debug("battery_percentage_remaining updated: %s", value)
self._connected = True
self._battery_level = Battery.formatter(value)
self.async_write_ha_state()
@property
def battery_level(self):
"""Return the battery level of the device.
Percentage from 0-100.
"""
return self._battery_level
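# Minimal sketch (not part of the ZHA integration) of the connectivity rule in
# ZHADeviceScannerEntity.async_update above: a device counts as connected only
# if it was last seen within the keepalive interval. The helper name is
# illustrative.
def _is_connected(last_seen, keepalive_interval=60, now=None):
    now = time.time() if now is None else now
    return last_seen is not None and (now - last_seen) <= keepalive_interval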
|
{
"content_hash": "40a65bd3deea0b422f2b888f50814dad",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 86,
"avg_line_length": 33.505050505050505,
"alnum_prop": 0.651492312330419,
"repo_name": "adrienbrault/home-assistant",
"id": "ffb37e33b0fcc3f1a18dddf2e9802bc799970d6e",
"size": "3317",
"binary": false,
"copies": "8",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zha/device_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
import tweepy
import json
import re
import time
import random
def create_api(config_filename):
"""
Creates an authorized tweepy API object given a config file containing
appropriate twitter application keys
:param config_filename: string containing the config filename
:return: the tweepy API object associated with the authorized twitter
application
"""
with open(config_filename) as api_keys:
keys = json.load(api_keys)['twitter']
api_key = keys['API Key']
secret_key = keys['API Secret']
access_tok = keys['Access Token']
access_tok_sec = keys['Access Token Secret']
    auth = tweepy.OAuthHandler(api_key, secret_key)
auth.set_access_token(access_tok, access_tok_sec)
api = tweepy.API(auth)
return api
def limit_handled(cursor):
"""
Function to handle api call limits. When limit is reached, the function
will wait 15 minutes before iterating. From Tweepy website
:param cursor:
:return:
"""
while True:
try:
yield cursor.next()
except tweepy.RateLimitError:
time.sleep(15 * 60)
def tokenize(tweet):
"""
Uses regular expressions to tokenize tweets
:param tweet: the text of a given tweet
:return: the tokenization of that tweet as a list
"""
emoticons_str = r"""
(?:
[:=;] #
[oO\-]?
[D\)\]\(\]/\\OpP]
)"""
regex_str = [
emoticons_str,
r'<[^>]+>', # HTML tags
r'(?:@[\w_]+)', # @-mentions
r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags
r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs
r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers
r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and '
r'(?:[\w_]+)', # other words
r'(?:\S)' # anything else
]
tokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)
return tokens_re.findall(tweet)
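# Illustrative example (not in the original): the alternation above keeps
# mentions, URLs, hashtags and emoticons intact, e.g.
#   tokenize("@user check https://t.co/x #tag :)")
#   -> ['@user', 'check', 'https://t.co/x', '#tag', ':)']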
class Markov_Chain:
def __init__(self):
self.mc = {}
class Probability_Distribution:
def __init__(self):
self.dist = {}
self.total = 0
def pick(self):
"""
            Returns a random token drawn from the current distribution
:return: a random token from the distribution
"""
randnum = random.randrange(self.total)
currDex = 0
for token in self.dist:
currCnt = self.dist[token]
if randnum < currCnt + currDex:
return token
currDex += currCnt
def update(self, token):
"""
Increment the probability of encountering a certain token
:param token: a string containing the token
"""
if token in self.dist:
self.dist[token] += 1
else:
self.dist[token] = 1
self.total += 1
def update_markov_chain(self, tokens):
"""
Updates the markov structure with a new tokenized tweet
:param tokens: list of strings from tokenized tweet
"""
for i in range(1,len(tokens)):
if tokens[i-1] in self.mc:
self.mc[tokens[i-1]].update(tokens[i])
else:
self.mc[tokens[i-1]] = self.Probability_Distribution()
self.mc[tokens[i-1]].update(tokens[i])
#need to account for final token
if i == len(tokens) - 1:
if tokens[i] in self.mc:
self.mc[tokens[i]].update('END_OF_TWEET')
else:
self.mc[tokens[i]] = self.Probability_Distribution()
self.mc[tokens[i]].update('END_OF_TWEET')
def train_on_tweets(self, api, ids, limit = -1):
"""
Trains the given markov chain on the given twitter handles
:param api: the authorized tweepy api object
:param ids: list of ids you'd like to train on
:param limit: limits the number of tweets, default no limit
:return:
"""
for user in ids:
if (limit > 0):
for tweet in limit_handled(tweepy.Cursor(api.user_timeline, id = user).items(limit)):
self.update_markov_chain(tokenize(tweet.text))
else:
for tweet in limit_handled(tweepy.Cursor(api.user_timeline, id = user).items()):
self.update_markov_chain(tokenize(tweet.text))
    def save_markov_chain(self, filename):
        """
        Serializes a markov chain into a JSON file
        :param filename: string containing path
        """
        # dump each Probability_Distribution's underlying dict so the chain
        # survives a round trip through plain JSON
        serializable = {token: dist.__dict__ for token, dist in self.mc.items()}
        with open(filename, 'w') as outfile:
            json.dump(serializable, outfile)
    def load_markov_chain(self, filename):
        """
        Loads a previously trained markov chain from a json file
        :param filename: string containing path
        """
        with open(filename) as infile:
            raw = json.load(infile)
        # rebuild Probability_Distribution objects from their serialized dicts
        self.mc = {}
        for token, payload in raw.items():
            dist = self.Probability_Distribution()
            dist.dist = payload['dist']
            dist.total = payload['total']
            self.mc[token] = dist
def generate_next_token(self, token):
"""
Given a token, produces a likely next token
:param token:
:return:
"""
return self.mc[token].pick()
def generate_tweet(self, seed):
"""
        Takes an initial word, then generates a tweet string
:param seed: the initial word
:return: string containing generated tweet
"""
tweet = seed
while len(tweet) < 140:
try:
next = self.generate_next_token(seed)
if next == "END_OF_TWEET":
break
tweet += " " + next
seed = next
except KeyError:
print "Seed not present in the Markov Chain"
return ""
return tweet
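# Example usage sketch (not part of the original module); the config path and
# handle below are placeholders.
if __name__ == '__main__':
    api = create_api('config.json')
    chain = Markov_Chain()
    chain.train_on_tweets(api, ['some_handle'], limit=200)
    print(chain.generate_tweet('the'))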
|
{
"content_hash": "fd7940007881b98a016d3f8d87185b8d",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 101,
"avg_line_length": 28.49514563106796,
"alnum_prop": 0.5296422487223169,
"repo_name": "wilg64/MarkovTweet",
"id": "cbdf9a68f6435750c367ed207ab72e74a21d63f7",
"size": "5870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "markovtweet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7506"
}
],
"symlink_target": ""
}
|
import copy
from rdkit.Chem.FeatMaps import FeatMaps
class MergeMethod(object):
# Put the new point at the weighted average position of the two fused points
WeightedAverage = 0
# Put the new point at the un-weighted average position of the two fused points
Average = 1
# Put the new point at the position of the larger (by weight) of the two points
UseLarger = 2
@classmethod
def valid(cls, mergeMethod):
""" Check that mergeMethod is valid """
if mergeMethod not in (cls.WeightedAverage, cls.Average, cls.UseLarger):
raise ValueError('unrecognized mergeMethod')
class MergeMetric(object):
# Do not merge points
NoMerge = 0
# merge two points if they come within a threshold distance
Distance = 1
# merge two points if their percent overlap exceeds a threshold
Overlap = 2
@classmethod
def valid(cls, mergeMetric):
""" Check that mergeMetric is valid """
if mergeMetric not in (cls.NoMerge, cls.Distance, cls.Overlap):
raise ValueError('unrecognized mergeMetric')
class DirMergeMode(object):
# Do not merge directions (i.e. keep all direction vectors)
NoMerge = 0
# Sum direction vectors
Sum = 1
@classmethod
def valid(cls, dirMergeMode):
""" Check that dirMergeMode is valid """
if dirMergeMode not in (cls.NoMerge, cls.Sum):
raise ValueError('unrecognized dirMergeMode')
def __copyAll(res, fm1, fm2):
""" no user-serviceable parts inside """
for feat in fm1.GetFeatures():
res.AddFeatPoint(copy.deepcopy(feat))
for feat in fm2.GetFeatures():
res.AddFeatPoint(copy.deepcopy(feat))
def GetFeatFeatDistMatrix(fm, mergeMetric, mergeTol, dirMergeMode, compatFunc):
"""
NOTE that mergeTol is a max value for merging when using distance-based
merging and a min value when using score-based merging.
"""
MergeMetric.valid(mergeMetric)
numFeatures = fm.GetNumFeatures()
dists = [[1e8] * numFeatures for _ in range(numFeatures)]
if mergeMetric == MergeMetric.NoMerge:
return dists
# Setup distance matrix, depending on mergeMetric.
  benchmarkDict = {MergeMetric.Distance: mergeTol * mergeTol, MergeMetric.Overlap: mergeTol}
benchmark = benchmarkDict[mergeMetric]
def assignMatrix(matrix, i, j, value, constraint):
if value < constraint:
matrix[i][j] = value
matrix[j][i] = value
getFeature = fm.GetFeature
for i in range(numFeatures):
ptI = getFeature(i)
for j in range(i + 1, numFeatures):
ptJ = getFeature(j)
if compatFunc(ptI, ptJ):
if mergeMetric == MergeMetric.Distance:
dist2 = ptI.GetDist2(ptJ)
assignMatrix(matrix=dists, i=i, j=j, value=dist2, constraint=benchmark)
elif mergeMetric == MergeMetric.Overlap:
score = fm.GetFeatFeatScore(ptI, ptJ, typeMatch=False) * (-1 * ptJ.weight)
assignMatrix(matrix=dists, i=i, j=j, value=score, constraint=benchmark)
return dists
def familiesMatch(f1, f2):
return f1.GetFamily() == f2.GetFamily()
def feq(v1, v2, tol=1e-4):
return abs(v1 - v2) < tol
def MergeFeatPoints(fm, mergeMetric=MergeMetric.NoMerge, mergeTol=1.5,
dirMergeMode=DirMergeMode.NoMerge, mergeMethod=MergeMethod.WeightedAverage,
compatFunc=familiesMatch):
"""
NOTE that mergeTol is a max value for merging when using distance-based
merging and a min value when using score-based merging.
returns whether or not any points were actually merged
"""
MergeMetric.valid(mergeMetric)
MergeMethod.valid(mergeMethod)
DirMergeMode.valid(dirMergeMode)
res = False
if mergeMetric == MergeMetric.NoMerge:
return res
dists = GetFeatFeatDistMatrix(fm, mergeMetric, mergeTol, dirMergeMode, compatFunc)
distOrders = [None] * len(dists)
for i, distV in enumerate(dists):
distOrders[i] = []
for j, dist in enumerate(distV):
if dist < mergeTol:
distOrders[i].append((dist, j))
distOrders[i].sort()
# print('distOrders:')
# print(distOrders)
# we now know the "distances" and have rank-ordered list of
# each point's neighbors. Work with that.
# progressively merge nearest neighbors until there
# are no more points left to merge
featsInPlay = list(range(fm.GetNumFeatures()))
featsToRemove = []
# print '--------------------------------'
while featsInPlay:
# find two features who are mutual nearest neighbors:
fipCopy = featsInPlay[:]
for fi in fipCopy:
# print('>>>',fi,fipCopy,featsInPlay)
# print('\t',distOrders[fi])
mergeThem = False
if not distOrders[fi]:
featsInPlay.remove(fi)
continue
dist, nbr = distOrders[fi][0]
if nbr not in featsInPlay:
continue
if distOrders[nbr][0][1] == fi:
# print 'direct:',fi,nbr
mergeThem = True
else:
# it may be that there are several points at about the same distance,
# check for that now
if feq(distOrders[nbr][0][0], dist):
for distJ, nbrJ in distOrders[nbr][1:]:
if feq(dist, distJ):
if nbrJ == fi:
# print 'indirect: ',fi,nbr
mergeThem = True
break
else:
break
# print ' bottom:',mergeThem
if mergeThem:
break
if mergeThem:
res = True
featI = fm.GetFeature(fi)
nbrFeat = fm.GetFeature(nbr)
if mergeMethod == MergeMethod.WeightedAverage:
newPos = featI.GetPos() * featI.weight + nbrFeat.GetPos() * nbrFeat.weight
newPos /= (featI.weight + nbrFeat.weight)
newWeight = (featI.weight + nbrFeat.weight) / 2
elif mergeMethod == MergeMethod.Average:
newPos = featI.GetPos() + nbrFeat.GetPos()
newPos /= 2
newWeight = (featI.weight + nbrFeat.weight) / 2
elif mergeMethod == MergeMethod.UseLarger:
if featI.weight > nbrFeat.weight:
newPos = featI.GetPos()
newWeight = featI.weight
else:
newPos = nbrFeat.GetPos()
newWeight = nbrFeat.weight
featI.SetPos(newPos)
featI.weight = newWeight
# nbr and fi are no longer valid targets:
# print 'nbr done:',nbr,featsToRemove,featsInPlay
featsToRemove.append(nbr)
featsInPlay.remove(fi)
featsInPlay.remove(nbr)
for nbrList in distOrders:
try:
nbrList.remove(fi)
except ValueError:
pass
try:
nbrList.remove(nbr)
except ValueError:
pass
else:
# print ">>>> Nothing found, abort"
break
featsToRemove.sort()
for i, fIdx in enumerate(featsToRemove):
fm.DropFeature(fIdx - i)
return res
def CombineFeatMaps(fm1, fm2, mergeMetric=MergeMetric.NoMerge, mergeTol=1.5,
dirMergeMode=DirMergeMode.NoMerge):
"""
the parameters will be taken from fm1
"""
res = FeatMaps.FeatMap(params=fm1.params)
__copyAll(res, fm1, fm2)
if mergeMetric != MergeMetric.NoMerge:
MergeFeatPoints(res, mergeMetric=mergeMetric, mergeTol=mergeTol)
return res
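# Worked example (illustrative, not part of the module) of the
# MergeMethod.WeightedAverage update applied in MergeFeatPoints: two points at
# x=0.0 (weight 1) and x=3.0 (weight 2) merge to
#   newPos = (0.0 * 1 + 3.0 * 2) / (1 + 2) = 2.0
#   newWeight = (1 + 2) / 2 = 1.5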
|
{
"content_hash": "8b56ab0ec3f96afcab47076438f7177f",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 95,
"avg_line_length": 31.035087719298247,
"alnum_prop": 0.6526286037309215,
"repo_name": "rdkit/rdkit",
"id": "4aee8b46c1635254dfe0e9a061b244e14dec4881",
"size": "7341",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "rdkit/Chem/FeatMaps/FeatMapUtils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1595174"
},
{
"name": "C#",
"bytes": "10167"
},
{
"name": "C++",
"bytes": "13855391"
},
{
"name": "CMake",
"bytes": "761863"
},
{
"name": "Dockerfile",
"bytes": "2590"
},
{
"name": "Fortran",
"bytes": "7590"
},
{
"name": "HTML",
"bytes": "43059702"
},
{
"name": "Java",
"bytes": "369457"
},
{
"name": "JavaScript",
"bytes": "54009"
},
{
"name": "Jupyter Notebook",
"bytes": "498341"
},
{
"name": "LLVM",
"bytes": "40048"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "10862"
},
{
"name": "Python",
"bytes": "4157348"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "SWIG",
"bytes": "342569"
},
{
"name": "Shell",
"bytes": "3822"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "61677"
}
],
"symlink_target": ""
}
|
"""Test for catching non-exceptions."""
# pylint: disable=too-many-ancestors, no-absolute-import, import-error, multiple-imports,wrong-import-position
from __future__ import print_function
import socket, binascii
class MyException(object):
"""Custom 'exception'."""
class MySecondException(object):
"""Custom 'exception'."""
class MyGoodException(Exception):
"""Custom exception."""
class MySecondGoodException(MyGoodException):
"""Custom exception."""
class SkipException(socket.error):
"""Not an exception for Python 2, but one in 3."""
class SecondSkipException(SkipException):
"""Also a good exception."""
try:
1 + 1
except MyException: # [catching-non-exception]
print("caught")
try:
1 + 2
# +1:[catching-non-exception,catching-non-exception]
except (MyException, MySecondException):
print("caught")
try:
1 + 3
except MyGoodException:
print("caught")
try:
1 + 3
except (MyGoodException, MySecondGoodException):
print("caught")
try:
1 + 3
except (SkipException, SecondSkipException):
print("caught")
try:
1 + 42
# +1:[catching-non-exception,catching-non-exception]
except (None, list()):
print("caught")
try:
1 + 24
except None: # [catching-non-exception]
print("caught")
EXCEPTION = None
EXCEPTION = ZeroDivisionError
try:
1 + 46
except EXCEPTION:
print("caught")
try:
1 + 42
# +1:[catching-non-exception,catching-non-exception,catching-non-exception]
except (list([4, 5, 6]), None, ZeroDivisionError, 4):
print("caught")
EXCEPTION_TUPLE = (ZeroDivisionError, OSError)
NON_EXCEPTION_TUPLE = (ZeroDivisionError, OSError, 4)
try:
1 + 42
except EXCEPTION_TUPLE:
print("caught")
try:
1 + 42
except NON_EXCEPTION_TUPLE: # [catching-non-exception]
print("caught")
from missing_import import UnknownError
UNKNOWN_COMPONENTS = (ZeroDivisionError, UnknownError)
try:
1 + 42
except UNKNOWN_COMPONENTS:
print("caught")
try:
1 + 42
except binascii.Error:
print('builtin and detected')
try:
1 + 45
except object: # [catching-non-exception]
print('caught')
try:
1 + 42
except range: # [catching-non-exception]
print('caught')
|
{
"content_hash": "bf587b1718af633a1d310e809517bc53",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 110,
"avg_line_length": 20.05504587155963,
"alnum_prop": 0.6875571820677036,
"repo_name": "mith1979/ansible_automation",
"id": "1eca134d2b5ad2f90d6d411ae553055da8e17ed2",
"size": "2186",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "applied_python/applied_python/lib/python2.7/site-packages/pylint/test/functional/invalid_exceptions_caught.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "84868"
},
{
"name": "CSS",
"bytes": "50289"
},
{
"name": "HTML",
"bytes": "70428"
},
{
"name": "JavaScript",
"bytes": "105262"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "19073705"
},
{
"name": "Shell",
"bytes": "3747"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
from typing import Optional, Text, Tuple, Union
import tensorflow as tf
from tensorflow.python.layers.utils import smart_cond
from tensorflow.keras import backend as K
import numpy as np
from rasa.utils.tensorflow.layers import RandomlyConnectedDense
# from https://www.tensorflow.org/tutorials/text/transformer
# and https://github.com/tensorflow/tensor2tensor
class MultiHeadAttention(tf.keras.layers.Layer):
"""Multi-headed attention layer.
Arguments:
units: Positive integer, output dim of hidden layer.
num_heads: Positive integer, number of heads
to repeat the same attention structure.
attention_dropout_rate: Float, dropout rate inside attention for training.
density: Approximate fraction of trainable weights (in
`RandomlyConnectedDense` layers).
unidirectional: Boolean, use a unidirectional or bidirectional encoder.
use_key_relative_position: Boolean, if 'True' use key
relative embeddings in attention.
use_value_relative_position: Boolean, if 'True' use value
relative embeddings in attention.
max_relative_position: Positive integer, max position for relative embeddings.
heads_share_relative_embedding: Boolean, if 'True'
heads will share relative embeddings.
"""
def __init__(
self,
units: int,
num_heads: int,
attention_dropout_rate: float = 0.0,
density: float = 0.2,
unidirectional: bool = False,
use_key_relative_position: bool = False,
use_value_relative_position: bool = False,
max_relative_position: int = 5,
heads_share_relative_embedding: bool = False,
) -> None:
super().__init__()
if units % num_heads != 0:
raise ValueError(
f"number of units {units} should be proportional to "
f"number of attention heads {num_heads}."
)
self.num_heads = num_heads
self.units = units
self.attention_dropout_rate = attention_dropout_rate
self.unidirectional = unidirectional
self.use_key_relative_position = use_key_relative_position
self.use_value_relative_position = use_value_relative_position
self.relative_length = max_relative_position
self.relative_length += 1 # include current time
self.heads_share_relative_embedding = heads_share_relative_embedding
self._depth = units // self.num_heads
# process queries
self._query_dense_layer = RandomlyConnectedDense(
units=units, use_bias=False, density=density
)
# process keys
self._key_dense_layer = RandomlyConnectedDense(
units=units, use_bias=False, density=density
)
# process values
self._value_dense_layer = RandomlyConnectedDense(
units=units, use_bias=False, density=density
)
# process attention output
self._output_dense_layer = RandomlyConnectedDense(units=units, density=density)
self._create_relative_embeddings()
def _create_relative_embeddings(self) -> None:
"""Create relative embeddings."""
relative_embedding_shape = None
self.key_relative_embeddings = None
self.value_relative_embeddings = None
if self.use_key_relative_position or self.use_value_relative_position:
if not self.relative_length:
raise ValueError(
f"Max relative position {self.relative_length} "
f"should be > 0 when using relative attention."
)
if self.unidirectional:
relative_length = self.relative_length
else:
relative_length = 2 * self.relative_length - 1
if self.heads_share_relative_embedding:
relative_embedding_shape = (relative_length, self._depth)
else:
relative_embedding_shape = (
self.num_heads,
relative_length,
self._depth,
)
if self.use_key_relative_position:
self.key_relative_embeddings = self.add_weight(
shape=relative_embedding_shape, name="key_relative_embeddings"
)
if self.use_value_relative_position:
self.value_relative_embeddings = self.add_weight(
shape=relative_embedding_shape, name="value_relative_embeddings"
)
def _pad_relative_embeddings(self, x: tf.Tensor, length: tf.Tensor) -> tf.Tensor:
# pad the left side to length
pad_left = x[:, :, :, :1, :]
pad_left = tf.tile(pad_left, (1, 1, 1, length - self.relative_length, 1))
# pad the right side to length
if self.unidirectional:
right_relative_length = 1 # current time
pad_right = tf.zeros_like(x[:, :, :, -1:, :])
else:
right_relative_length = self.relative_length
pad_right = x[:, :, :, -1:, :]
pad_right = tf.tile(pad_right, (1, 1, 1, length - right_relative_length, 1))
return tf.concat([pad_left, x, pad_right], axis=-2)
def _slice_relative_embeddings(self, x: tf.Tensor, length: tf.Tensor) -> tf.Tensor:
if self.unidirectional:
# pad the right side to relative_length
pad_right = tf.zeros_like(x[:, :, :, -1:, :])
pad_right = tf.tile(pad_right, (1, 1, 1, self.relative_length - 1, 1))
x = tf.concat([x, pad_right], axis=-2)
extra_length = self.relative_length - length
full_length = tf.shape(x)[-2]
return x[:, :, :, extra_length : full_length - extra_length, :]
def _relative_to_absolute_position(self, x: tf.Tensor) -> tf.Tensor:
"""Universal method to convert tensor from relative to absolute indexing.
"Slides" relative embeddings by 45 degree.
Arguments:
x: A tensor of shape (batch, num_heads, length, relative_length, depth)
or (batch, num_heads, length, relative_length)
Returns:
A tensor of shape (batch, num_heads, length, length, depth)
or (batch, num_heads, length, length)
"""
x_dim = len(x.shape)
if x_dim < 4 or x_dim > 5:
raise ValueError(
f"Relative tensor has a wrong shape {x.shape}, "
f"it should have 4 or 5 dimensions."
)
if x_dim == 4:
# add fake depth dimension
x = tf.expand_dims(x, axis=-1)
batch = tf.shape(x)[0]
num_heads = tf.shape(x)[1]
length = tf.shape(x)[2]
depth = tf.shape(x)[-1]
x = tf.cond(
length > self.relative_length,
lambda: self._pad_relative_embeddings(x, length),
lambda: self._slice_relative_embeddings(x, length),
)
# add a column of zeros to "slide" columns to diagonals through reshape
pad_shift = tf.zeros_like(x[:, :, :, -1:, :])
x = tf.concat([x, pad_shift], axis=-2)
# flatten length dimensions
x = tf.reshape(x, (batch, num_heads, -1, depth))
width = 2 * length
# add zeros so that the result of back reshape is still a matrix
pad_flat = tf.zeros_like(
x[:, :, : ((width - 1) - width * length % (width - 1)) % (width - 1), :]
)
x = tf.concat([x, pad_flat], axis=-2)
# "slide" columns to diagonals through reshape
x = tf.reshape(x, (batch, num_heads, -1, width - 1, depth))
# slice needed "diagonal" matrix
x = x[:, :, :-1, -length:, :]
if x_dim == 4:
# remove fake depth dimension
x = tf.squeeze(x, axis=-1)
return x
def _matmul_with_relative_keys(self, x: tf.Tensor) -> tf.Tensor:
y = self.key_relative_embeddings
if self.heads_share_relative_embedding:
matmul = tf.einsum("bhld,md->bhlm", x, y)
else:
matmul = tf.einsum("bhld,hmd->bhlm", x, y)
return self._relative_to_absolute_position(matmul)
def _tile_relative_embeddings(self, x: tf.Tensor, length: tf.Tensor) -> tf.Tensor:
if self.heads_share_relative_embedding:
x = tf.expand_dims(x, axis=0) # add head dimension
x = tf.expand_dims(x, axis=1) # add length dimension
x = tf.tile(x, (1, length, 1, 1))
return tf.expand_dims(x, axis=0) # add batch dimension
def _squeeze_relative_embeddings(self, x: tf.Tensor) -> tf.Tensor:
x = tf.squeeze(x, axis=0) # squeeze batch dimension
if self.heads_share_relative_embedding:
x = tf.squeeze(x, axis=1) # squeeze head dimension
return x
def _matmul_with_relative_values(self, x: tf.Tensor) -> tf.Tensor:
y = self._tile_relative_embeddings(
self.value_relative_embeddings, tf.shape(x)[-2]
)
y = self._relative_to_absolute_position(y)
y = self._squeeze_relative_embeddings(y)
if self.heads_share_relative_embedding:
return tf.einsum("bhlm,lmd->bhld", x, y)
else:
return tf.einsum("bhlm,hlmd->bhld", x, y)
def _drop_attention_logits(
self, logits: tf.Tensor, pad_mask: tf.Tensor, training: tf.Tensor
) -> tf.Tensor:
        def dropped_logits() -> tf.Tensor:
            keep_prob = tf.random.uniform(tf.shape(logits), 0, 1) + pad_mask
            drop_mask = tf.cast(
                tf.less(keep_prob, self.attention_dropout_rate), logits.dtype
            )
            return logits + drop_mask * -1e9
        return smart_cond(training, dropped_logits, lambda: tf.identity(logits))
def _scaled_dot_product_attention(
self,
query: tf.Tensor,
key: tf.Tensor,
value: tf.Tensor,
pad_mask: tf.Tensor,
training: tf.Tensor,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Calculate the attention weights.
query, key, value must have matching leading dimensions.
key, value must have matching penultimate dimension,
i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type (padding or look ahead)
but it must be broadcastable for addition.
Arguments:
query: A tensor with shape (..., length, depth).
key: A tensor with shape (..., length, depth).
value: A tensor with shape (..., length, depth).
pad_mask: Float tensor with shape broadcastable
to (..., length, length). Defaults to None.
Returns:
output: A tensor with shape (..., length, depth).
attention_weights: A tensor with shape (..., length, length).
"""
matmul_qk = tf.matmul(query, key, transpose_b=True) # (..., length, length)
if self.use_key_relative_position:
matmul_qk += self._matmul_with_relative_keys(query)
# scale matmul_qk
dk = tf.cast(tf.shape(key)[-1], tf.float32)
logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if pad_mask is not None:
logits += pad_mask * -1e9
# apply attention dropout before softmax to maintain attention_weights norm as 1
if self.attention_dropout_rate > 0:
logits = self._drop_attention_logits(logits, pad_mask, training)
# softmax is normalized on the last axis (length) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(logits, axis=-1) # (..., length, length)
output = tf.matmul(attention_weights, value) # (..., length, depth)
if self.use_value_relative_position:
output += self._matmul_with_relative_values(attention_weights)
return output, attention_weights
def _split_heads(self, x: tf.Tensor) -> tf.Tensor:
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is
(batch_size, num_heads, length, depth)
"""
x = tf.reshape(x, (tf.shape(x)[0], -1, self.num_heads, self._depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def _combine_heads(self, x: tf.Tensor) -> tf.Tensor:
"""Inverse of split_heads.
Args:
x: A Tensor with shape [batch, num_heads, length, units / num_heads]
Returns:
A Tensor with shape [batch, length, units]
"""
# (batch_size, length, num_heads, depth)
x = tf.transpose(x, perm=[0, 2, 1, 3])
# (batch_size, length, units)
return tf.reshape(x, (tf.shape(x)[0], -1, self.units))
# noinspection PyMethodOverriding
def call(
self,
query_input: tf.Tensor,
source_input: tf.Tensor,
pad_mask: Optional[tf.Tensor] = None,
training: Optional[Union[tf.Tensor, bool]] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Apply attention mechanism to query_input and source_input.
Arguments:
query_input: A tensor with shape [batch_size, length, input_size].
source_input: A tensor with shape [batch_size, length, input_size].
pad_mask: Float tensor with shape broadcastable
to (..., length, length). Defaults to None.
training: A bool, whether in training mode or not.
Returns:
Attention layer output with shape [batch_size, length, units]
"""
if training is None:
training = K.learning_phase()
query = self._query_dense_layer(query_input) # (batch_size, length, units)
key = self._key_dense_layer(source_input) # (batch_size, length, units)
value = self._value_dense_layer(source_input) # (batch_size, length, units)
query = self._split_heads(query) # (batch_size, num_heads, length, depth)
key = self._split_heads(key) # (batch_size, num_heads, length, depth)
value = self._split_heads(value) # (batch_size, num_heads, length, depth)
attention, attention_weights = self._scaled_dot_product_attention(
query, key, value, pad_mask, training
)
# attention.shape == (batch_size, num_heads, length, depth)
# attention_weights.shape == (batch_size, num_heads, length, length)
attention = self._combine_heads(attention) # (batch_size, length, units)
output = self._output_dense_layer(attention) # (batch_size, length, units)
return output, attention_weights
class TransformerEncoderLayer(tf.keras.layers.Layer):
"""Transformer encoder layer.
The layer is composed of the sublayers:
1. Self-attention layer
2. Feed-forward network (which is 2 fully-connected layers)
Arguments:
units: Positive integer, output dim of hidden layer.
num_heads: Positive integer, number of heads
to repeat the same attention structure.
filter_units: Positive integer, output dim of the first ffn hidden layer.
dropout_rate: Float between 0 and 1; fraction of the input units to drop.
attention_dropout_rate: Float, dropout rate inside attention for training.
density: Fraction of trainable weights in `RandomlyConnectedDense` layers.
unidirectional: Boolean, use a unidirectional or bidirectional encoder.
use_key_relative_position: Boolean, if 'True' use key
relative embeddings in attention.
use_value_relative_position: Boolean, if 'True' use value
relative embeddings in attention.
max_relative_position: Positive integer, max position for relative embeddings.
heads_share_relative_embedding: Boolean, if 'True'
heads will share relative embeddings.
"""
def __init__(
self,
units: int,
num_heads: int,
filter_units: int,
dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.0,
density: float = 0.2,
unidirectional: bool = False,
use_key_relative_position: bool = False,
use_value_relative_position: bool = False,
max_relative_position: int = 5,
heads_share_relative_embedding: bool = False,
) -> None:
super().__init__()
self._layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self._mha = MultiHeadAttention(
units,
num_heads,
attention_dropout_rate,
density,
unidirectional,
use_key_relative_position,
use_value_relative_position,
max_relative_position,
heads_share_relative_embedding,
)
self._dropout = tf.keras.layers.Dropout(dropout_rate)
self._ffn_layers = [
tf.keras.layers.LayerNormalization(epsilon=1e-6),
RandomlyConnectedDense(
units=filter_units, activation=tf.nn.gelu, density=density
), # (batch_size, length, filter_units)
tf.keras.layers.Dropout(dropout_rate),
RandomlyConnectedDense(
units=units, density=density
), # (batch_size, length, units)
tf.keras.layers.Dropout(dropout_rate),
]
def call(
self,
x: tf.Tensor,
pad_mask: Optional[tf.Tensor] = None,
training: Optional[Union[tf.Tensor, bool]] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Apply transformer encoder layer.
Arguments:
x: A tensor with shape [batch_size, length, units].
pad_mask: Float tensor with shape broadcastable
to (..., length, length). Defaults to None.
training: A bool, whether in training mode or not.
Returns:
Transformer encoder layer output with shape [batch_size, length, units]
"""
if training is None:
training = K.learning_phase()
x_norm = self._layer_norm(x) # (batch_size, length, units)
attn_out, attn_weights = self._mha(
x_norm, x_norm, pad_mask=pad_mask, training=training
)
attn_out = self._dropout(attn_out, training=training)
x += attn_out
ffn_out = x # (batch_size, length, units)
for layer in self._ffn_layers:
ffn_out = layer(ffn_out, training=training)
x += ffn_out
# (batch_size, length, units), (batch_size, num_heads, length, length)
return x, attn_weights
class TransformerEncoder(tf.keras.layers.Layer):
"""Transformer encoder.
Encoder stack is made up of `num_layers` identical encoder layers.
Arguments:
num_layers: Positive integer, number of encoder layers.
units: Positive integer, output dim of hidden layer.
num_heads: Positive integer, number of heads
to repeat the same attention structure.
filter_units: Positive integer, output dim of the first ffn hidden layer.
reg_lambda: Float, regularization factor.
dropout_rate: Float between 0 and 1; fraction of the input units to drop.
attention_dropout_rate: Float, dropout rate inside attention for training.
density: Approximate fraction of trainable weights (in
`RandomlyConnectedDense` layers).
unidirectional: Boolean, use a unidirectional or bidirectional encoder.
use_key_relative_position: Boolean, if 'True' use key
relative embeddings in attention.
use_value_relative_position: Boolean, if 'True' use value
relative embeddings in attention.
max_relative_position: Positive integer, max position for relative embeddings.
heads_share_relative_embedding: Boolean, if 'True'
heads will share relative embeddings.
name: Optional name of the layer.
"""
def __init__(
self,
num_layers: int,
units: int,
num_heads: int,
filter_units: int,
reg_lambda: float,
dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.0,
density: float = 0.2,
unidirectional: bool = False,
use_key_relative_position: bool = False,
use_value_relative_position: bool = False,
max_relative_position: int = 5,
heads_share_relative_embedding: bool = False,
name: Optional[Text] = None,
) -> None:
super().__init__(name=name)
self.units = units
self.unidirectional = unidirectional
l2_regularizer = tf.keras.regularizers.l2(reg_lambda)
self._embedding = RandomlyConnectedDense(
units=units, kernel_regularizer=l2_regularizer, density=density
)
# positional encoding helpers
self._angles = self._get_angles()
self._even_indices = np.arange(0, self.units, 2, dtype=np.int32)[:, np.newaxis]
self._odd_indices = np.arange(1, self.units, 2, dtype=np.int32)[:, np.newaxis]
self._dropout = tf.keras.layers.Dropout(dropout_rate)
self._enc_layers = [
TransformerEncoderLayer(
units,
num_heads,
filter_units,
dropout_rate,
attention_dropout_rate,
density,
unidirectional,
use_key_relative_position,
use_value_relative_position,
max_relative_position,
heads_share_relative_embedding,
)
for _ in range(num_layers)
]
self._layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-6)
def _get_angles(self) -> np.ndarray:
array_2d = np.arange(self.units)[np.newaxis, :]
return 1 / np.power(10000, (2 * (array_2d // 2)) / np.float32(self.units))
def _positional_encoding(self, max_position: tf.Tensor) -> tf.Tensor:
max_position = tf.cast(max_position, dtype=tf.float32)
angle_rads = tf.range(max_position)[:, tf.newaxis] * self._angles
# transpose for easy slicing
angle_rads = tf.transpose(angle_rads, perm=[1, 0])
shape = tf.shape(angle_rads)
# apply sin to even indices in the array; 2i
sin_even = tf.sin(tf.gather_nd(angle_rads, self._even_indices))
pos_encoding_even = tf.scatter_nd(self._even_indices, sin_even, shape)
# apply cos to odd indices in the array; 2i+1
cos_odd = tf.cos(tf.gather_nd(angle_rads, self._odd_indices))
pos_encoding_odd = tf.scatter_nd(self._odd_indices, cos_odd, shape)
# combine even and odd positions and transpose back
pos_encoding = tf.transpose(pos_encoding_even + pos_encoding_odd, perm=[1, 0])
# add batch dimension
return tf.stop_gradient(pos_encoding[tf.newaxis, ...])
@staticmethod
def _look_ahead_pad_mask(max_position: tf.Tensor) -> tf.Tensor:
pad_mask = 1 - tf.linalg.band_part(tf.ones((max_position, max_position)), -1, 0)
return pad_mask[tf.newaxis, tf.newaxis, :, :] # (1, 1, seq_len, seq_len)
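    # Illustrative note (not in the original): for max_position=3 the mask
    # above is
    #   [[0., 1., 1.],
    #    [0., 0., 1.],
    #    [0., 0., 0.]]
    # where 1 marks future positions that must not be attended to.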
def call(
self,
x: tf.Tensor,
pad_mask: Optional[tf.Tensor] = None,
training: Optional[Union[tf.Tensor, bool]] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Apply transformer encoder.
Arguments:
x: A tensor with shape [batch_size, length, input_size].
pad_mask: Float tensor with shape broadcastable
to (..., length, length). Defaults to None.
training: A bool, whether in training mode or not.
Returns:
Transformer encoder output with shape [batch_size, length, units]
"""
# adding embedding and position encoding.
x = self._embedding(x) # (batch_size, length, units)
x *= tf.math.sqrt(tf.cast(self.units, tf.float32))
x += self._positional_encoding(tf.shape(x)[1])
x = self._dropout(x, training=training)
if pad_mask is not None:
pad_mask = tf.squeeze(pad_mask, -1) # (batch_size, length)
pad_mask = pad_mask[:, tf.newaxis, tf.newaxis, :]
# pad_mask.shape = (batch_size, 1, 1, length)
if self.unidirectional:
# add look ahead pad mask to emulate unidirectional behavior
pad_mask = tf.minimum(
1.0, pad_mask + self._look_ahead_pad_mask(tf.shape(pad_mask)[-1])
) # (batch_size, 1, length, length)
layer_attention_weights = []
for layer in self._enc_layers:
x, attn_weights = layer(x, pad_mask=pad_mask, training=training)
layer_attention_weights.append(attn_weights)
# if normalization is done in encoding layers, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
x = self._layer_norm(x) # (batch_size, length, units)
# Keep the batch dimension on the first axis
attention_weights_as_output = tf.transpose(
tf.stack(layer_attention_weights), (1, 0, 2, 3, 4)
)
# (batch_size, length, units),
# (batch_size, num_layers, num_heads, length, length)
return x, attention_weights_as_output
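# Hedged usage sketch (not part of the original module); assumes rasa is
# installed, and all hyperparameters below are illustrative only.
if __name__ == "__main__":
    encoder = TransformerEncoder(
        num_layers=2, units=32, num_heads=4, filter_units=64, reg_lambda=0.001
    )
    x = tf.random.uniform((2, 5, 16))  # (batch_size, length, input_size)
    out, attn = encoder(x, training=False)
    print(out.shape)   # (2, 5, 32)
    print(attn.shape)  # (2, 2, 4, 5, 5): (batch, layers, heads, length, length)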
|
{
"content_hash": "44860a67caf23736a94cac98e40a0d64",
"timestamp": "",
"source": "github",
"line_count": 638,
"max_line_length": 88,
"avg_line_length": 39.641065830721004,
"alnum_prop": 0.59511288600688,
"repo_name": "RasaHQ/rasa_nlu",
"id": "57a37a5fe13c62958e6f67d5864adb94debde03b",
"size": "25291",
"binary": false,
"copies": "1",
"ref": "refs/heads/emptystring_10504",
"path": "rasa/utils/tensorflow/transformer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "3462"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "Python",
"bytes": "1467067"
},
{
"name": "Shell",
"bytes": "941"
}
],
"symlink_target": ""
}
|
'''
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import test_stub
import zstackwoodpecker.test_state as test_state
test_obj_dict = test_state.TestStateDict()
vol_num = 24
volume_list = []
def test():
global test_obj_dict
global vol_num
    global volume_list
vm = test_stub.create_vm()
test_obj_dict.add_vm(vm)
vm.check()
for i in range(vol_num):
volume_list.append(test_stub.create_volume())
test_obj_dict.add_volume(volume_list[i])
additional_vol = test_stub.create_volume()
test_obj_dict.add_volume(additional_vol)
for i in range(vol_num):
volume_list[i].check()
test_util.test_dsc('Test attach/detach 24 volumes operations.')
for i in range(vol_num):
volume_list[i].attach(vm)
for i in range(vol_num):
volume_list[i].check()
for i in range(vol_num):
volume_list[i].detach()
volume_list[i].check()
test_util.test_dsc('Redo attach/detach 24 volumes operations.')
for i in range(vol_num):
volume_list[i].attach(vm)
volume_list[i].check()
test_util.test_dsc('Try to attach the 25th data volume.')
    attach_succeeded = False
    try:
        additional_vol.attach(vm)
        attach_succeeded = True
    except:
        test_util.test_logger('Catch expected exception: try to attach the 25th data [volume:] %s to [vm:] %s fail.' % (additional_vol.volume.uuid, vm.vm.uuid))
    if attach_succeeded:
        test_util.test_fail('Fail: could attach the 25th data [volume:] %s to [vm:] %s.' % (additional_vol.volume.uuid, vm.vm.uuid))
        return False
    for i in range(vol_num):
        volume_list[i].detach()
        volume_list[i].check()
    for i in range(vol_num):
        volume_list[i].delete()
        volume_list[i].check()
    vm.destroy()
    test_util.test_pass('Create Multi Data Volumes for VM Test Success')
    return True
def error_cleanup():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
|
{
"content_hash": "6dd295c98ead4845790435eb74540e8d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 160,
"avg_line_length": 29.183098591549296,
"alnum_prop": 0.6076254826254827,
"repo_name": "quarkonics/zstack-woodpecker",
"id": "9d57f2bdba912e004797e67873e5043bf18e1a25",
"size": "2072",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/basic/test_add_multi_volumes.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "1688448"
},
{
"name": "Shell",
"bytes": "48493"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="namelengthsrc", parent_name="choropleth.hoverlabel", **kwargs
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
{
"content_hash": "ffaf35350a7c0043df8ebebcc1209af1",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 88,
"avg_line_length": 34.57142857142857,
"alnum_prop": 0.6115702479338843,
"repo_name": "plotly/python-api",
"id": "003e029c90c1f56f0a2137a65faf2aa55c943d2d",
"size": "484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/choropleth/hoverlabel/_namelengthsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
import sublime
from sublime_plugin import TextCommand
class GsInsertTextAtCursorCommand(TextCommand):
"""
Insert the provided text at the current cursor position(s).
"""
def run(self, edit, text):
text_len = len(text)
selected_ranges = []
for region in self.view.sel():
selected_ranges.append((region.begin(), region.end()))
self.view.replace(edit, region, text)
self.view.sel().clear()
self.view.sel().add_all([sublime.Region(begin + text_len, end + text_len)
for begin, end in selected_ranges])
class GsReplaceViewTextCommand(TextCommand):
"""
Replace the contents of the view with the provided text and optional callback.
If cursors exist, make sure to place them where they were. Otherwise, add
a single cursor at the start of the file.
"""
def run(self, edit, text, nuke_cursors=False):
cursors_num = len(self.view.sel())
is_read_only = self.view.is_read_only()
self.view.set_read_only(False)
self.view.replace(edit, sublime.Region(0, self.view.size()), text)
self.view.set_read_only(is_read_only)
if not cursors_num or nuke_cursors:
selections = self.view.sel()
selections.clear()
pt = sublime.Region(0, 0)
selections.add(pt)
class GsReplaceRegionCommand(TextCommand):
"""
Replace the contents of a region within the view with the provided text.
"""
def run(self, edit, text, begin, end):
is_read_only = self.view.is_read_only()
self.view.set_read_only(False)
self.view.replace(edit, sublime.Region(begin, end), text)
self.view.set_read_only(is_read_only)
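# Invocation sketch (not part of the module): Sublime Text derives command
# names from class names, so GsReplaceViewTextCommand can be run as:
#   view.run_command("gs_replace_view_text",
#                    {"text": "new contents", "nuke_cursors": True})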
|
{
"content_hash": "4f7397ee4b5a68138322403cc1b5c593",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 82,
"avg_line_length": 31.410714285714285,
"alnum_prop": 0.6236498010233087,
"repo_name": "jmanuel1/GitSavvy",
"id": "f41b1f3860f55e78105a876a94cfc7517a0f96f0",
"size": "1759",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "common/commands/view_manipulation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "52441"
},
{
"name": "Python",
"bytes": "296898"
}
],
"symlink_target": ""
}
|
import os
import pytest
import cPickle as pickle
import pandas as pd
from middleware.display.beautify import beautify, json_return, has_failed
from tests.constants import ARGS_DICT, BEAUTIFY_VF_SEROTYPE
vf_serotype_gene_dict = os.path.join('tests/refs', 'GCA_000005845.2_ASM584v2_genomic.fna_ectyper-vf_serotype.p')
amr_gene_dict = os.path.join('tests/refs', '2017-05-21-00-29-20-874628-GCA_000005845.2_ASM584v2_genomic.fna_rgi.tsv_rgi.p')
def test_beautify_vf_serotype():
## test vf & serotype json return
single_dict = dict(ARGS_DICT)
single_dict.update({'i': vf_serotype_gene_dict})
r = beautify(vf_serotype_gene_dict, single_dict)
assert isinstance(r, list)
assert len(r) == len(BEAUTIFY_VF_SEROTYPE)
def test_beautify_serotype_only():
## test serotype only json return
# note: this is actually the same gene results file as above
# we only differentiate what is returned to the user, because we want all analysis ran & added to the db
single_dict = dict(ARGS_DICT)
single_dict.update({'i': vf_serotype_gene_dict})
    # this mimics user selection of serotype only
single_dict.update({'options':{'vf': False, 'amr': False, 'serotype': True}})
# beautify is what is actually called by the RQ worker & returned to the user
r = beautify(vf_serotype_gene_dict, single_dict)
assert isinstance(r, list)
assert len(r) == 1
def test_beautify_json_r_serotype_only():
single_dict = dict(ARGS_DICT)
single_dict.update({'i': vf_serotype_gene_dict})
    # this mimics user selection of serotype only
single_dict.update({'options':{'vf': False, 'amr': False, 'serotype': True}})
## test json_r separately of failed handling
# json_return() is a part of the beautify work
gene_dict = pickle.load(open(vf_serotype_gene_dict, 'rb'))
assert type(gene_dict) == dict
assert len(gene_dict.keys()) == 2
r = json_return(gene_dict=gene_dict, args_dict=single_dict)
assert len(r) == 1
failed = has_failed(r)
assert failed == False
def test_beautify_amr_only():
single_dict = dict(ARGS_DICT)
single_dict.update({'i': amr_gene_dict})
    # this mimics user selection of amr only
single_dict.update({'options':{'vf': False, 'amr': True, 'serotype': False}})
r = beautify(amr_gene_dict, single_dict)
assert isinstance(r, list)
assert len(r) > 1
def test_beautify_json_r_amr_only():
single_dict = dict(ARGS_DICT)
single_dict.update({'i': amr_gene_dict})
    # this mimics user selection of amr only
single_dict.update({'options':{'vf': False, 'amr': True, 'serotype': False}})
gene_dict = pickle.load(open(amr_gene_dict, 'rb'))
assert type(gene_dict) == dict
assert len(gene_dict.keys()) == 1
assert 'Antimicrobial Resistance' in gene_dict.keys()
r = json_return(gene_dict=gene_dict, args_dict=single_dict)
assert len(r) > 1
## test some pandas stuff on the json_r
df = pd.DataFrame(r)
assert 'Serotype' not in df.analysis.unique()
assert 'Virulence Factors' not in df.analysis.unique()
assert 'Antimicrobial Resistance' in df.analysis.unique()
|
{
"content_hash": "3c7d7f5c8d716d10b1482599d058631a",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 123,
"avg_line_length": 42.351351351351354,
"alnum_prop": 0.6860242501595405,
"repo_name": "superphy/backend",
"id": "3fbab9ed07b24e330c6e50231d991000209dc4ef",
"size": "3134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/tests/test_beautify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "392139"
}
],
"symlink_target": ""
}
|
NON_SORTABLE = 'non_sortable'
SEARCH_CONFIG = 'search_config'
|
{
"content_hash": "4711613f0c1d910680380d73242f8bc4",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 31,
"avg_line_length": 31,
"alnum_prop": 0.7419354838709677,
"repo_name": "ENCODE-DCC/snovault",
"id": "aba772b96a2986c267221b1a8c6ae9b03d984221",
"size": "62",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/snovault/elasticsearch/searches/interfaces.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "6565"
},
{
"name": "HTML",
"bytes": "4517"
},
{
"name": "JavaScript",
"bytes": "312639"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "889638"
},
{
"name": "Ruby",
"bytes": "1010"
},
{
"name": "SCSS",
"bytes": "255158"
},
{
"name": "Shell",
"bytes": "697"
}
],
"symlink_target": ""
}
|
'''
Written by Lijun An and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
'''
import os
import numpy as np
import pandas as pd
from copy import deepcopy
from utils.misc import create_folder, txt2list
def merge_multi_bins(args,
round1_threshold_list,
round2_threshold_list,
name1='MACC',
name2='ADNI'):
"""
Merge multiple bins data
Args:
args (tuple): Parameters
round1_threshold_list (list): List of thresholds in round 1 matching
        round2_threshold_list (list): List of thresholds in round 2 matching
name1 (str, optional): Name for dataset1. Defaults to 'MACC'.
name2 (str, optional): Name for dataset2. Defaults to 'ADNI'.
"""
bins_path = os.path.join(args.checkpoint_path, args.matching_pair,
'matching_' + str(args.nb_bins) + 'BINs')
merged_path = os.path.join(
bins_path, 'MERGED',
'AGE' + str(args.age_penalty) + '_SEX' + str(args.sex_penalty) + '_DX'
+ str(args.dx_penalty) + '_MMSE' + str(args.mmse_penalty))
create_folder(merged_path)
controled_MACC = pd.DataFrame()
controled_ADNI = pd.DataFrame()
for bin in range(args.nb_bins):
        threshold_name = 'Threshold_' + str(
            round1_threshold_list[bin]) + '-' + str(round2_threshold_list[bin])
bin_path = os.path.join(bins_path, 'BIN_' + str(bin), 'merged')
bin_merged_path = os.path.join(
bin_path,
'AGE' + str(args.age_penalty) + '_SEX' + str(args.sex_penalty) +
'_DX' + str(args.dx_penalty) + '_MMSE' + str(args.mmse_penalty),
            threshold_name)
bin_macc_df = pd.read_csv(
os.path.join(bin_merged_path, 'controled_' + name1 + '.csv'))
bin_adni_df = pd.read_csv(
os.path.join(bin_merged_path, 'controled_' + name2 + '.csv'))
controled_MACC = controled_MACC.append(bin_macc_df, sort=False)
controled_ADNI = controled_ADNI.append(bin_adni_df, sort=False)
# save merged
controled_MACC.to_csv(
os.path.join(merged_path, 'controled_' + name1 + '.csv'),
sep=',',
index=False)
controled_ADNI.to_csv(
os.path.join(merged_path, 'controled_' + name2 + '.csv'),
sep=',',
index=False)
def extract_matched_unmatched_data(args,
input_path,
name1='MACC',
name2='ADNI'):
"""
Extract matched and unmatched data
Args:
args (tuple): Parameters
input_path (str): Path for input data
name1 (str, optional): Name for dataset1. Defaults to 'MACC'.
name2 (str, optional): Name for dataset2. Defaults to 'ADNI'.
"""
# columns
cols = txt2list(args.columns_path)
# read raw data
raw_dataset1 = pd.read_csv(args.MACC_data_path, usecols=cols)
raw_dataset2 = pd.read_csv(args.ADNI_data_path, usecols=cols)
# read matched data
matched_dataset1 = pd.read_csv(
os.path.join(input_path, 'controled_' + name1 + '.csv'))
matched_dataset2 = pd.read_csv(
os.path.join(input_path, 'controled_' + name2 + '.csv'))
# deepcopy data
matched_dataset1_copy = deepcopy(matched_dataset1)
matched_dataset2_copy = deepcopy(matched_dataset2)
# get matched RIDs
matched_subs_dataset1 = (matched_dataset1.RID).values
matched_subs_dataset2 = (matched_dataset2.RID).values
    # check that all matched dates (visits) have DX and MMSE;
    # otherwise set the date to nan.
    # if all dates of a subject are nan, remove the subject from the matched list.
    # checking dataset1 alone is enough
unqualfied_matched_subs_dataset1 = []
unqualfied_matched_subs_dataset2 = []
for i, sub in enumerate(matched_subs_dataset1):
matched_dates = matched_dataset1.iloc[matched_dataset1[
matched_dataset1.RID == sub].index, 1:].values[0]
matched_dates = matched_dates.astype(str)
matched_dates_val = matched_dates[matched_dates != str(np.nan)]
for date in matched_dates_val:
raw_mask = (raw_dataset1.RID == sub) & (
raw_dataset1.EXAMDATE == date)
dx = float(raw_dataset1.loc[raw_mask, ['DX']].values[0])
mmse = float(raw_dataset1.loc[raw_mask, ['MMSE']].values[0])
# check whether the dx and mmse is nan
if np.isnan(dx) or np.isnan(mmse):
# we need to set this date to nan
# get coordinate
index = np.where(matched_dates == date)[0]
matched_dataset1_copy.iloc[matched_dataset1_copy[
matched_dataset1_copy.RID == sub].index, index +
1] = np.nan
matched_dataset2_copy.iloc[matched_dataset1_copy[
matched_dataset1_copy.RID == sub].index, index +
1] = np.nan
# check whether all dates are nan
matched_dates_copy = matched_dataset1_copy.iloc[matched_dataset1_copy[
matched_dataset1_copy.RID == sub].index, 1:].values[0]
matched_dates_copy = matched_dates_copy.astype(str)
matched_dates_copy = matched_dates_copy[
matched_dates_copy != str(np.nan)]
if len(matched_dates_copy) == 0:
# move this subject to unmatched
unqualfied_matched_subs_dataset1.append(sub)
unqualfied_matched_subs_dataset2.append(matched_subs_dataset2[i])
    # get unqualified indexes
sorter = np.argsort(matched_subs_dataset1)
index = sorter[np.searchsorted(
matched_subs_dataset1, unqualfied_matched_subs_dataset1,
sorter=sorter)]
qualfied_matched_subs_dataset1 = np.delete(matched_subs_dataset1, index)
qualfied_matched_subs_dataset2 = np.delete(matched_subs_dataset2, index)
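    # Illustrative note (not in the original): the sorter/searchsorted pair
    # above recovers positions in the unsorted array, e.g. for
    # subs=[30, 10, 20] and unqualified=[20]: sorter=[1, 2, 0],
    # searchsorted gives 1, and sorter[1]=2 locates 20 in subs.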
# generate unmatched data
matched_rowIndex_dataset1 = []
for sub in qualfied_matched_subs_dataset1:
sub_mask = (raw_dataset1.RID == sub)
matched_rowIndex_dataset1 += list(raw_dataset1[sub_mask].index.values)
# drop for dataset1
unmatched_dataset1 = raw_dataset1.drop(matched_rowIndex_dataset1, axis=0)
unmatched_dataset1.reset_index(drop=True)
unmatched_dataset1.to_csv(
os.path.join(input_path, name1 + '_unmatched.csv'),
sep=',',
index=False)
matched_rowIndex_dataset2 = []
for sub in qualfied_matched_subs_dataset2:
sub_mask = (raw_dataset2.RID == sub)
matched_rowIndex_dataset2 += list(raw_dataset2[sub_mask].index.values)
    # drop for dataset2
unmatched_dataset2 = raw_dataset2.drop(matched_rowIndex_dataset2, axis=0)
unmatched_dataset2.reset_index(drop=True)
unmatched_dataset2.to_csv(
os.path.join(input_path, name2 + '_unmatched.csv'),
sep=',',
index=False)
# generate matched data
matched_data_datset1 = pd.DataFrame(columns=cols)
for sub in qualfied_matched_subs_dataset1:
matched_dates = matched_dataset1_copy.iloc[matched_dataset1_copy[
matched_dataset1_copy.RID == sub].index, 1:].values[0]
matched_dates = matched_dates.astype(str)
matched_dates = matched_dates[matched_dates != str(np.nan)]
for date in matched_dates:
date_mask = (raw_dataset1.RID == sub) & (
raw_dataset1.EXAMDATE == date)
row_df = raw_dataset1[date_mask]
matched_data_datset1 = matched_data_datset1.append(row_df)
matched_data_datset1 = matched_data_datset1[cols]
matched_data_datset1.to_csv(
os.path.join(input_path, name1 + '_matched.csv'), sep=',', index=False)
matched_data_datset2 = pd.DataFrame(columns=cols)
for sub in qualfied_matched_subs_dataset2:
matched_dates = matched_dataset2_copy.iloc[matched_dataset2_copy[
matched_dataset2_copy.RID == sub].index, 1:].values[0]
matched_dates = matched_dates.astype(str)
matched_dates = matched_dates[matched_dates != str(np.nan)]
for date in matched_dates:
date_mask = (raw_dataset2.RID == sub) & (
raw_dataset2.EXAMDATE == date)
row_df = raw_dataset2[date_mask]
matched_data_datset2 = matched_data_datset2.append(row_df)
matched_data_datset2 = matched_data_datset2[cols]
matched_data_datset2.to_csv(
os.path.join(input_path, name2 + '_matched.csv'), sep=',', index=False)
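# --- Optional sanity check (illustrative sketch, not part of the original
# pipeline). It reloads the matched CSVs written above and verifies the
# invariants the filtering is meant to enforce. It relies on the pandas/os
# imports at the top of this script and assumes DX and MMSE are among the
# exported columns; adjust if your column list differs.
def check_matched_outputs(input_path, name1, name2):
    df1 = pd.read_csv(os.path.join(input_path, name1 + '_matched.csv'))
    df2 = pd.read_csv(os.path.join(input_path, name2 + '_matched.csv'))
    # every matched visit in dataset1 should have a counterpart in dataset2
    assert df1.shape[0] == df2.shape[0], \
        'matched row counts differ: %d vs %d' % (df1.shape[0], df2.shape[0])
    # dataset1 visits with a missing diagnosis or MMSE were dropped above
    assert df1['DX'].notnull().all() and df1['MMSE'].notnull().all()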
|
{
"content_hash": "35b0c7fa2a2206c86ad5a345c2e4f0e0",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 79,
"avg_line_length": 44.80628272251309,
"alnum_prop": 0.6090207992521617,
"repo_name": "ThomasYeoLab/CBIG",
"id": "dd3bffc1bdd2f6c522cb82a17ba6b85cf99c82d6",
"size": "8607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stable_projects/predict_phenotypes/An2022_gcVAE/matching/step8_merge_multiple_bins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35378"
},
{
"name": "C",
"bytes": "2076236"
},
{
"name": "C++",
"bytes": "1461097"
},
{
"name": "CSS",
"bytes": "6852"
},
{
"name": "Fortran",
"bytes": "598090"
},
{
"name": "HTML",
"bytes": "287918"
},
{
"name": "Jupyter Notebook",
"bytes": "569200"
},
{
"name": "MATLAB",
"bytes": "10013692"
},
{
"name": "Makefile",
"bytes": "7902"
},
{
"name": "Objective-C",
"bytes": "77"
},
{
"name": "PostScript",
"bytes": "8416"
},
{
"name": "Python",
"bytes": "2499129"
},
{
"name": "R",
"bytes": "33929"
},
{
"name": "Shell",
"bytes": "1923688"
},
{
"name": "TeX",
"bytes": "8993"
},
{
"name": "Vim Script",
"bytes": "2859"
},
{
"name": "XSLT",
"bytes": "19506"
}
],
"symlink_target": ""
}
|
'''
jsonconf
@author: Andrew Philpot
@version 0.5
encode/decode objects in JSON format as configurations
Usage: python jsonconf.py
Options:
\t-h, --help:\tprint help to STDOUT and quit
\t-v, --verbose:\tverbose output
'''
import sys
import getopt
from objcode import ObjectDecoder
import os
import util
from util import echo
VERSION = '0.5'
__version__ = VERSION
# defaults
VERBOSE = False
JSONCONFROOT = """/nfs/isd3/philpot/project/wat/conf"""
def readConfigs(file=None):
if not file:
# possibly should run this through makeJsonconfFile to default the dir
file = "conf/extract.json"
# print >> sys.stderr, "read from file %s" % file
try:
r = util.slurp(open(file))
except IOError as e:
print >> sys.stderr, "JSON file %s not found" % file
raise
# print >> sys.stderr, "have read a string of len %s" % len(r)
try:
# print >> sys.stderr, "trying to decode"
d = ObjectDecoder().decode(r)
# print >> sys.stderr, "decoded to object of type %s" % type(d)
return d
except ValueError as e:
print >> sys.stderr, "Bad JSON syntax in %s [%s]" % (file, e)
raise
def makeJsonconfFile(type, root=None):
if not root:
root = JSONCONFROOT
return os.path.join(root, "conf", "%s.json" % type)
import pprint
def retrieveJson(type, root=None, verbose=False):
if not root:
root = JSONCONFROOT
p = makeJsonconfFile(type, root=root)
# print >> sys.stderr, "rJ file is %s" % p
val = readConfigs(file=p)
if verbose:
s = ("retrieveJson: file=%r\n" % p) + pprint.pformat(val)
print >> sys.stderr, s
return val
def lookupJson(key, jsondict, default=None, type=None):
v = jsondict.get(key, None) or jsondict.get(unicode(key), None)
# print """<pre>jd=%r</pre>""" % jsondict
# print """<pre>key=%s, v=%s</pre>""" % (key, v)
# print """<pre>key=%r, v=%r</pre>""" % (unicode(key), v)
if v:
return v
elif default==None:
return None
elif default=="error":
raise ValueError("No match for key %s type %s" % (key, type))
elif default=="warn":
print >> sys.stderr, "No key %s in %s" % (key, jsondict)
return None
else:
return default
def chooseJson(key, type, root=None, default='error'):
if not root:
root = JSONCONFROOT
choices = retrieveJson(type, root=root)
return lookupJson(key, choices, default=default, type=type)
# 20 March 2013 by Philpot
# default root = None, then if necessary set inside the fn
# why? because definition binds to value, not to variable
def readConfig(conf='test', root=None, type='db', verbose=VERBOSE):
if not root:
root = JSONCONFROOT
# print "entering RC, JCR = %s" % JSONCONFROOT
# print "entering RC, root = %s" % root
if verbose:
print >> sys.stderr, "reading a key %s from %s/%s" % (conf, root, type)
chosen = chooseJson(conf, type, root=root)
if verbose:
print >> sys.stderr, "%s cfg is %s" % (conf, chosen)
return chosen
def readDbConfig(conf='test', root=None, verbose=VERBOSE):
if not root:
root = JSONCONFROOT
return readConfig(conf=conf, type='db', root=root)
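# Illustrative usage sketch (not part of the original module): shows how the
# default-handling modes of lookupJson combine with chooseJson/readConfig.
# The 'testdb' key and the dict below are made-up examples.
def example_lookup():
    configs = {"testdb": {"host": "localhost", "port": 3306}}
    # plain lookup: returns the matching value
    cfg = lookupJson("testdb", configs)
    # default None (the default): a missing key silently returns None
    missing = lookupJson("nosuchdb", configs)
    # default="warn": prints a warning to stderr and returns None
    lookupJson("nosuchdb", configs, default="warn")
    # default="error": raises ValueError for a missing key
    try:
        lookupJson("nosuchdb", configs, default="error", type="db")
    except ValueError:
        pass
    return cfg, missing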
class Jsonconf(object):
def __init__(self, filename, verbose=VERBOSE):
'''create Jsonconf'''
self.verbose = verbose
self.filename = filename or 'conf/test.json'
def readConfigs(self):
return readConfigs(self.filename)
def main(argv=None):
'''this is called if run from command line'''
# process command line arguments
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], "hv",
["echo=", "help",
"verbose"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# default options
my_verbose = VERBOSE
# process options
for o,a in opts:
if o in ("-h","--help"):
print __doc__
sys.exit(0)
if o in ("--echo", ):
print a
if o in ("-v", "--verbose", ):
my_verbose = True
filename = args[0] if args else 'conf/test.json'
jc = Jsonconf(filename, verbose=my_verbose)
print "From %s, read %s" % (filename, jc.readConfigs())
c = chooseJson("trbaux", "db")
print "Choose(trbaux,db) = %s" % c
print "its port is %s" % c.get('port')
# call main() if this is run as standalone
if __name__ == "__main__":
sys.exit(main())
# End of jsonconf.py
|
{
"content_hash": "7e5612352b9d7d61f4a7cfe6e6e1df01",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 79,
"avg_line_length": 29.844155844155843,
"alnum_prop": 0.5942123585726719,
"repo_name": "philpot/pymod",
"id": "f558cf28ac5a318454a77765dd421cd2d668e286",
"size": "4639",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jsonconf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "528287"
}
],
"symlink_target": ""
}
|
"""subprocess42 is the answer to life the universe and everything.
It has the particularity of having a Popen implementation that can yield output
as it is produced while implementing a timeout and NOT requiring the use of
worker threads.
Example:
Wait for a child process with a timeout, send SIGTERM, wait a grace period
then send SIGKILL:
def wait_terminate_then_kill(proc, timeout, grace):
try:
return proc.wait(timeout)
except subprocess42.TimeoutExpired:
proc.terminate()
try:
return proc.wait(grace)
except subprocess42.TimeoutExpired:
proc.kill()
return proc.wait()
TODO(maruel): Add VOID support like subprocess2.
"""
import collections
import contextlib
import errno
import os
import signal
import sys
import threading
import time
import subprocess
from subprocess import CalledProcessError, PIPE, STDOUT # pylint: disable=W0611
from subprocess import list2cmdline
# Default maxsize argument.
MAX_SIZE = 16384
# Set to True when inhibit_crash_dump() has been called.
_OS_ERROR_REPORTING_INHIBITED = False
if sys.platform == 'win32':
import ctypes
import msvcrt # pylint: disable=F0401
from ctypes import wintypes
from ctypes import windll
  # Which signal is received depends on how this process was called and is
  # outside the control of this script. See the Popen docstring for more
  # details.
STOP_SIGNALS = (signal.SIGBREAK, signal.SIGTERM)
# Windows processes constants.
# Subset of process priority classes.
# https://docs.microsoft.com/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getpriorityclass
BELOW_NORMAL_PRIORITY_CLASS = 0x4000
IDLE_PRIORITY_CLASS = 0x40
# Constants passed to CreateProcess creationflags argument.
# https://docs.microsoft.com/windows/desktop/api/processthreadsapi/nf-processthreadsapi-createprocessw
CREATE_SUSPENDED = 0x4
CREATE_NEW_CONSOLE = subprocess.CREATE_NEW_CONSOLE
CREATE_NEW_PROCESS_GROUP = subprocess.CREATE_NEW_PROCESS_GROUP
# Job Objects constants and structs.
JobObjectBasicLimitInformation = 2
JobObjectBasicUIRestrictions = 4
JobObjectExtendedLimitInformation = 9
# https://docs.microsoft.com/windows/desktop/api/winnt/ns-winnt-_jobobject_basic_limit_information
JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 0x8
JOB_OBJECT_LIMIT_AFFINITY = 0x10
JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x800
JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x400
JOB_OBJECT_LIMIT_JOB_MEMORY = 0x200
JOB_OBJECT_LIMIT_JOB_TIME = 0x4
JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x2000
JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME = 0x40
JOB_OBJECT_LIMIT_PRIORITY_CLASS = 0x20
JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x100
JOB_OBJECT_LIMIT_PROCESS_TIME = 0x2
JOB_OBJECT_LIMIT_SCHEDULING_CLASS = 0x80
JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x1000
JOB_OBJECT_LIMIT_SUBSET_AFFINITY = 0x4000
JOB_OBJECT_LIMIT_WORKINGSET = 0x1
class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
('PerProcessUserTimeLimit', ctypes.wintypes.LARGE_INTEGER),
('PerJobUserTimeLimit', ctypes.wintypes.LARGE_INTEGER),
('LimitFlags', ctypes.wintypes.DWORD),
('MinimumWorkingSetSize', ctypes.c_size_t),
('MaximumWorkingSetSize', ctypes.c_size_t),
('ActiveProcessLimit', ctypes.wintypes.DWORD),
('Affinity', ctypes.POINTER(ctypes.wintypes.ULONG)),
('PriorityClass', ctypes.wintypes.DWORD),
('SchedulingClass', ctypes.wintypes.DWORD),
]
@property
def info_type(self):
return JobObjectBasicLimitInformation
# https://docs.microsoft.com/windows/desktop/api/winnt/ns-winnt-io_counters
class IO_COUNTERS(ctypes.Structure):
_fields_ = [
('ReadOperationCount', ctypes.c_ulonglong),
('WriteOperationCount', ctypes.c_ulonglong),
('OtherOperationCount', ctypes.c_ulonglong),
('ReadTransferCount', ctypes.c_ulonglong),
('WriteTransferCount', ctypes.c_ulonglong),
('OtherTransferCount', ctypes.c_ulonglong),
]
# https://docs.microsoft.com/windows/desktop/api/winnt/ns-winnt-_jobobject_extended_limit_information
class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
('BasicLimitInformation', JOBOBJECT_BASIC_LIMIT_INFORMATION),
('IoInfo', IO_COUNTERS),
('ProcessMemoryLimit', ctypes.c_size_t),
('JobMemoryLimit', ctypes.c_size_t),
('PeakProcessMemoryUsed', ctypes.c_size_t),
('PeakJobMemoryUsed', ctypes.c_size_t),
]
@property
def info_type(self):
return JobObjectExtendedLimitInformation
# https://docs.microsoft.com/en-us/windows/desktop/api/winnt/ns-winnt-jobobject_basic_ui_restrictions
JOB_OBJECT_UILIMIT_DESKTOP = 0x40
JOB_OBJECT_UILIMIT_DISPLAYSETTINGS = 0x10
JOB_OBJECT_UILIMIT_EXITWINDOWS = 0x80
JOB_OBJECT_UILIMIT_GLOBALATOMS = 0x20
JOB_OBJECT_UILIMIT_HANDLES = 0x1
class JOBOBJECT_BASIC_UI_RESTRICTIONS(ctypes.Structure):
_fields_ = [('UIRestrictionsClass', ctypes.wintypes.DWORD)]
@property
def info_type(self):
return JobObjectBasicUIRestrictions
def ReadFile(handle, desired_bytes):
"""Calls kernel32.ReadFile()."""
c_read = wintypes.DWORD()
buff = wintypes.create_string_buffer(desired_bytes+1)
# If it fails, the buffer will probably(?) not be affected.
windll.kernel32.ReadFile(
handle, buff, desired_bytes, wintypes.byref(c_read), None)
# NULL terminate it.
buff[c_read.value] = '\x00'
return wintypes.GetLastError(), buff.value
def PeekNamedPipe(handle):
"""Calls kernel32.PeekNamedPipe(). Simplified version."""
c_avail = wintypes.DWORD()
c_message = wintypes.DWORD()
success = windll.kernel32.PeekNamedPipe(
handle, None, 0, None, wintypes.byref(c_avail),
wintypes.byref(c_message))
if not success:
raise OSError(wintypes.GetLastError())
return c_avail.value
def recv_multi_impl(conns, maxsize, timeout):
"""Reads from the first available pipe.
It will immediately return on a closed connection, independent of timeout.
Arguments:
- maxsize: Maximum number of bytes to return. Defaults to MAX_SIZE.
- timeout: If None, it is blocking. If 0 or above, will return None if no
data is available within |timeout| seconds.
Returns:
tuple(int(index), str(data), bool(closed)).
"""
assert conns
assert timeout is None or isinstance(timeout, (int, float)), timeout
maxsize = max(maxsize or MAX_SIZE, 1)
# TODO(maruel): Use WaitForMultipleObjects(). Python creates anonymous pipes
# for proc.stdout and proc.stderr but they are implemented as named pipes on
    # Windows. Since named pipes are not waitable objects, they can't be passed
# as-is to WFMO(). So this means N times CreateEvent(), N times ReadFile()
# and finally WFMO(). This requires caching the events handles in the Popen
# object and remembering the pending ReadFile() calls. This will require
# some re-architecture to store the relevant event handle and OVERLAPPEDIO
# object in Popen or the file object.
start = time.time()
handles = [
(i, msvcrt.get_osfhandle(c.fileno())) for i, c in enumerate(conns)
]
while True:
for index, handle in handles:
try:
avail = min(PeekNamedPipe(handle), maxsize)
if avail:
return index, ReadFile(handle, avail)[1], False
except OSError:
# The pipe closed.
return index, None, True
if timeout is not None and (time.time() - start) >= timeout:
return None, None, False
# Polling rocks.
time.sleep(0.001)
class _JobObject(object):
"""Manages a job object."""
def __init__(self, containment):
# The first process to be added to the job object.
self._proc = None
# https://docs.microsoft.com/windows/desktop/api/jobapi2/nf-jobapi2-createjobobjectw
self._hjob = ctypes.windll.kernel32.CreateJobObjectW(None, None)
if not self._hjob:
# pylint: disable=undefined-variable
raise WindowsError(
'Failed to create job object: %s' % ctypes.GetLastError())
# TODO(maruel): Use a completion port to listen to messages as described
# at
# https://docs.microsoft.com/windows/desktop/api/winnt/ns-winnt-_jobobject_associate_completion_port
# TODO(maruel): Enable configuring the limit, like maximum number of
# processes, working set size.
obj = JOBOBJECT_EXTENDED_LIMIT_INFORMATION()
obj.BasicLimitInformation.LimitFlags = (
JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION|
JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE)
if containment.limit_processes:
obj.BasicLimitInformation.ActiveProcessLimit = (
containment.limit_processes)
obj.BasicLimitInformation.LimitFlags |= JOB_OBJECT_LIMIT_ACTIVE_PROCESS
if containment.limit_total_committed_memory:
obj.BasicLimitInformation.LimitFlags |= JOB_OBJECT_LIMIT_JOB_MEMORY
obj.JobMemoryLimit = containment.limit_total_committed_memory
self._set_information(obj)
# Add UI limitations.
# TODO(maruel): The limitations currently used are based on Chromium's
# testing needs. For example many unit tests use the clipboard, or change
# the display settings (!)
obj = JOBOBJECT_BASIC_UI_RESTRICTIONS(
UIRestrictionsClass=
JOB_OBJECT_UILIMIT_DESKTOP|
JOB_OBJECT_UILIMIT_EXITWINDOWS|
JOB_OBJECT_UILIMIT_GLOBALATOMS|
JOB_OBJECT_UILIMIT_HANDLES)
self._set_information(obj)
def close(self):
if self._hjob:
ctypes.windll.kernel32.CloseHandle(self._hjob)
self._hjob = None
def kill(self, exit_code):
"""Return True if the TerminateJobObject call succeeded, or not operation
was done.
"""
if not self._hjob:
return True
# "Kill" the job object instead of the process.
# https://docs.microsoft.com/windows/desktop/api/jobapi2/nf-jobapi2-terminatejobobject
return bool(
ctypes.windll.kernel32.TerminateJobObject(self._hjob, exit_code))
def assign_proc(self, proc):
"""Assigns the process handle to the job object."""
if not ctypes.windll.kernel32.AssignProcessToJobObject(
self._hjob, int(proc._handle)):
# pylint: disable=undefined-variable
raise WindowsError(
'Failed to assign job object: %s' % ctypes.GetLastError())
if not ctypes.windll.kernel32.ResumeThread(int(proc._handle_thread)):
# pylint: disable=undefined-variable
raise WindowsError(
'Failed to resume child process thread: %s' %
ctypes.GetLastError())
def _set_information(self, obj):
# https://docs.microsoft.com/windows/desktop/api/jobapi2/nf-jobapi2-setinformationjobobject
if not ctypes.windll.kernel32.SetInformationJobObject(
self._hjob, obj.info_type, ctypes.byref(obj), ctypes.sizeof(obj)):
# pylint: disable=undefined-variable
raise WindowsError(
'Failed to adjust job object with type %s: %s' %
(obj.info_type, ctypes.GetLastError()))
else:
import fcntl # pylint: disable=F0401
import select
# Signals that mean this process should exit quickly.
STOP_SIGNALS = (signal.SIGINT, signal.SIGTERM)
def recv_multi_impl(conns, maxsize, timeout):
"""Reads from the first available pipe.
It will immediately return on a closed connection, independent of timeout.
Arguments:
- maxsize: Maximum number of bytes to return. Defaults to MAX_SIZE.
- timeout: If None, it is blocking. If 0 or above, will return None if no
data is available within |timeout| seconds.
Returns:
tuple(int(index), str(data), bool(closed)).
"""
assert conns
assert timeout is None or isinstance(timeout, (int, float)), timeout
maxsize = max(maxsize or MAX_SIZE, 1)
# select(timeout=0) will block, it has to be a value > 0.
if timeout == 0:
timeout = 0.001
try:
r, _, _ = select.select(conns, [], [], timeout)
except select.error:
r = None
if not r:
return None, None, False
conn = r[0]
# Temporarily make it non-blocking.
# TODO(maruel): This is not very efficient when the caller is doing this in
# a loop. Add a mechanism to have the caller handle this.
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
if not conn.closed:
# pylint: disable=E1101
fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK)
try:
try:
data = conn.read(maxsize)
except IOError as e:
# On posix, this means the read would block.
if e.errno == errno.EAGAIN:
return conns.index(conn), None, False
raise e
if not data:
# On posix, this means the channel closed.
return conns.index(conn), None, True
return conns.index(conn), data, False
finally:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
class TimeoutExpired(Exception):
"""Compatible with python3 subprocess."""
def __init__(self, cmd, timeout, output=None, stderr=None):
self.cmd = cmd
self.timeout = timeout
self.output = output
# Non-standard:
self.stderr = stderr
super(TimeoutExpired, self).__init__(str(self))
def __str__(self):
return "Command '%s' timed out after %s seconds" % (self.cmd, self.timeout)
class Containment(object):
"""Defines the containment used to run the process.
On Windows, this is done via a Job Object.
https://docs.microsoft.com/en-us/windows/desktop/procthread/job-objects
"""
# AUTO will use containment if possible, but will not fail if not adequate on
# this operating system.
#
# For example, job objects cannot be nested on Windows 7 / Windows Server 2008
  # and earlier, thus AUTO means NONE on these platforms. Windows 8 and
  # Windows Server 2012 and later support nested job objects, thus AUTO means
  # JOB_OBJECT on these platforms.
# See https://docs.microsoft.com/en-us/windows/desktop/procthread/job-objects
# cgroups will be added.
NONE, AUTO, JOB_OBJECT = range(3)
NAMES = {
NONE: 'NONE',
AUTO: 'AUTO',
JOB_OBJECT: 'JOB_OBJECT',
}
def __init__(
self,
containment_type=NONE,
limit_processes=0,
limit_total_committed_memory=0):
self.containment_type = containment_type
# Limit on the number of active processes.
self.limit_processes = limit_processes
self.limit_total_committed_memory = limit_total_committed_memory
def __eq__(self, rhs):
if not rhs:
return False
return (
self.containment_type == rhs.containment_type and
self.limit_processes == rhs.limit_processes and
self.limit_total_committed_memory == rhs.limit_total_committed_memory)
def __str__(self):
return 'Containment<%s, %s, %s>' % (
self.NAMES[self.containment_type],
self.limit_processes,
self.limit_total_committed_memory)
def __repr__(self):
return self.__str__()
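# Illustrative sketch (not part of the original module): building a Containment
# and handing it to Popen via the 'containment' keyword documented below. The
# command and limits are made-up examples; AUTO is used so the call degrades to
# no containment on platforms without job object support.
def _example_contained_popen(cmd):
  containment = Containment(
      containment_type=Containment.AUTO,
      limit_processes=10,
      limit_total_committed_memory=512 * 1024 * 1024)
  proc = Popen(cmd, stdout=PIPE, stderr=PIPE, containment=containment)
  # One of communicate()/wait()/poll()/yield_any() must be called so that the
  # job object handle does not leak (see the Popen docstring).
  out, err = proc.communicate()
  return proc.returncode, out, err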
class Popen(subprocess.Popen):
"""Adds timeout support on stdout and stderr.
Inspired by
http://code.activestate.com/recipes/440554-module-to-allow-asynchronous-subprocess-use-on-win/
Unlike subprocess, yield_any(), recv_*(), communicate() will close stdout and
stderr once the child process closes them, after all the data is read.
Mutated behavior:
- args: transparently encode('utf-8') any unicode items.
- cwd: transparently encode('utf-8') if unicode.
- env: transparently encode('utf-8') any unicode keys or values.
Additional arguments:
- detached: If True, the process is created in a new process group. On
Windows, use CREATE_NEW_PROCESS_GROUP. On posix, use os.setpgid(0, 0).
- lower_priority: reduce the process priority a bit.
- containment: Containment instance or None. When using containment, one of
communicate(), poll(), wait(), yield_any(), yield_any_line() must be
used otherwise a kernel handle may leak.
Additional members:
- start: timestamp when this process started.
- end: timestamp when this process exited, as seen by this process.
- detached: If True, the child process was started as a detached process.
- gid: process group id, if any.
- duration: time in seconds the process lasted.
Additional methods:
- yield_any(): yields output until the process terminates.
- recv_any(): reads from stdout and/or stderr with optional timeout.
- recv_out() & recv_err(): specialized version of recv_any().
"""
# subprocess.Popen.__init__() is not threadsafe; there is a race between
# creating the exec-error pipe for the child and setting it to CLOEXEC during
# which another thread can fork and cause the pipe to be inherited by its
# descendents, which will cause the current Popen to hang until all those
# descendents exit. Protect this with a lock so that only one fork/exec can
# happen at a time.
popen_lock = threading.Lock()
def __init__(self, args, **kwargs):
    # Windows version of subprocess.Popen() really doesn't like unicode. In
# practice we should use the current ANSI code page, but settle for utf-8
# across all OSes for consistency.
to_str = lambda i: i if isinstance(i, str) else i.encode('utf-8')
args = [to_str(i) for i in args]
if kwargs.get('cwd') is not None:
kwargs['cwd'] = to_str(kwargs['cwd'])
if kwargs.get('env'):
kwargs['env'] = {
to_str(k): to_str(v) for k, v in kwargs['env'].items()
}
# Set via contrived monkey patching below, because stdlib doesn't expose
# thread handle. Only set on Windows.
self._handle_thread = None
# Will be set by super constructor but may be accessed in failure modes by
# _cleanup().
self._handle = None
self._job = None
self.detached = kwargs.pop('detached', False)
if self.detached:
if sys.platform == 'win32':
prev = kwargs.get('creationflags', 0)
kwargs['creationflags'] = prev | CREATE_NEW_PROCESS_GROUP
else:
old_preexec_fn_1 = kwargs.get('preexec_fn')
def new_preexec_fn_1():
if old_preexec_fn_1:
old_preexec_fn_1()
os.setpgid(0, 0)
kwargs['preexec_fn'] = new_preexec_fn_1
if kwargs.pop('lower_priority', False):
if sys.platform == 'win32':
# TODO(maruel): If already in this class, it should use
# IDLE_PRIORITY_CLASS.
prev = kwargs.get('creationflags', 0)
kwargs['creationflags'] = prev | BELOW_NORMAL_PRIORITY_CLASS
else:
old_preexec_fn_2 = kwargs.get('preexec_fn')
def new_preexec_fn_2():
if old_preexec_fn_2:
old_preexec_fn_2()
os.nice(1)
kwargs['preexec_fn'] = new_preexec_fn_2
self.containment = kwargs.pop('containment', None) or Containment()
if self.containment.containment_type != Containment.NONE:
if self.containment.containment_type == Containment.JOB_OBJECT:
if sys.platform != 'win32':
raise NotImplementedError(
'containment is not implemented on this platform')
if sys.platform == 'win32':
# May throw an WindowsError.
# pylint: disable=undefined-variable
self._job = _JobObject(self.containment)
# In this case, start the process suspended, so we can assign the job
# object, then resume it.
prev = kwargs.get('creationflags', 0)
kwargs['creationflags'] = prev | CREATE_SUSPENDED
self.end = None
self.gid = None
self.start = time.time()
try:
with self.popen_lock:
if sys.platform == 'win32':
# We need the thread handle, save it.
old = subprocess._subprocess.CreateProcess
class FakeHandle(object):
def Close(self):
pass
def patch_CreateProcess(*args, **kwargs):
hp, ht, pid, tid = old(*args, **kwargs)
# Save the thread handle, and return a fake one that
            # _execute_child() will close indiscriminately.
self._handle_thread = ht
return hp, FakeHandle(), pid, tid
subprocess._subprocess.CreateProcess = patch_CreateProcess
try:
super(Popen, self).__init__(args, **kwargs)
finally:
if sys.platform == 'win32':
subprocess._subprocess.CreateProcess = old
except:
self._cleanup()
raise
self.args = args
if self.detached and sys.platform != 'win32':
try:
self.gid = os.getpgid(self.pid)
except OSError:
# sometimes the process can run+finish before we collect its pgid. fun.
pass
if self._job:
try:
self._job.assign_proc(self)
except OSError:
self.kill()
self.wait()
def duration(self):
"""Duration of the child process.
It is greater or equal to the actual time the child process ran. It can be
significantly higher than the real value if neither .wait() nor .poll() was
used.
"""
return (self.end or time.time()) - self.start
# pylint: disable=arguments-differ,redefined-builtin
def communicate(self, input=None, timeout=None):
"""Implements python3's timeout support.
Unlike wait(), timeout=0 is considered the same as None.
Returns:
tuple of (stdout, stderr).
Raises:
- TimeoutExpired when more than timeout seconds were spent waiting for the
process.
"""
if not timeout:
return super(Popen, self).communicate(input=input)
assert isinstance(timeout, (int, float)), timeout
if self.stdin or self.stdout or self.stderr:
stdout = '' if self.stdout else None
stderr = '' if self.stderr else None
t = None
if input is not None:
assert self.stdin, (
'Can\'t use communicate(input) if not using '
            'Popen(stdin=subprocess42.PIPE)')
# TODO(maruel): Switch back to non-threading.
def write():
try:
self.stdin.write(input)
except IOError:
pass
t = threading.Thread(name='Popen.communicate', target=write)
t.daemon = True
t.start()
try:
if self.stdout or self.stderr:
start = time.time()
end = start + timeout
def remaining():
return max(end - time.time(), 0)
for pipe, data in self.yield_any(timeout=remaining):
if pipe is None:
raise TimeoutExpired(self.args, timeout, stdout, stderr)
assert pipe in ('stdout', 'stderr'), pipe
if pipe == 'stdout':
stdout += data
else:
stderr += data
else:
# Only stdin is piped.
self.wait(timeout=timeout)
finally:
if t:
try:
self.stdin.close()
except IOError:
pass
t.join()
else:
# No pipe. The user wanted to use wait().
self.wait(timeout=timeout)
return None, None
# Indirectly initialize self.end.
self.wait()
return stdout, stderr
def wait(self, timeout=None,
poll_initial_interval=0.001,
poll_max_interval=0.05): # pylint: disable=arguments-differ
"""Implements python3's timeout support.
Raises:
- TimeoutExpired when more than timeout seconds were spent waiting for the
process.
"""
assert timeout is None or isinstance(timeout, (int, float)), timeout
if timeout is None:
super(Popen, self).wait()
elif self.returncode is None:
if sys.platform == 'win32':
WAIT_TIMEOUT = 258
result = subprocess._subprocess.WaitForSingleObject(
self._handle, int(timeout * 1000))
if result == WAIT_TIMEOUT:
raise TimeoutExpired(self.args, timeout)
self.returncode = subprocess._subprocess.GetExitCodeProcess(
self._handle)
else:
# If you think the following code is horrible, it's because it is
# inspired by python3's stdlib.
end = time.time() + timeout
delay = poll_initial_interval
while True:
try:
pid, sts = subprocess._eintr_retry_call(
os.waitpid, self.pid, os.WNOHANG)
except OSError as e:
if e.errno != errno.ECHILD:
raise
pid = self.pid
sts = 0
if pid == self.pid:
# This sets self.returncode.
self._handle_exitstatus(sts)
break
remaining = end - time.time()
if remaining <= 0:
raise TimeoutExpired(self.args, timeout)
delay = min(delay * 2, remaining, poll_max_interval)
time.sleep(delay)
if not self.end:
# communicate() uses wait() internally.
self.end = time.time()
self._cleanup()
return self.returncode
def poll(self):
ret = super(Popen, self).poll()
if ret is not None and not self.end:
self.end = time.time()
# This may kill all children processes.
self._cleanup()
return ret
def yield_any_line(self, **kwargs):
"""Yields lines until the process terminates.
Like yield_any, but yields lines.
"""
return split(self.yield_any(**kwargs))
def yield_any(self, maxsize=None, timeout=None):
"""Yields output until the process terminates.
Unlike wait(), does not raise TimeoutExpired.
Yields:
(pipename, data) where pipename is either 'stdout', 'stderr' or None in
case of timeout or when the child process closed one of the pipe(s) and
all pending data on the pipe was read.
Arguments:
- maxsize: See recv_any(). Can be a callable function.
- timeout: If None, the call is blocking. If set, yields None, None if no
data is available within |timeout| seconds. It resets itself after
each yield. Can be a callable function.
"""
assert self.stdout or self.stderr
if timeout is not None:
# timeout=0 effectively means that the pipe is continuously polled.
if isinstance(timeout, (int, float)):
assert timeout >= 0, timeout
old_timeout = timeout
timeout = lambda: old_timeout
else:
assert callable(timeout), timeout
if maxsize is not None and not callable(maxsize):
assert isinstance(maxsize, (int, float)), maxsize
last_yield = time.time()
while self.poll() is None:
to = timeout() if timeout else None
if to is not None:
to = max(to - (time.time() - last_yield), 0)
t, data = self.recv_any(
maxsize=maxsize() if callable(maxsize) else maxsize, timeout=to)
if data or to == 0:
yield t, data
last_yield = time.time()
# Read all remaining output in the pipes.
    # There are 3 cases:
    # - pipes get closed automatically by the calling process before it exits
    # - pipes are closed automatically by the OS
    # - pipes are kept open due to grand-children processes outliving the
    #   child process.
while True:
ms = maxsize
if callable(maxsize):
ms = maxsize()
      # timeout=0 is mainly to handle the case where a grand-child process
# outlives the process started.
t, data = self.recv_any(maxsize=ms, timeout=0)
if not data:
break
yield t, data
def recv_any(self, maxsize=None, timeout=None):
"""Reads from the first pipe available from stdout and stderr.
Unlike wait(), does not throw TimeoutExpired.
Arguments:
- maxsize: Maximum number of bytes to return. Defaults to MAX_SIZE.
- timeout: If None, it is blocking. If 0 or above, will return None if no
data is available within |timeout| seconds.
Returns:
tuple(pipename or None, str(data)). pipename is one of 'stdout' or
'stderr'.
"""
# recv_multi_impl will early exit on a closed connection. Loop accordingly
# to simplify call sites.
while True:
pipes = [
x for x in ((self.stderr, 'stderr'), (self.stdout, 'stdout')) if x[0]
]
      # If both stdout and stderr have the exact same file handle, they are
# effectively the same pipe. Deduplicate it since otherwise it confuses
# recv_multi_impl().
if len(pipes) == 2 and self.stderr.fileno() == self.stdout.fileno():
pipes.pop(0)
if not pipes:
return None, None
start = time.time()
conns, names = zip(*pipes)
index, data, closed = recv_multi_impl(conns, maxsize, timeout)
if index is None:
return index, data
if closed:
self._close(names[index])
if not data:
# Loop again. The other pipe may still be open.
if timeout:
timeout -= (time.time() - start)
continue
if self.universal_newlines and data:
data = self._translate_newlines(data)
return names[index], data
def recv_out(self, maxsize=None, timeout=None):
"""Reads from stdout synchronously with timeout."""
return self._recv('stdout', maxsize, timeout)
def recv_err(self, maxsize=None, timeout=None):
"""Reads from stderr synchronously with timeout."""
return self._recv('stderr', maxsize, timeout)
def terminate(self):
"""Tries to do something saner on Windows that the stdlib.
Windows:
self.detached/CREATE_NEW_PROCESS_GROUP determines what can be used:
- If set, only SIGBREAK can be sent and it is sent to a single process.
- If not set, in theory only SIGINT can be used and *all processes* in
the processgroup receive it. In practice, we just kill the process.
See http://msdn.microsoft.com/library/windows/desktop/ms683155.aspx
The default on Windows is to call TerminateProcess() always, which is not
useful.
On Posix, always send SIGTERM.
"""
try:
if sys.platform == 'win32' and self.detached:
return self.send_signal(signal.CTRL_BREAK_EVENT)
super(Popen, self).terminate()
except OSError:
# The function will throw if the process terminated in-between. Swallow
# this.
pass
def kill(self):
"""Kills the process and its children if possible.
Swallows exceptions and return True on success.
This process may be asynchronous. The user should still call wait() to
ensure the process is indeed terminated.
"""
if self._job:
# Use the equivalent of SIGKILL on linux. signal.SIGKILL is not available
# on Windows.
return self._job.kill(-9)
if self.returncode is not None:
# If a return code was recorded, it means there's nothing to kill as there
# was no containment.
return True
if self.gid:
try:
os.killpg(self.gid, signal.SIGKILL)
except OSError:
return False
else:
try:
super(Popen, self).kill()
except OSError:
return False
return True
def _close(self, which):
"""Closes either stdout or stderr."""
getattr(self, which).close()
setattr(self, which, None)
def _cleanup(self):
"""Makes sure resources are not leaked."""
if self._job:
# This may kill all children processes.
self._job.close()
self._job = None
if self._handle_thread:
self._handle_thread.Close()
self._handle_thread = None
if self._handle:
# self._handle is deleted via __del__ but when it happens is
# non-deterministic, so do it earlier.
self._handle.Close()
self._handle = None
def _recv(self, which, maxsize, timeout):
"""Reads from one of stdout or stderr synchronously with timeout."""
conn = getattr(self, which)
if conn is None:
return None
_, data, closed = recv_multi_impl([conn], maxsize, timeout)
if closed:
self._close(which)
if self.universal_newlines and data:
data = self._translate_newlines(data)
return data
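# Illustrative sketch (not part of the original module): stream a child's
# output as it is produced with an idle timeout, using the yield_any() API
# documented in the Popen docstring above. The command and timeout value are
# made-up examples.
def _example_stream_output(cmd, idle_timeout=10):
  proc = Popen(cmd, stdout=PIPE, stderr=STDOUT)
  chunks = []
  for pipe_name, data in proc.yield_any(timeout=idle_timeout):
    if pipe_name is None:
      # No output for idle_timeout seconds: give up on the child.
      proc.kill()
      proc.wait()
      break
    chunks.append(data)
  return proc.poll(), ''.join(chunks)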
@contextlib.contextmanager
def set_signal_handler(signals, handler):
"""Temporarilly override signals handler.
Useful when waiting for a child process to handle signals like SIGTERM, so the
signal can be propagated to the child process.
"""
previous = {s: signal.signal(s, handler) for s in signals}
try:
yield
finally:
for sig, h in previous.items():
signal.signal(sig, h)
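# Illustrative sketch (not part of the original module): combine
# set_signal_handler() with the terminate/kill sequence from the module
# docstring so that a SIGTERM sent to this process is forwarded to the child.
# The timeout values are made-up examples.
def _example_propagate_sigterm(cmd, timeout=60, grace=5):
  proc = Popen(cmd)
  def handler(_signum, _frame):
    # Forward the stop request to the child; the waiting code below keeps
    # running until the child actually exits.
    proc.terminate()
  with set_signal_handler(STOP_SIGNALS, handler):
    try:
      return proc.wait(timeout)
    except TimeoutExpired:
      proc.terminate()
      try:
        return proc.wait(grace)
      except TimeoutExpired:
        proc.kill()
        return proc.wait()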
def call(*args, **kwargs):
"""Adds support for timeout."""
timeout = kwargs.pop('timeout', None)
return Popen(*args, **kwargs).wait(timeout)
def check_call(*args, **kwargs):
"""Adds support for timeout."""
retcode = call(*args, **kwargs)
if retcode:
raise CalledProcessError(retcode, kwargs.get('args') or args[0])
return 0
def check_output(*args, **kwargs):
"""Adds support for timeout."""
timeout = kwargs.pop('timeout', None)
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, *args, **kwargs)
output, _ = process.communicate(timeout=timeout)
retcode = process.poll()
if retcode:
raise CalledProcessError(retcode, kwargs.get('args') or args[0], output)
return output
def call_with_timeout(args, timeout, **kwargs):
"""Runs an executable; kill it in case of timeout."""
proc = Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, **kwargs)
try:
out, err = proc.communicate(timeout=timeout)
except TimeoutExpired as e:
out = e.output
err = e.stderr
proc.kill()
proc.wait()
return out, err, proc.returncode, proc.duration()
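# Illustrative sketch (not part of the original module): the timeout-aware
# wrappers above are drop-in analogues of the stdlib helpers. The commands and
# timeout values are made-up examples.
def _example_wrappers():
  # check_output() raises CalledProcessError on a non-zero exit and
  # TimeoutExpired if the 10 second budget is exceeded.
  version = check_output([sys.executable, '--version'], timeout=10)
  # call_with_timeout() kills the child on timeout instead of raising and
  # returns whatever output was captured plus the run duration.
  out, err, returncode, duration = call_with_timeout(
      [sys.executable, '-c', 'print(42)'], timeout=5)
  return version, out, err, returncode, duration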
def inhibit_os_error_reporting():
"""Inhibits error reporting UI and core files.
This function should be called as early as possible in the process lifetime.
"""
global _OS_ERROR_REPORTING_INHIBITED
if not _OS_ERROR_REPORTING_INHIBITED:
_OS_ERROR_REPORTING_INHIBITED = True
if sys.platform == 'win32':
# Windows has a bad habit of opening a dialog when a console program
# crashes, rather than just letting it crash. Therefore, when a program
# crashes on Windows, we don't find out until the build step times out.
# This code prevents the dialog from appearing, so that we find out
# immediately and don't waste time waiting for a user to close the dialog.
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms680621.aspx
SEM_FAILCRITICALERRORS = 1
SEM_NOGPFAULTERRORBOX = 2
SEM_NOALIGNMENTFAULTEXCEPT = 0x8000
ctypes.windll.kernel32.SetErrorMode(
SEM_FAILCRITICALERRORS|SEM_NOGPFAULTERRORBOX|
SEM_NOALIGNMENTFAULTEXCEPT)
# TODO(maruel): Other OSes.
# - OSX, need to figure out a way to make the following process tree local:
# defaults write com.apple.CrashReporter UseUNC 1
# defaults write com.apple.CrashReporter DialogType none
# - Ubuntu, disable apport if needed.
def split(data, sep='\n'):
"""Splits pipe data by |sep|. Does some buffering.
For example, [('stdout', 'a\nb'), ('stdout', '\n'), ('stderr', 'c\n')] ->
[('stdout', 'a'), ('stdout', 'b'), ('stderr', 'c')].
Args:
data: iterable of tuples (pipe_name, bytes).
Returns:
An iterator of tuples (pipe_name, bytes) where bytes is the input data
but split by sep into separate tuples.
"""
# A dict {pipe_name -> list of pending chunks without separators}
pending_chunks = collections.defaultdict(list)
for pipe_name, chunk in data:
if chunk is None:
# Happens if a pipe is closed.
continue
pending = pending_chunks[pipe_name]
start = 0 # offset in chunk to start |sep| search from
while start < len(chunk):
j = chunk.find(sep, start)
if j == -1:
pending_chunks[pipe_name].append(chunk[start:])
break
to_emit = chunk[start:j]
start = j + 1
if pending:
# prepend and forget
to_emit = ''.join(pending) + to_emit
pending = []
pending_chunks[pipe_name] = pending
yield pipe_name, to_emit
# Emit remaining chunks that don't end with separators as is.
for pipe_name, chunks in sorted(pending_chunks.items()):
if chunks:
yield pipe_name, ''.join(chunks)
|
{
"content_hash": "72e93bbea9e3c8ec7d0ed0b72367f4eb",
"timestamp": "",
"source": "github",
"line_count": 1042,
"max_line_length": 106,
"avg_line_length": 34.71017274472169,
"alnum_prop": 0.6533952665339526,
"repo_name": "endlessm/chromium-browser",
"id": "4205da14aa18ca9abef0af57b404432bec9d4f9e",
"size": "36342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/swarming_client/utils/subprocess42.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class AlignsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="alignsrc", parent_name="box.hoverlabel", **kwargs):
super(AlignsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
{
"content_hash": "6f20d68f3ea4ea3ef26000d8624e5f12",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 87,
"avg_line_length": 36.72727272727273,
"alnum_prop": 0.6262376237623762,
"repo_name": "plotly/plotly.py",
"id": "b62f55c0350358bdf2e176a66a5a89dd0d6a68d8",
"size": "404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/box/hoverlabel/_alignsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from desktop.models import Document
try:
import json
except ImportError:
import simplejson as json
import logging
import shutil
import time
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.forms.formsets import formset_factory
from django.forms.models import inlineformset_factory
from django.http import HttpResponse
from django.shortcuts import redirect
from django.utils.functional import curry
from django.utils.http import http_date
from django.utils.translation import ugettext as _, activate as activate_translation
from desktop.lib.django_util import render, extract_field_data
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_str
from desktop.lib.rest.http_client import RestException
from liboozie.submittion import Submission
from filebrowser.lib.archives import archive_factory
from oozie.decorators import check_job_access_permission, check_job_edition_permission,\
check_dataset_access_permission, check_dataset_edition_permission
from oozie.importlib.workflows import import_workflow as _import_workflow
from oozie.management.commands import oozie_setup
from oozie.models import Workflow, History, Coordinator,\
Dataset, DataInput, DataOutput,\
ACTION_TYPES, Bundle, BundledCoordinator, Job
from oozie.forms import WorkflowForm, CoordinatorForm, DatasetForm,\
DataInputForm, DataOutputForm, LinkForm,\
DefaultLinkForm, ParameterForm, ImportWorkflowForm,\
NodeForm, BundleForm, BundledCoordinatorForm, design_form_by_type
LOG = logging.getLogger(__name__)
def list_workflows(request):
data = Document.objects.available(Workflow, request.user)
data = [job for job in data if job.managed]
return render('editor/list_workflows.mako', request, {
'jobs': data,
'json_jobs': json.dumps([job.id for job in data]),
})
def list_trashed_workflows(request):
data = Document.objects.trashed(Workflow, request.user)
data = [job for job in data if job.managed]
return render('editor/list_trashed_workflows.mako', request, {
'jobs': data,
'json_jobs': json.dumps([job.id for job in data]),
})
def list_coordinators(request, workflow_id=None):
data = Document.objects.available(Coordinator, request.user)
if workflow_id is not None:
data = [job for job in data if job.workflow.id == workflow_id]
return render('editor/list_coordinators.mako', request, {
'jobs': data,
'json_jobs': json.dumps([job.id for job in data]),
})
def list_trashed_coordinators(request):
data = Document.objects.trashed(Coordinator, request.user)
return render('editor/list_trashed_coordinators.mako', request, {
'jobs': data,
'json_jobs': json.dumps([job.id for job in data]),
})
def list_bundles(request):
data = Document.objects.available(Bundle, request.user)
return render('editor/list_bundles.mako', request, {
'jobs': data,
'json_jobs': json.dumps([job.id for job in data]),
})
def list_trashed_bundles(request):
data = Document.objects.trashed(Bundle, request.user)
return render('editor/list_trashed_bundles.mako', request, {
'jobs': data,
'json_jobs': json.dumps([job.id for job in data]),
})
def create_workflow(request):
workflow = Workflow.objects.new_workflow(request.user)
if request.method == 'POST':
workflow_form = WorkflowForm(request.POST, instance=workflow)
if workflow_form.is_valid():
wf = workflow_form.save()
wf.managed = True
Workflow.objects.initialize(wf, request.fs)
return redirect(reverse('oozie:edit_workflow', kwargs={'workflow': workflow.id}))
else:
request.error(_('Errors on the form: %s') % workflow_form.errors)
else:
workflow_form = WorkflowForm(instance=workflow)
return render('editor/create_workflow.mako', request, {
'workflow_form': workflow_form,
'workflow': workflow,
})
def import_workflow(request):
workflow = Workflow.objects.new_workflow(request.user)
if request.method == 'POST':
workflow_form = ImportWorkflowForm(request.POST, request.FILES, instance=workflow)
if workflow_form.is_valid():
if workflow_form.cleaned_data.get('resource_archive'):
# Upload resources to workspace
source = workflow_form.cleaned_data.get('resource_archive')
if source.name.endswith('.zip'):
workflow.save()
Workflow.objects.initialize(workflow, request.fs)
temp_path = archive_factory(source).extract()
request.fs.copyFromLocal(temp_path, workflow.deployment_dir)
shutil.rmtree(temp_path)
else:
raise PopupException(_('Archive should be a Zip.'))
else:
workflow.save()
Workflow.objects.initialize(workflow, request.fs)
workflow.managed = True
workflow.save()
workflow_definition = workflow_form.cleaned_data['definition_file'].read()
try:
_import_workflow(fs=request.fs, workflow=workflow, workflow_definition=workflow_definition)
request.info(_('Workflow imported'))
return redirect(reverse('oozie:edit_workflow', kwargs={'workflow': workflow.id}))
except Exception, e:
request.error(_('Could not import workflow: %s' % e))
Workflow.objects.destroy(workflow, request.fs)
raise PopupException(_('Could not import workflow.'), detail=e)
else:
request.error(_('Errors on the form: %s') % workflow_form.errors)
else:
workflow_form = ImportWorkflowForm(instance=workflow)
return render('editor/import_workflow.mako', request, {
'workflow_form': workflow_form,
'workflow': workflow,
})
@check_job_access_permission()
def export_workflow(request, workflow):
zip_file = workflow.compress(maping=dict([(param['name'], param['value']) for param in workflow.find_all_parameters()]))
response = HttpResponse(mimetype="application/zip")
response["Last-Modified"] = http_date(time.time())
response["Content-Length"] = len(zip_file.getvalue())
response['Content-Disposition'] = 'attachment; filename="workflow-%s-%d.zip"' % (workflow.name, workflow.id)
response.write(zip_file.getvalue())
return response
@check_job_access_permission()
def edit_workflow(request, workflow):
history = History.objects.filter(submitter=request.user, job=workflow).order_by('-submission_date')
workflow_form = WorkflowForm(instance=workflow)
user_can_access_job = workflow.can_read(request.user)
user_can_edit_job = workflow.is_editable(request.user)
return render('editor/edit_workflow.mako', request, {
'workflow_form': workflow_form,
'workflow': workflow,
'history': history,
'user_can_access_job': user_can_access_job,
'user_can_edit_job': user_can_edit_job,
'job_properties': extract_field_data(workflow_form['job_properties']),
'link_form': LinkForm(),
'default_link_form': DefaultLinkForm(action=workflow.start),
'node_form': NodeForm(),
'action_forms': [(node_type, design_form_by_type(node_type, request.user, workflow)())
for node_type in ACTION_TYPES.iterkeys()]
})
def delete_workflow(request):
if request.method != 'POST':
raise PopupException(_('A POST request is required.'))
skip_trash = 'skip_trash' in request.GET
job_ids = request.POST.getlist('job_selection')
for job_id in job_ids:
job = Job.objects.can_read_or_exception(request, job_id)
Job.objects.can_edit_or_exception(request, job)
if skip_trash:
Workflow.objects.destroy(job, request.fs)
else:
job.workflow.delete(skip_trash=False)
if skip_trash:
request.info(_('Workflow(s) deleted.'))
else:
request.info(_('Workflow(s) trashed.'))
return redirect(reverse('oozie:list_workflows'))
def restore_workflow(request):
if request.method != 'POST':
raise PopupException(_('A POST request is required.'))
job_ids = request.POST.getlist('job_selection')
for job_id in job_ids:
job = Document.objects.can_read_or_exception(request.user, Job, job_id)
Job.objects.can_edit_or_exception(request, job)
job.workflow.restore()
request.info(_('Workflow(s) restored.'))
return redirect(reverse('oozie:list_workflows'))
@check_job_access_permission()
def clone_workflow(request, workflow):
if request.method != 'POST':
raise PopupException(_('A POST request is required.'))
clone = workflow.clone(request.fs, request.user)
response = {'url': reverse('oozie:edit_workflow', kwargs={'workflow': clone.id})}
return HttpResponse(json.dumps(response), mimetype="application/json")
@check_job_access_permission()
def submit_workflow(request, workflow):
ParametersFormSet = formset_factory(ParameterForm, extra=0)
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
if params_form.is_valid():
mapping = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
job_id = _submit_workflow(request.user, request.fs, request.jt, workflow, mapping)
request.info(_('Workflow submitted'))
return redirect(reverse('oozie:list_oozie_workflow', kwargs={'job_id': job_id}))
else:
request.error(_('Invalid submission form: %s' % params_form.errors))
else:
parameters = workflow.find_all_parameters()
initial_params = ParameterForm.get_initial_params(dict([(param['name'], param['value']) for param in parameters]))
params_form = ParametersFormSet(initial=initial_params)
popup = render('editor/submit_job_popup.mako', request, {
'params_form': params_form,
'action': reverse('oozie:submit_workflow', kwargs={'workflow': workflow.id})
}, force_template=True).content
return HttpResponse(json.dumps(popup), mimetype="application/json")
def _submit_workflow(user, fs, jt, workflow, mapping):
try:
submission = Submission(user, workflow, fs, jt, mapping)
job_id = submission.run()
History.objects.create_from_submission(submission)
return job_id
except RestException, ex:
detail = ex._headers.get('oozie-error-message', ex)
if 'Max retries exceeded with url' in str(detail):
detail = '%s: %s' % (_('The Oozie server is not running'), detail)
LOG.error(smart_str(detail))
raise PopupException(_("Error submitting workflow %s") % (workflow,), detail=detail)
return redirect(reverse('oozie:list_oozie_workflow', kwargs={'job_id': job_id}))
@check_job_access_permission()
def schedule_workflow(request, workflow):
if Coordinator.objects.available().filter(workflow=workflow).exists():
request.info(_('You already have some coordinators for this workflow. Submit one or create a new one.'))
return list_coordinators(request, workflow_id=workflow.id)
else:
return create_coordinator(request, workflow=workflow.id)
@check_job_access_permission()
def create_coordinator(request, workflow=None):
if workflow is not None:
coordinator = Coordinator(owner=request.user, schema_version="uri:oozie:coordinator:0.2", workflow=workflow)
else:
coordinator = Coordinator(owner=request.user, schema_version="uri:oozie:coordinator:0.2")
if request.method == 'POST':
coordinator_form = CoordinatorForm(request.POST, instance=coordinator, user=request.user)
if coordinator_form.is_valid():
coordinator = coordinator_form.save()
Document.objects.link(coordinator, owner=coordinator.owner, name=coordinator.name, description=coordinator.description)
return redirect(reverse('oozie:edit_coordinator', kwargs={'coordinator': coordinator.id}) + "#step3")
else:
request.error(_('Errors on the form: %s') % coordinator_form.errors)
else:
coordinator_form = CoordinatorForm(instance=coordinator, user=request.user)
return render('editor/create_coordinator.mako', request, {
'coordinator': coordinator,
'coordinator_form': coordinator_form,
})
def delete_coordinator(request):
if request.method != 'POST':
raise PopupException(_('A POST request is required.'))
skip_trash = 'skip_trash' in request.GET
job_ids = request.POST.getlist('job_selection')
for job_id in job_ids:
job = Job.objects.can_read_or_exception(request, job_id)
Job.objects.can_edit_or_exception(request, job)
if skip_trash:
Submission(request.user, job, request.fs, {}).remove_deployment_dir()
job.delete(skip_trash=skip_trash)
if skip_trash:
request.info(_('Coordinator(s) deleted.'))
else:
request.info(_('Coordinator(s) trashed.'))
return redirect(reverse('oozie:list_coordinators'))
def restore_coordinator(request):
if request.method != 'POST':
raise PopupException(_('A POST request is required.'))
job_ids = request.POST.getlist('job_selection')
for job_id in job_ids:
job = Job.objects.can_read_or_exception(request, job_id)
Job.objects.can_edit_or_exception(request, job)
job.restore()
request.info(_('Coordinator(s) restored.'))
return redirect(reverse('oozie:list_coordinators'))
@check_job_access_permission()
@check_job_edition_permission(True)
def edit_coordinator(request, coordinator):
history = History.objects.filter(submitter=request.user, job=coordinator).order_by('-submission_date')
DatasetFormSet = inlineformset_factory(Coordinator, Dataset, form=DatasetForm, max_num=0, can_order=False, can_delete=True)
DataInputFormSet = inlineformset_factory(Coordinator, DataInput, form=DataInputForm, max_num=0, can_order=False, can_delete=True)
DataInputFormSet.form = staticmethod(curry(DataInputForm, coordinator=coordinator))
DataOutputFormSet = inlineformset_factory(Coordinator, DataOutput, form=DataOutputForm, max_num=0, can_order=False, can_delete=True)
DataOutputFormSet.form = staticmethod(curry(DataOutputForm, coordinator=coordinator))
dataset = Dataset(coordinator=coordinator)
dataset_form = DatasetForm(instance=dataset, prefix='create')
NewDataInputFormSet = inlineformset_factory(Coordinator, DataInput, form=DataInputForm, extra=0, can_order=False, can_delete=False)
NewDataInputFormSet.form = staticmethod(curry(DataInputForm, coordinator=coordinator))
NewDataOutputFormSet = inlineformset_factory(Coordinator, DataOutput, form=DataOutputForm, extra=0, can_order=False, can_delete=False)
NewDataOutputFormSet.form = staticmethod(curry(DataOutputForm, coordinator=coordinator))
if request.method == 'POST':
coordinator_form = CoordinatorForm(request.POST, instance=coordinator, user=request.user)
dataset_formset = DatasetFormSet(request.POST, request.FILES, instance=coordinator)
data_input_formset = DataInputFormSet(request.POST, request.FILES, instance=coordinator)
data_output_formset = DataOutputFormSet(request.POST, request.FILES, instance=coordinator)
new_data_input_formset = NewDataInputFormSet(request.POST, request.FILES, instance=coordinator, prefix='input')
new_data_output_formset = NewDataOutputFormSet(request.POST, request.FILES, instance=coordinator, prefix='output')
if coordinator_form.is_valid() and dataset_formset.is_valid() and data_input_formset.is_valid() and data_output_formset.is_valid() \
and new_data_input_formset.is_valid() and new_data_output_formset.is_valid():
coordinator = coordinator_form.save()
dataset_formset.save()
data_input_formset.save()
data_output_formset.save()
new_data_input_formset.save()
new_data_output_formset.save()
request.info(_('Coordinator saved.'))
return redirect(reverse('oozie:edit_coordinator', kwargs={'coordinator': coordinator.id}))
else:
coordinator_form = CoordinatorForm(instance=coordinator, user=request.user)
dataset_formset = DatasetFormSet(instance=coordinator)
data_input_formset = DataInputFormSet(instance=coordinator)
data_output_formset = DataOutputFormSet(instance=coordinator)
new_data_input_formset = NewDataInputFormSet(queryset=DataInput.objects.none(), instance=coordinator, prefix='input')
new_data_output_formset = NewDataOutputFormSet(queryset=DataOutput.objects.none(), instance=coordinator, prefix='output')
return render('editor/edit_coordinator.mako', request, {
'coordinator': coordinator,
'coordinator_form': coordinator_form,
'dataset_formset': dataset_formset,
'data_input_formset': data_input_formset,
'data_output_formset': data_output_formset,
'dataset': dataset,
'dataset_form': dataset_form,
'new_data_input_formset': new_data_input_formset,
'new_data_output_formset': new_data_output_formset,
'history': history
})
@check_job_access_permission()
@check_job_edition_permission()
def create_coordinator_dataset(request, coordinator):
"""Returns {'status' 0/1, data:html or url}"""
dataset = Dataset(coordinator=coordinator)
response = {'status': -1, 'data': 'None'}
if request.method == 'POST':
dataset_form = DatasetForm(request.POST, instance=dataset, prefix='create')
if dataset_form.is_valid():
dataset_form.save()
response['status'] = 0
response['data'] = reverse('oozie:edit_coordinator', kwargs={'coordinator': coordinator.id}) + "#listDataset"
request.info(_('Dataset created'))
else:
    # Bad: not a POST request.
response['data'] = _('A POST request is required.')
if response['status'] != 0:
response['data'] = render('editor/create_coordinator_dataset.mako', request, {
'coordinator': coordinator,
'dataset_form': dataset_form,
'dataset': dataset,
}, force_template=True).content
return HttpResponse(json.dumps(response), mimetype="application/json")
@check_dataset_access_permission
@check_dataset_edition_permission()
def edit_coordinator_dataset(request, dataset):
"""Returns HTML for modal to edit datasets"""
response = {'status': -1, 'data': 'None'}
if request.method == 'POST':
dataset_form = DatasetForm(request.POST, instance=dataset, prefix='edit')
if dataset_form.is_valid():
dataset = dataset_form.save()
response['status'] = 0
response['data'] = reverse('oozie:edit_coordinator', kwargs={'coordinator': dataset.coordinator.id}) + "#listDataset"
request.info(_('Dataset modified'))
if dataset.start > dataset.coordinator.start:
request.error(_('Beware: dataset start date was after the coordinator start date.'))
else:
response['data'] = dataset_form.errors
else:
dataset_form = DatasetForm(instance=dataset, prefix='edit')
if response['status'] != 0:
response['data'] = render('editor/edit_coordinator_dataset.mako', request, {
'coordinator': dataset.coordinator,
'dataset_form': dataset_form,
'dataset': dataset,
'path': request.path,
}, force_template=True).content
return HttpResponse(json.dumps(response), mimetype="application/json")
@check_job_access_permission()
@check_job_edition_permission()
def create_coordinator_data(request, coordinator, data_type):
"""Returns {'status' 0/1, data:html or url}"""
if data_type == 'input':
data_instance = DataInput(coordinator=coordinator)
DataForm = DataInputForm
else:
data_instance = DataOutput(coordinator=coordinator)
DataForm = DataOutputForm
response = {'status': -1, 'data': 'None'}
if request.method == 'POST':
data_form = DataForm(request.POST, instance=data_instance, coordinator=coordinator, prefix=data_type)
if data_form.is_valid():
data_form.save()
response['status'] = 0
response['data'] = reverse('oozie:edit_coordinator', kwargs={'coordinator': coordinator.id})
request.info(_('Coordinator data created'))
else:
response['data'] = data_form.errors
else:
response['data'] = _('A POST request is required.')
return HttpResponse(json.dumps(response), mimetype="application/json")
@check_job_access_permission()
def clone_coordinator(request, coordinator):
if request.method != 'POST':
raise PopupException(_('A POST request is required.'))
clone = coordinator.clone(request.user)
response = {'url': reverse('oozie:edit_coordinator', kwargs={'coordinator': clone.id})}
return HttpResponse(json.dumps(response), mimetype="application/json")
@check_job_access_permission()
def submit_coordinator(request, coordinator):
ParametersFormSet = formset_factory(ParameterForm, extra=0)
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
if params_form.is_valid():
mapping = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
job_id = _submit_coordinator(request, coordinator, mapping)
request.info(_('Coordinator submitted.'))
return redirect(reverse('oozie:list_oozie_coordinator', kwargs={'job_id': job_id}))
else:
request.error(_('Invalid submission form: %s') % params_form.errors)
else:
parameters = coordinator.find_all_parameters()
initial_params = ParameterForm.get_initial_params(dict([(param['name'], param['value']) for param in parameters]))
params_form = ParametersFormSet(initial=initial_params)
popup = render('editor/submit_job_popup.mako', request, {
'params_form': params_form,
'action': reverse('oozie:submit_coordinator', kwargs={'coordinator': coordinator.id})
}, force_template=True).content
return HttpResponse(json.dumps(popup), mimetype="application/json")
def _submit_coordinator(request, coordinator, mapping):
try:
wf_dir = Submission(request.user, coordinator.workflow, request.fs, request.jt, mapping).deploy()
properties = {'wf_application_path': request.fs.get_hdfs_path(wf_dir)}
properties.update(mapping)
submission = Submission(request.user, coordinator, request.fs, request.jt, properties=properties)
job_id = submission.run()
History.objects.create_from_submission(submission)
return job_id
except RestException, ex:
raise PopupException(_("Error submitting coordinator %s") % (coordinator,),
detail=ex._headers.get('oozie-error-message', ex))
def create_bundle(request):
bundle = Bundle(owner=request.user, schema_version='uri:oozie:bundle:0.2')
if request.method == 'POST':
bundle_form = BundleForm(request.POST, instance=bundle)
if bundle_form.is_valid():
bundle = bundle_form.save()
Document.objects.link(bundle, owner=bundle.owner, name=bundle.name, description=bundle.description)
return redirect(reverse('oozie:edit_bundle', kwargs={'bundle': bundle.id}))
else:
request.error(_('Errors on the form: %s') % bundle_form.errors)
else:
bundle_form = BundleForm(instance=bundle)
return render('editor/create_bundle.mako', request, {
'bundle': bundle,
'bundle_form': bundle_form,
})
def delete_bundle(request):
if request.method != 'POST':
raise PopupException(_('A POST request is required.'))
skip_trash = 'skip_trash' in request.GET
job_ids = request.POST.getlist('job_selection')
for job_id in job_ids:
job = Job.objects.can_read_or_exception(request, job_id)
Job.objects.can_edit_or_exception(request, job)
if skip_trash:
Submission(request.user, job, request.fs, {}).remove_deployment_dir()
job.delete(skip_trash=skip_trash)
request.info(_('Bundle(s) deleted.'))
return redirect(reverse('oozie:list_bundles'))
def restore_bundle(request):
if request.method != 'POST':
raise PopupException(_('A POST request is required.'))
job_ids = request.POST.getlist('job_selection')
for job_id in job_ids:
job = Job.objects.can_read_or_exception(request, job_id)
Job.objects.can_edit_or_exception(request, job)
job.restore()
request.info(_('Bundle(s) restored.'))
return redirect(reverse('oozie:list_bundles'))
@check_job_access_permission()
@check_job_edition_permission(True)
def edit_bundle(request, bundle):
history = History.objects.filter(submitter=request.user, job=bundle).order_by('-submission_date')
BundledCoordinatorFormSet = inlineformset_factory(Bundle, BundledCoordinator, form=BundledCoordinatorForm, max_num=0, can_order=False, can_delete=True)
bundle_form = BundleForm(instance=bundle)
if request.method == 'POST':
bundle_form = BundleForm(request.POST, instance=bundle)
bundled_coordinator_formset = BundledCoordinatorFormSet(request.POST, instance=bundle)
if bundle_form.is_valid() and bundled_coordinator_formset.is_valid():
bundle = bundle_form.save()
bundled_coordinator_formset.save()
request.info(_('Bundle saved.'))
return redirect(reverse('oozie:list_bundles'))
else:
bundle_form = BundleForm(instance=bundle)
bundled_coordinator_formset = BundledCoordinatorFormSet(instance=bundle)
return render('editor/edit_bundle.mako', request, {
'bundle': bundle,
'bundle_form': bundle_form,
'bundled_coordinator_formset': bundled_coordinator_formset,
'bundled_coordinator_html_form': get_create_bundled_coordinator_html(request, bundle),
'history': history
})
@check_job_access_permission()
@check_job_edition_permission(True)
def create_bundled_coordinator(request, bundle):
bundled_coordinator_instance = BundledCoordinator(bundle=bundle)
response = {'status': -1, 'data': 'None'}
if request.method == 'POST':
bundled_coordinator_form = BundledCoordinatorForm(request.POST, instance=bundled_coordinator_instance, prefix='create-bundled-coordinator')
if bundled_coordinator_form.is_valid():
bundled_coordinator_form.save()
response['status'] = 0
response['data'] = reverse('oozie:edit_bundle', kwargs={'bundle': bundle.id}) + "#listCoordinators"
request.info(_('Coordinator added to the bundle!'))
else:
bundled_coordinator_form = BundledCoordinatorForm(instance=bundled_coordinator_instance, prefix='create-bundled-coordinator')
if response['status'] != 0:
response['data'] = get_create_bundled_coordinator_html(request, bundle, bundled_coordinator_form=bundled_coordinator_form)
return HttpResponse(json.dumps(response), mimetype="application/json")
def get_create_bundled_coordinator_html(request, bundle, bundled_coordinator_form=None):
if bundled_coordinator_form is None:
bundled_coordinator_instance = BundledCoordinator(bundle=bundle)
bundled_coordinator_form = BundledCoordinatorForm(instance=bundled_coordinator_instance, prefix='create-bundled-coordinator')
return render('editor/create_bundled_coordinator.mako', request, {
'bundle': bundle,
'bundled_coordinator_form': bundled_coordinator_form,
}, force_template=True).content.decode('utf-8', 'replace')
@check_job_access_permission()
@check_job_edition_permission(True)
def edit_bundled_coordinator(request, bundle, bundled_coordinator):
bundled_coordinator_instance = BundledCoordinator.objects.get(id=bundled_coordinator)  # TODO: add an access-control/security check here
response = {'status': -1, 'data': 'None'}
if request.method == 'POST':
bundled_coordinator_form = BundledCoordinatorForm(request.POST, instance=bundled_coordinator_instance, prefix='edit-bundled-coordinator')
if bundled_coordinator_form.is_valid():
bundled_coordinator_form.save()
response['status'] = 0
response['data'] = reverse('oozie:edit_bundle', kwargs={'bundle': bundle.id}) + "#listCoordinators"
request.info(_('Bundled coordinator updated!'))
else:
bundled_coordinator_form = BundledCoordinatorForm(instance=bundled_coordinator_instance, prefix='edit-bundled-coordinator')
if response['status'] != 0:
response['data'] = render('editor/edit_bundled_coordinator.mako', request, {
'bundle': bundle,
'bundled_coordinator_form': bundled_coordinator_form,
'bundled_coordinator_instance': bundled_coordinator_instance,
}, force_template=True).content
return HttpResponse(json.dumps(response), mimetype="application/json")
@check_job_access_permission()
def clone_bundle(request, bundle):
if request.method != 'POST':
raise PopupException(_('A POST request is required.'))
clone = bundle.clone(request.user)
response = {'url': reverse('oozie:edit_bundle', kwargs={'bundle': clone.id})}
return HttpResponse(json.dumps(response), mimetype="application/json")
@check_job_access_permission()
def submit_bundle(request, bundle):
ParametersFormSet = formset_factory(ParameterForm, extra=0)
if request.method == 'POST':
params_form = ParametersFormSet(request.POST)
if params_form.is_valid():
mapping = dict([(param['name'], param['value']) for param in params_form.cleaned_data])
job_id = _submit_bundle(request, bundle, mapping)
request.info(_('Bundle submitted.'))
return redirect(reverse('oozie:list_oozie_bundle', kwargs={'job_id': job_id}))
else:
request.error(_('Invalid submission form: %s') % params_form.errors)
else:
parameters = bundle.find_all_parameters()
initial_params = ParameterForm.get_initial_params(dict([(param['name'], param['value']) for param in parameters]))
params_form = ParametersFormSet(initial=initial_params)
popup = render('editor/submit_job_popup.mako', request, {
'params_form': params_form,
'action': reverse('oozie:submit_bundle', kwargs={'bundle': bundle.id})
}, force_template=True).content
return HttpResponse(json.dumps(popup), mimetype="application/json")
def _submit_bundle(request, bundle, properties):
try:
deployment_dirs = {}
for bundled in bundle.coordinators.all():
wf_dir = Submission(request.user, bundled.coordinator.workflow, request.fs, request.jt, properties).deploy()
deployment_dirs['wf_%s_dir' % bundled.coordinator.workflow.id] = request.fs.get_hdfs_path(wf_dir)
coord_dir = Submission(request.user, bundled.coordinator, request.fs, request.jt, properties).deploy()
deployment_dirs['coord_%s_dir' % bundled.coordinator.id] = coord_dir
properties.update(deployment_dirs)
submission = Submission(request.user, bundle, request.fs, request.jt, properties=properties)
job_id = submission.run()
History.objects.create_from_submission(submission)
return job_id
except RestException, ex:
raise PopupException(_("Error submitting bundle %s") % (bundle,),
detail=ex._headers.get('oozie-error-message', ex))
def list_history(request):
"""
List the job submission history.
Normal users can only look at their own submissions.
"""
history = History.objects
if not request.user.is_superuser:
history = history.filter(submitter=request.user)
history = history.order_by('-submission_date')
return render('editor/list_history.mako', request, {
'history': history,
})
def list_history_record(request, record_id):
"""
Display a single job submission history record.
Normal users can only look at their own jobs.
"""
history = History.objects
if not request.user.is_superuser:
history = history.filter(submitter=request.user)
history = history.get(id=record_id)
return render('editor/list_history_record.mako', request, {
'record': history,
})
def install_examples(request):
result = {'status': -1, 'message': ''}
if request.method != 'POST':
result['message'] = _('A POST request is required.')
else:
try:
oozie_setup.Command().handle_noargs()
activate_translation(request.LANGUAGE_CODE)
result['status'] = 0
except Exception, e:
LOG.exception(e)
result['message'] = str(e)
return HttpResponse(json.dumps(result), mimetype="application/json")
def jasmine(request):
return render('editor/jasmine.mako', request, None)
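# Illustrative sketch (added for clarity, not part of the original Hue module):
# the AJAX views above all share the same response contract -- a small dict such
# as {'status': 0, 'data': <url or html>} serialized with json.dumps() and
# returned with an "application/json" mimetype. A hypothetical helper capturing
# that idiom could look like this:
def _json_response(payload):
    return HttpResponse(json.dumps(payload), mimetype="application/json")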
|
{
"content_hash": "91f1bc339216f1653df5d1d83ae58f70",
"timestamp": "",
"source": "github",
"line_count": 848,
"max_line_length": 153,
"avg_line_length": 37.73231132075472,
"alnum_prop": 0.6993780666937526,
"repo_name": "pwong-mapr/private-hue",
"id": "574cd836b46f3ba488936ca7c5e04979ca57f362",
"size": "32788",
"binary": false,
"copies": "1",
"ref": "refs/heads/HUE-1096-abe",
"path": "apps/oozie/src/oozie/views/editor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "9913791"
},
{
"name": "C++",
"bytes": "200199"
},
{
"name": "CSS",
"bytes": "555666"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3076559"
},
{
"name": "JavaScript",
"bytes": "1072625"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "Python",
"bytes": "22498404"
},
{
"name": "Shell",
"bytes": "34636"
},
{
"name": "XSLT",
"bytes": "202363"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class AuditlogConfig(AppConfig):
name = 'auditlog'
|
{
"content_hash": "d508ef44afb6d448f1a77a5a05c0a223",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 18.2,
"alnum_prop": 0.7582417582417582,
"repo_name": "mrts/foodbank-campaign",
"id": "6bfa1cc1734e2ef98491468efe963cdfebce2208",
"size": "91",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/auditlog/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63"
},
{
"name": "HTML",
"bytes": "26270"
},
{
"name": "JavaScript",
"bytes": "1243"
},
{
"name": "Python",
"bytes": "88873"
},
{
"name": "Shell",
"bytes": "1172"
}
],
"symlink_target": ""
}
|
from paraview.simple import *
import glob, os.path
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
print("cwd=", os.getcwd())
dirname = '/home/boman/dev/Projet_MP/waves/sph/louis/workspace/tests_waterdrop'
# create a new 'XML Structured Grid Reader'
gridvts = XMLStructuredGridReader(FileName=os.path.join(dirname,'grid.vts'))
RenameSource('Grid', gridvts)
# create a new 'XML Unstructured Grid Reader'
inpfiles = sorted(glob.glob(os.path.join(dirname,'resFP_*.vtu')))
#print inpfiles
resFP_00000 = XMLUnstructuredGridReader(FileName=inpfiles)
resFP_00000.PointArrayStatus = ['max(mu_ab)', 'Pressure', 'Speed of sound', 'Mass density', 'Velocity', 'Smoothing length', 'Mass', 'Nb of neighbours']
RenameSource('Fixed particles', resFP_00000)
# get animation scene
animationScene1 = GetAnimationScene()
# update animation scene based on data timesteps
animationScene1.UpdateAnimationUsingDataTimeSteps()
# create a new 'XML Unstructured Grid Reader'
inpfiles = sorted(glob.glob(os.path.join(dirname,'resMP_*.vtu')))
resMP_00000 = XMLUnstructuredGridReader(FileName=inpfiles)
resMP_00000.PointArrayStatus = ['max(mu_ab)', 'Pressure', 'Speed of sound', 'Mass density', 'Velocity', 'Smoothing length', 'Mass', 'Nb of neighbours']
RenameSource('Mobile particles', resMP_00000)
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
# uncomment following to set a specific view size
# renderView1.ViewSize = [997, 849]
# show data in view
gridvtsDisplay = Show(gridvts, renderView1)
# trace defaults for the display properties.
gridvtsDisplay.Representation = 'Outline'
gridvtsDisplay.ColorArrayName = ['POINTS', '']
gridvtsDisplay.OSPRayScaleFunction = 'PiecewiseFunction'
gridvtsDisplay.SelectOrientationVectors = 'None'
gridvtsDisplay.ScaleFactor = 0.2
gridvtsDisplay.SelectScaleArray = 'None'
gridvtsDisplay.GlyphType = 'Arrow'
gridvtsDisplay.ScalarOpacityUnitDistance = 0.21650635094610968
# reset view to fit data
renderView1.ResetCamera()
# show data in view
resFP_00000Display = Show(resFP_00000, renderView1)
# trace defaults for the display properties.
resFP_00000Display.ColorArrayName = [None, '']
resFP_00000Display.OSPRayScaleArray = 'Mass'
resFP_00000Display.OSPRayScaleFunction = 'PiecewiseFunction'
resFP_00000Display.SelectOrientationVectors = 'Mass'
resFP_00000Display.ScaleFactor = 0.19512200355529785
resFP_00000Display.SelectScaleArray = 'Mass'
resFP_00000Display.GlyphType = 'Arrow'
resFP_00000Display.ScalarOpacityUnitDistance = 0.18843877254666028
resFP_00000Display.GaussianRadius = 0.09756100177764893
resFP_00000Display.SetScaleArray = ['POINTS', 'Mass']
resFP_00000Display.ScaleTransferFunction = 'PiecewiseFunction'
resFP_00000Display.OpacityArray = ['POINTS', 'Mass']
resFP_00000Display.OpacityTransferFunction = 'PiecewiseFunction'
# show data in view
resMP_00000Display = Show(resMP_00000, renderView1)
# trace defaults for the display properties.
resMP_00000Display.ColorArrayName = [None, '']
resMP_00000Display.OSPRayScaleArray = 'Mass'
resMP_00000Display.OSPRayScaleFunction = 'PiecewiseFunction'
resMP_00000Display.SelectOrientationVectors = 'Mass'
resMP_00000Display.ScaleFactor = 0.04545450210571289
resMP_00000Display.SelectScaleArray = 'Mass'
resMP_00000Display.GlyphType = 'Arrow'
resMP_00000Display.ScalarOpacityUnitDistance = 0.07157227916349206
resMP_00000Display.GaussianRadius = 0.022727251052856445
resMP_00000Display.SetScaleArray = ['POINTS', 'Mass']
resMP_00000Display.ScaleTransferFunction = 'PiecewiseFunction'
resMP_00000Display.OpacityArray = ['POINTS', 'Mass']
resMP_00000Display.OpacityTransferFunction = 'PiecewiseFunction'
# set active source
SetActiveSource(gridvts)
# change representation type
gridvtsDisplay.SetRepresentationType('Wireframe')
# Properties modified on gridvtsDisplay
gridvtsDisplay.Opacity = 0.2
# set active source
SetActiveSource(resFP_00000)
# change representation type
resFP_00000Display.SetRepresentationType('Points')
# Properties modified on resFP_00000Display
resFP_00000Display.PointSize = 5.0
# set active source
SetActiveSource(resMP_00000)
# create a new 'Glyph'
glyph1 = Glyph(Input=resMP_00000, GlyphType='Sphere')
glyph1.Scalars = ['POINTS', 'Smoothing length']
glyph1.Vectors = ['POINTS', 'None']
glyph1.ScaleMode = 'scalar'
glyph1.ScaleFactor = 1.0
glyph1.GlyphTransform = 'Transform2'
glyph1.GlyphMode = 'All Points'
# get color transfer function/color map for 'Smoothinglength'
smoothinglengthLUT = GetColorTransferFunction('Smoothinglength')
# show data in view
glyph1Display = Show(glyph1, renderView1)
# trace defaults for the display properties.
glyph1Display.ColorArrayName = ['POINTS', 'Smoothing length']
glyph1Display.LookupTable = smoothinglengthLUT
glyph1Display.OSPRayScaleArray = 'Smoothing length'
glyph1Display.OSPRayScaleFunction = 'PiecewiseFunction'
glyph1Display.SelectOrientationVectors = 'Mass'
glyph1Display.ScaleFactor = 0.051454496383666996
glyph1Display.SelectScaleArray = 'Smoothing length'
glyph1Display.GlyphType = 'Arrow'
glyph1Display.GaussianRadius = 0.025727248191833498
glyph1Display.SetScaleArray = ['POINTS', 'Smoothing length']
glyph1Display.ScaleTransferFunction = 'PiecewiseFunction'
glyph1Display.OpacityArray = ['POINTS', 'Smoothing length']
glyph1Display.OpacityTransferFunction = 'PiecewiseFunction'
# show color bar/color legend
glyph1Display.SetScalarBarVisibility(renderView1, True)
# get opacity transfer function/opacity map for 'Smoothinglength'
smoothinglengthPWF = GetOpacityTransferFunction('Smoothinglength')
# hide data in view
Hide(resMP_00000, renderView1)
# set scalar coloring
ColorBy(glyph1Display, ('POINTS', 'Velocity'))
# Hide the scalar bar for this color map if no visible data is colored by it.
HideScalarBarIfNotNeeded(smoothinglengthLUT, renderView1)
# rescale color and/or opacity maps used to include current data range
glyph1Display.RescaleTransferFunctionToDataRange(True, False)
# show color bar/color legend
glyph1Display.SetScalarBarVisibility(renderView1, True)
#### saving camera placements for all active views
# current camera placement for renderView1
renderView1.CameraPosition = [2.242394659003803, -5.320849062052243, 2.8132656553463735]
renderView1.CameraFocalPoint = [1.0000000000000002, 0.9999999999999994, 0.9999999999999989]
renderView1.CameraViewUp = [-0.2489518711661434, 0.22153968976360192, 0.9428378077391271]
renderView1.CameraParallelScale = 1.7320508075688772
#### uncomment the following to render all views
RenderAllViews()
# alternatively, if you want to write images, you can use SaveScreenshot(...).
|
{
"content_hash": "0c49ccc8148b18a245c4c3509abd73d2",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 151,
"avg_line_length": 38.2906976744186,
"alnum_prop": 0.8053446705132098,
"repo_name": "rboman/progs",
"id": "78d57fa3cf6ab30f57ab46e397b8e6014ad54a74",
"size": "7223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classes/sph0/louis/tests/waterdrop_post.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "15571"
},
{
"name": "C",
"bytes": "166004"
},
{
"name": "C#",
"bytes": "2021"
},
{
"name": "C++",
"bytes": "1063256"
},
{
"name": "CMake",
"bytes": "211806"
},
{
"name": "Eiffel",
"bytes": "5484041"
},
{
"name": "Fortran",
"bytes": "576316"
},
{
"name": "GLSL",
"bytes": "3366"
},
{
"name": "HTML",
"bytes": "7199"
},
{
"name": "Java",
"bytes": "21330"
},
{
"name": "JavaScript",
"bytes": "28"
},
{
"name": "Julia",
"bytes": "1730"
},
{
"name": "Lua",
"bytes": "10474"
},
{
"name": "M",
"bytes": "143"
},
{
"name": "MATLAB",
"bytes": "7915698"
},
{
"name": "Makefile",
"bytes": "251"
},
{
"name": "Objective-C++",
"bytes": "183"
},
{
"name": "PHP",
"bytes": "10089"
},
{
"name": "PostScript",
"bytes": "450068"
},
{
"name": "Processing",
"bytes": "2358"
},
{
"name": "Python",
"bytes": "1107870"
},
{
"name": "QMake",
"bytes": "3608"
},
{
"name": "SWIG",
"bytes": "14104"
},
{
"name": "Shell",
"bytes": "52373"
},
{
"name": "TeX",
"bytes": "166564"
}
],
"symlink_target": ""
}
|
"""Datastore models used by the Google App Engine Pipeline API."""
from google.appengine.ext import db
from google.appengine.ext import blobstore
# Relative imports
import simplejson
class _PipelineRecord(db.Model):
"""Represents a Pipeline.
Properties:
class_path: Path of the Python class to use for this pipeline.
root_pipeline: The root of the whole workflow; set to itself if this pipeline
is its own root.
fanned_out: List of child _PipelineRecords that were started when this
generator pipeline moved from WAITING to RUN.
start_time: For pipelines with no start _BarrierRecord, when this pipeline
was enqueued to run immediately.
finalized_time: When this pipeline moved from WAITING or RUN to DONE.
params: Serialized parameter dictionary.
status: The current status of the pipeline.
current_attempt: The current attempt (starting at 0) to run.
max_attempts: Maximum number of attempts (starting at 0) to run.
next_retry_time: ETA of the next retry attempt.
retry_message: Why the last attempt failed; None or empty if no message.
Root pipeline properties:
is_root_pipeline: This is a root pipeline.
abort_message: Why the whole pipeline was aborted; only saved on
root pipelines.
abort_requested: If an abort signal has been requested for this root
pipeline; only saved on root pipelines.
"""
WAITING = 'waiting'
RUN = 'run'
DONE = 'done'
ABORTED = 'aborted'
class_path = db.StringProperty()
root_pipeline = db.SelfReferenceProperty(
collection_name='child_pipelines_set')
fanned_out = db.ListProperty(db.Key, indexed=False)
start_time = db.DateTimeProperty(indexed=True)
finalized_time = db.DateTimeProperty(indexed=False)
# One of these two will be set, depending on the size of the params.
params_text = db.TextProperty(name='params')
params_blob = blobstore.BlobReferenceProperty(indexed=False)
status = db.StringProperty(choices=(WAITING, RUN, DONE, ABORTED),
default=WAITING)
# Retry behavior
current_attempt = db.IntegerProperty(default=0, indexed=False)
max_attempts = db.IntegerProperty(default=1, indexed=False)
next_retry_time = db.DateTimeProperty(indexed=False)
retry_message = db.TextProperty()
# Root pipeline properties
is_root_pipeline = db.BooleanProperty()
abort_message = db.TextProperty()
abort_requested = db.BooleanProperty(indexed=False)
@classmethod
def kind(cls):
return '_AE_Pipeline_Record'
@property
def params(self):
"""Returns the dictionary of parameters for this Pipeline."""
if hasattr(self, '_params_decoded'):
return self._params_decoded
if self.params_blob is not None:
value_encoded = self.params_blob.open().read()
else:
value_encoded = self.params_text
value = simplejson.loads(value_encoded)
if isinstance(value, dict):
kwargs = value.get('kwargs')
if kwargs:
adjusted_kwargs = {}
for arg_key, arg_value in kwargs.iteritems():
# Python only allows non-unicode strings as keyword arguments.
adjusted_kwargs[str(arg_key)] = arg_value
value['kwargs'] = adjusted_kwargs
self._params_decoded = value
return self._params_decoded
class _SlotRecord(db.Model):
"""Represents an output slot.
Properties:
root_pipeline: The root of the workflow.
filler: The pipeline that filled this slot.
value: Serialized value for this slot.
status: The current status of the slot.
fill_time: When the slot was filled by the filler.
"""
FILLED = 'filled'
WAITING = 'waiting'
root_pipeline = db.ReferenceProperty(_PipelineRecord)
filler = db.ReferenceProperty(_PipelineRecord,
collection_name='filled_slots_set')
# One of these two will be set, depending on the size of the value.
value_text = db.TextProperty(name='value')
value_blob = blobstore.BlobReferenceProperty(indexed=False)
status = db.StringProperty(choices=(FILLED, WAITING), default=WAITING,
indexed=False)
fill_time = db.DateTimeProperty(indexed=False)
@classmethod
def kind(cls):
return '_AE_Pipeline_Slot'
@property
def value(self):
"""Returns the value of this Slot."""
if hasattr(self, '_value_decoded'):
return self._value_decoded
if self.value_blob is not None:
encoded_value = self.value_blob.open().read()
else:
encoded_value = self.value_text
self._value_decoded = simplejson.loads(encoded_value)
return self._value_decoded
class _BarrierRecord(db.Model):
"""Represents a barrier.
Properties:
root_pipeline: The root of the workflow.
target: The pipeline to run when the barrier fires.
blocking_slots: The slots that must be filled before this barrier fires.
trigger_time: When this barrier fired.
status: The current status of the barrier.
"""
# Barrier statuses
FIRED = 'fired'
WAITING = 'waiting'
# Barrier trigger reasons (used as key names)
START = 'start'
FINALIZE = 'finalize'
ABORT = 'abort'
root_pipeline = db.ReferenceProperty(_PipelineRecord)
target = db.ReferenceProperty(_PipelineRecord,
collection_name='called_barrier_set')
blocking_slots = db.ListProperty(db.Key)
trigger_time = db.DateTimeProperty(indexed=False)
status = db.StringProperty(choices=(FIRED, WAITING), default=WAITING,
indexed=False)
@classmethod
def kind(cls):
return '_AE_Pipeline_Barrier'
class _StatusRecord(db.Model):
"""Represents the current status of a pipeline.
Properties:
message: The textual message to show.
console_url: URL to iframe as the primary console for this pipeline.
link_names: Human display names for status links.
link_urls: URLs corresponding to human names for status links.
status_time: When the status was written.
"""
root_pipeline = db.ReferenceProperty(_PipelineRecord)
message = db.TextProperty()
console_url = db.TextProperty()
link_names = db.ListProperty(db.Text, indexed=False)
link_urls = db.ListProperty(db.Text, indexed=False)
status_time = db.DateTimeProperty(indexed=False)
@classmethod
def kind(cls):
return '_AE_Pipeline_Status'
|
{
"content_hash": "8fe21dbe9dbf25496af6f1c6b77dd8a8",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 78,
"avg_line_length": 32.41538461538462,
"alnum_prop": 0.6954595791805094,
"repo_name": "russomi/appengine-pipeline-read-only",
"id": "568a86c9d679725d7fdb16fd867aedf2745e7195",
"size": "6920",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/pipeline/models.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DOT",
"bytes": "1303"
},
{
"name": "Java",
"bytes": "334132"
},
{
"name": "JavaScript",
"bytes": "91528"
},
{
"name": "Python",
"bytes": "613495"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('simulation', '0004_auto_20180325_2103'),
]
operations = [
migrations.AlterField(
model_name='simulationstagematch',
name='cat_password',
field=models.CharField(default=b'f1712181', max_length=100, verbose_name='Cat Password'),
preserve_default=True,
),
migrations.AlterField(
model_name='simulationstagematch',
name='rat_password',
field=models.CharField(default=b'ba7f9764', max_length=100, verbose_name='Rat Password'),
preserve_default=True,
),
migrations.AlterField(
model_name='simulationstagematch',
name='system_password',
field=models.CharField(default=b'8807f361', max_length=100, verbose_name='System Password'),
preserve_default=True,
),
]
|
{
"content_hash": "9f1f64b62ddccccdf471a08d26431ab5",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 104,
"avg_line_length": 32.61290322580645,
"alnum_prop": 0.6083086053412463,
"repo_name": "bilbeyt/ituro",
"id": "0c1fe8f817b2830f41f5f7b3d51574225bbe6870",
"size": "1035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ituro/simulation/migrations/0005_auto_20180411_1928.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "142"
},
{
"name": "HTML",
"bytes": "72543"
},
{
"name": "JavaScript",
"bytes": "659"
},
{
"name": "Python",
"bytes": "294379"
}
],
"symlink_target": ""
}
|
"""
Coursera's tools for interacting with research data exports.
You may install it from source, or via pip.
"""
import argcomplete
import argparse
import logging
import sys
from courseraresearchexports import commands
from courseraresearchexports.commands import utils
def build_parser():
"""
Build an argparse argument parser to parse the command line.
"""
parser = argparse.ArgumentParser(
description="""Coursera tools for interacting with research exports.
There are a number of subcommands, each with their own help
documentation. Feel free to view them by executing `%(prog)s
SUB_COMMAND -h`. For example: `%(prog)s jobs -h`.""",
epilog="""Please file bugs on github at:
https://github.com/coursera/courseraresearchexports/issues. If you
would like to contribute to this tool's development, check us out at:
https://github.com/coursera/courseraresearchexports""")
utils.add_logging_parser(parser)
# We have a number of subcommands. These subcommands have their own
# subparsers. Each subcommand should set a default value for the 'func'
# option. We then call the parsed 'func' function, and execution carries on
# from there.
subparsers = parser.add_subparsers()
# create the parser for the version subcommand.
commands.version.parser(subparsers)
# create the parser for the jobs subcommand.
commands.jobs.parser(subparsers)
# create the parser for the containers subcommand.
commands.containers.parser(subparsers)
# create the parser for the db subcommand.
commands.db.parser(subparsers)
return parser
def main():
"""
Boots up the command line tool
"""
logging.captureWarnings(True)
parser = build_parser()
argcomplete.autocomplete(parser)
args = parser.parse_args()
# Configure logging
args.setup_logging(args)
# Dispatch into the appropriate subcommand function.
try:
return args.func(args)
except SystemExit:
raise
except:
logging.exception('Problem when running command. Sorry!')
sys.exit(1)
if __name__ == "__main__":
main()
|
{
"content_hash": "89578659812c08d626fb34ead2cd3620",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 28.376623376623378,
"alnum_prop": 0.6929061784897025,
"repo_name": "coursera/courseraresearchexports",
"id": "8d188cf3236503391b881512ff044aba15da2aaf",
"size": "2806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "courseraresearchexports/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "97250"
}
],
"symlink_target": ""
}
|
""" An extensible ASCII table reader and writer.
core.py:
Core base classes and functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu)
"""
import copy
import csv
import functools
import itertools
import operator
import os
import re
import warnings
import inspect
from collections import OrderedDict
from contextlib import suppress
from io import StringIO
import numpy
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table
from astropy.utils.data import get_readable_fileobj
from . import connect
from .docs import READ_DOCSTRING, WRITE_DOCSTRING
# Global dictionary mapping format arg to the corresponding Reader class
FORMAT_CLASSES = {}
# Similar dictionary for fast readers
FAST_CLASSES = {}
class CsvWriter:
"""
Internal class to replace the csv writer ``writerow`` and ``writerows``
functions so that in the case of ``delimiter=' '`` and
``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty
fields (when value == '').
This changes the API slightly in that the writerow() and writerows()
methods return the output written string instead of the length of
that string.
Examples
--------
>>> from astropy.io.ascii.core import CsvWriter
>>> writer = CsvWriter(delimiter=' ')
>>> print(writer.writerow(['hello', '', 'world']))
hello "" world
"""
# Random 16-character string that gets injected instead of any
# empty fields and is then replaced post-write with doubled-quotechar.
# Created with:
# ''.join(random.choice(string.printable[:90]) for _ in range(16))
replace_sentinel = '2b=48Av%0-V3p>bX'
def __init__(self, csvfile=None, **kwargs):
self.csvfile = csvfile
# Temporary StringIO for catching the real csv.writer() object output
self.temp_out = StringIO()
self.writer = csv.writer(self.temp_out, **kwargs)
dialect = self.writer.dialect
self.quotechar2 = dialect.quotechar * 2
self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and (dialect.delimiter == ' ')
def writerow(self, values):
"""
Similar to csv.writer.writerow but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerow, values, has_empty)
def writerows(self, values_list):
"""
Similar to csv.writer.writerows but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for values in values_list:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerows, values_list, has_empty)
def _writerow(self, writerow_func, values, has_empty):
"""
Call ``writerow_func`` (either writerow or writerows) with ``values``.
If it has empty fields that have been replaced then change those
sentinel strings back to quoted empty strings, e.g. ``""``.
"""
# Clear the temporary StringIO buffer that self.writer writes into and
# then call the real csv.writer().writerow or writerows with values.
self.temp_out.seek(0)
self.temp_out.truncate()
writerow_func(values)
row_string = self.temp_out.getvalue()
if self.quote_empty and has_empty:
row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string)
# If self.csvfile is defined then write the output. In practice the pure
# Python writer calls with csvfile=None, while the fast writer calls with
# a file-like object.
if self.csvfile:
self.csvfile.write(row_string)
return row_string
class MaskedConstant(numpy.ma.core.MaskedConstant):
"""A trivial extension of numpy.ma.masked
We want to be able to put the generic term ``masked`` into a dictionary.
The constant ``numpy.ma.masked`` is not hashable (see
https://github.com/numpy/numpy/issues/4660), so we need to extend it
here with a hash value.
See https://github.com/numpy/numpy/issues/11021 for rationale for
__copy__ and __deepcopy__ methods.
"""
def __hash__(self):
'''All instances of this class shall have the same hash.'''
# Any large number will do.
return 1234567890
def __copy__(self):
"""This is a singleton so just return self."""
return self
def __deepcopy__(self, memo):
return self
masked = MaskedConstant()
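# Illustrative sketch (not part of the original module): because of the
# __hash__ override above, this ``masked`` singleton can serve as a dictionary
# key, e.g. in the default ``fill_values = [(masked, '')]`` used further below.
def _demo_masked_as_dict_key():
    fill_map = {masked: ''}   # plain numpy.ma.masked would not be hashable here
    return fill_map[masked] == ''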
class InconsistentTableError(ValueError):
"""
Indicates that an input table is inconsistent in some way.
The default behavior of ``BaseReader`` is to throw an instance of
this class if a data row doesn't match the header.
"""
class OptionalTableImportError(ImportError):
"""
Indicates that a dependency for table reading is not present.
An instance of this class is raised whenever an optional reader
with certain required dependencies cannot operate because of
an ImportError.
"""
class ParameterError(NotImplementedError):
"""
Indicates that a reader cannot handle a passed parameter.
The C-based fast readers in ``io.ascii`` raise an instance of
this error class upon encountering a parameter that the
C engine cannot handle.
"""
class FastOptionsError(NotImplementedError):
"""
Indicates that one of the specified options for fast
reading is invalid.
"""
class NoType:
"""
Superclass for ``StrType`` and ``NumType`` classes.
This class is the default type of ``Column`` and provides a base
class for other data types.
"""
class StrType(NoType):
"""
Indicates that a column consists of text data.
"""
class NumType(NoType):
"""
Indicates that a column consists of numerical data.
"""
class FloatType(NumType):
"""
Describes floating-point data.
"""
class BoolType(NoType):
"""
Describes boolean data.
"""
class IntType(NumType):
"""
Describes integer data.
"""
class AllType(StrType, FloatType, IntType):
"""
Subclass of all other data types.
This type is returned by ``convert_numpy`` if the given numpy
type does not match ``StrType``, ``FloatType``, or ``IntType``.
"""
class Column:
"""Table column.
The key attributes of a Column object are:
* **name** : column name
* **type** : column type (NoType, StrType, NumType, FloatType, IntType)
* **dtype** : numpy dtype (optional, overrides **type** if set)
* **str_vals** : list of column values as strings
* **data** : list of converted column values
"""
def __init__(self, name):
self.name = name
self.type = NoType # Generic type (Int, Float, Str etc)
self.dtype = None # Numpy dtype if available
self.str_vals = []
self.fill_values = {}
class BaseInputter:
"""
Get the lines from the table input and return a list of lines.
"""
encoding = None
"""Encoding used to read the file"""
def get_lines(self, table):
"""
Get the lines from the ``table`` input. The input table can be one of:
* File name
* String (newline separated) with all header and data lines (must have at least 2 lines)
* File-like object with read() method
* List of strings
Parameters
----------
table : str, file_like, list
Can be either a file name, string (newline separated) with all header and data
lines (must have at least 2 lines), a file-like object with a ``read()`` method,
or a list of strings.
Returns
-------
lines : list
List of lines
"""
try:
if (hasattr(table, 'read') or
('\n' not in table + '' and '\r' not in table + '')):
with get_readable_fileobj(table,
encoding=self.encoding) as fileobj:
table = fileobj.read()
lines = table.splitlines()
except TypeError:
try:
# See if table supports indexing, slicing, and iteration
table[0]
table[0:1]
iter(table)
lines = table
except TypeError:
raise TypeError(
'Input "table" must be a string (filename or data) or an iterable')
return self.process_lines(lines)
def process_lines(self, lines):
"""Process lines for subsequent use. In the default case do nothing.
This routine is not generally intended for removing comment lines or
stripping whitespace. These are done (if needed) in the header and
data line processing.
Override this method if something more has to be done to convert raw
input lines to the table rows. For example the
ContinuationLinesInputter derived class accounts for continuation
characters if a row is split into lines."""
return lines
class BaseSplitter:
"""
Base splitter that uses python's split method to do the work.
This does not handle quoted values. A key feature is the formulation of
__call__ as a generator that returns a list of the split line values at
each iteration.
There are two methods that are intended to be overridden, first
``process_line()`` to do pre-processing on each input line before splitting
and ``process_val()`` to do post-processing on each split string value. By
default these apply the string ``strip()`` function. These can be set to
another function via the instance attribute or be disabled entirely, for
example::
reader.header.splitter.process_val = lambda x: x.lstrip()
reader.data.splitter.process_val = None
"""
delimiter = None
""" one-character string used to separate fields """
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end."""
return line.strip()
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip()
def __call__(self, lines):
if self.process_line:
lines = (self.process_line(x) for x in lines)
for line in lines:
vals = line.split(self.delimiter)
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
if self.delimiter is None:
delimiter = ' '
else:
delimiter = self.delimiter
return delimiter.join(str(x) for x in vals)
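# Illustrative usage sketch (not in the original source): BaseSplitter is a
# generator-style callable, so iterating over ``splitter(lines)`` yields the
# stripped, split values for each input line.
def _demo_base_splitter():
    splitter = BaseSplitter()
    splitter.delimiter = ','
    return list(splitter([' a , b ', ' 1 , 2 ']))   # [['a', 'b'], ['1', '2']]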
class DefaultSplitter(BaseSplitter):
"""Default class to split strings into columns using python csv. The class
attributes are taken from the csv Dialect class.
Typical usage::
# lines = ..
splitter = ascii.DefaultSplitter()
for col_vals in splitter(lines):
for col_val in col_vals:
...
"""
delimiter = ' '
""" one-character string used to separate fields. """
quotechar = '"'
""" control how instances of *quotechar* in a field are quoted """
doublequote = True
""" character to remove special meaning from following character """
escapechar = None
""" one-character stringto quote fields containing special characters """
quoting = csv.QUOTE_MINIMAL
""" control when quotes are recognized by the reader """
skipinitialspace = True
""" ignore whitespace immediately following the delimiter """
csv_writer = None
csv_writer_out = StringIO()
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
If splitting on whitespace then replace unquoted tabs with space first"""
if self.delimiter == r'\s':
line = _replace_tab_with_space(line, self.escapechar, self.quotechar)
return line.strip()
def __call__(self, lines):
"""Return an iterator over the table ``lines``, where each iterator output
is a list of the split line values.
Parameters
----------
lines : list
List of table lines
Returns
-------
lines : iterator
"""
if self.process_line:
lines = [self.process_line(x) for x in lines]
delimiter = ' ' if self.delimiter == r'\s' else self.delimiter
csv_reader = csv.reader(lines,
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
skipinitialspace=self.skipinitialspace
)
for vals in csv_reader:
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
delimiter = ' ' if self.delimiter is None else str(self.delimiter)
if self.csv_writer is None:
self.csv_writer = CsvWriter(delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
lineterminator='')
if self.process_val:
vals = [self.process_val(x) for x in vals]
out = self.csv_writer.writerow(vals)
return out
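# Illustrative sketch (not part of the original file): because DefaultSplitter
# delegates to the csv module, quoted fields containing the delimiter survive
# the split intact.
def _demo_default_splitter():
    splitter = DefaultSplitter()
    splitter.delimiter = ','
    return next(splitter(['a,"b,c",d']))   # ['a', 'b,c', 'd']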
def _replace_tab_with_space(line, escapechar, quotechar):
"""Replace tabs with spaces in given string, preserving quoted substrings
Parameters
----------
line : str
String containing tabs to be replaced with spaces.
escapechar : str
Character in ``line`` used to escape special characters.
quotechar : str
Character in ``line`` indicating the start/end of a substring.
Returns
-------
line : str
A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings.
"""
newline = []
in_quote = False
lastchar = 'NONE'
for char in line:
if char == quotechar and lastchar != escapechar:
in_quote = not in_quote
if char == '\t' and not in_quote:
char = ' '
lastchar = char
newline.append(char)
return ''.join(newline)
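# Illustrative sketch (not in the original source): tabs outside of quotes are
# replaced with spaces, while tabs inside a quoted substring are preserved.
def _demo_replace_tab_with_space():
    return _replace_tab_with_space('a\tb "c\td"', escapechar=None, quotechar='"')
    # -> 'a b "c\td"'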
def _get_line_index(line_or_func, lines):
"""Return the appropriate line index, depending on ``line_or_func`` which
can be either a function, a positive or negative int, or None.
"""
if hasattr(line_or_func, '__call__'):
return line_or_func(lines)
elif line_or_func:
if line_or_func >= 0:
return line_or_func
else:
n_lines = sum(1 for line in lines)
return n_lines + line_or_func
else:
return line_or_func
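# Illustrative sketch (not part of the original module): callables are applied
# to ``lines``, negative integers count from the end, and None passes through.
def _demo_get_line_index():
    lines = ['header', 'data1', 'data2']
    return (_get_line_index(1, lines),              # 1
            _get_line_index(-1, lines),             # 2
            _get_line_index(lambda ls: 0, lines),   # 0
            _get_line_index(None, lines))           # None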
class BaseHeader:
"""
Base table header reader
"""
auto_format = 'col{}'
""" format string for auto-generating column names """
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
names = None
""" list of names corresponding to each data column """
write_comment = False
write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE']
def __init__(self):
self.splitter = self.splitter_class()
def _set_cols_from_names(self):
self.cols = [Column(name=x) for x in self.names]
def update_meta(self, lines, meta):
"""
Extract any table-level metadata, e.g. keywords, comments, column metadata, from
the table ``lines`` and update the OrderedDict ``meta`` in place. This base
method extracts comment lines and stores them in ``meta`` for output.
"""
if self.comment:
re_comment = re.compile(self.comment)
comment_lines = [x for x in lines if re_comment.match(x)]
else:
comment_lines = []
comment_lines = [re.sub('^' + self.comment, '', x).strip()
for x in comment_lines]
if comment_lines:
meta.setdefault('table', {})['comments'] = comment_lines
def get_cols(self, lines):
"""Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
start_line = _get_line_index(self.start_line, self.process_lines(lines))
if start_line is None:
# No header line so auto-generate names from n_data_cols
# Get the data values from the first line of table data to determine n_data_cols
try:
first_data_vals = next(self.data.get_str_vals())
except StopIteration:
raise InconsistentTableError('No data lines found so cannot autogenerate '
'column names')
n_data_cols = len(first_data_vals)
self.names = [self.auto_format.format(i)
for i in range(1, n_data_cols + 1)]
else:
for i, line in enumerate(self.process_lines(lines)):
if i == start_line:
break
else: # No header line matching
raise ValueError('No header line found in table')
self.names = next(self.splitter([line]))
self._set_cols_from_names()
def process_lines(self, lines):
"""Generator to yield non-blank and non-comment lines"""
if self.comment:
re_comment = re.compile(self.comment)
# Yield non-comment lines
for line in lines:
if line.strip() and (not self.comment or not re_comment.match(line)):
yield line
def write_comments(self, lines, meta):
if self.write_comment is not False:
for comment in meta.get('comments', []):
lines.append(self.write_comment + comment)
def write(self, lines):
if self.start_line is not None:
for i, spacer_line in zip(range(self.start_line),
itertools.cycle(self.write_spacer_lines)):
lines.append(spacer_line)
lines.append(self.splitter.join([x.info.name for x in self.cols]))
@property
def colnames(self):
"""Return the column names of the table"""
return tuple(col.name if isinstance(col, Column) else col.info.name
for col in self.cols)
def get_type_map_key(self, col):
return col.raw_type
def get_col_type(self, col):
try:
type_map_key = self.get_type_map_key(col)
return self.col_type_map[type_map_key.lower()]
except KeyError:
raise ValueError('Unknown data type "{}" for column "{}"'.format(
col.raw_type, col.name))
def check_column_names(self, names, strict_names, guessing):
"""
Check column names.
This must be done before applying the names transformation
so that guessing will fail appropriately if ``names`` is supplied.
For instance if the basic reader is given a table with no column header
row.
Parameters
----------
names : list
User-supplied list of column names
strict_names : bool
Whether to impose extra requirements on names
guessing : bool
True if this method is being called while guessing the table format
"""
if strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in self.colnames:
if (_is_number(name) or len(name) == 0
or name[0] in bads or name[-1] in bads):
raise InconsistentTableError('Column name {!r} does not meet strict name requirements'
.format(name))
# When guessing require at least two columns
if guessing and len(self.colnames) <= 1:
raise ValueError('Table format guessing requires at least two columns, got {}'
.format(list(self.colnames)))
if names is not None and len(names) != len(self.colnames):
raise InconsistentTableError('Length of names argument ({}) does not match number'
' of table columns ({})'.format(len(names), len(self.colnames)))
class BaseData:
"""
Base table data reader.
"""
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
end_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" Regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE']
fill_include_names = None
fill_exclude_names = None
fill_values = [(masked, '')]
formats = {}
def __init__(self):
# Need to make sure fill_values list is instance attribute, not class attribute.
# On read, this will be overwritten by the default in the ui.read (thus, in
# the current implementation there can be no different default for different
# Readers). On write, ui.py does not specify a default, so this line here matters.
self.fill_values = copy.copy(self.fill_values)
self.formats = copy.copy(self.formats)
self.splitter = self.splitter_class()
def process_lines(self, lines):
"""
Strip out comment lines and blank lines from list of ``lines``
Parameters
----------
lines : list
All lines in table
Returns
-------
lines : list
List of lines
"""
nonblank_lines = (x for x in lines if x.strip())
if self.comment:
re_comment = re.compile(self.comment)
return [x for x in nonblank_lines if not re_comment.match(x)]
else:
return [x for x in nonblank_lines]
def get_data_lines(self, lines):
"""Set the ``data_lines`` attribute to the lines slice comprising the
table data values."""
data_lines = self.process_lines(lines)
start_line = _get_line_index(self.start_line, data_lines)
end_line = _get_line_index(self.end_line, data_lines)
if start_line is not None or end_line is not None:
self.data_lines = data_lines[slice(start_line, end_line)]
else: # Don't copy entire data lines unless necessary
self.data_lines = data_lines
def get_str_vals(self):
"""Return a generator that returns a list of column values (as strings)
for each data line."""
return self.splitter(self.data_lines)
def masks(self, cols):
"""Set fill value for each column and then apply that fill value
In the first step it is determined which columns the entries in ``fill_values``
apply to, using ``fill_include_names`` and ``fill_exclude_names``.
In the second step all replacements are done for the appropriate columns.
"""
if self.fill_values:
self._set_fill_values(cols)
self._set_masks(cols)
def _set_fill_values(self, cols):
"""Set the fill values of the individual cols based on fill_values of BaseData
fill values has the following form:
<fill_spec> = (<bad_value>, <fill_value>, <optional col_name>...)
fill_values = <fill_spec> or list of <fill_spec>'s
"""
if self.fill_values:
# when we write tables the columns may be astropy.table.Columns
# which don't carry a fill_values by default
for col in cols:
if not hasattr(col, 'fill_values'):
col.fill_values = {}
# if input is only one <fill_spec>, then make it a list
with suppress(TypeError):
self.fill_values[0] + ''
self.fill_values = [self.fill_values]
# Step 1: Set the default list of columns which are affected by
# fill_values
colnames = set(self.header.colnames)
if self.fill_include_names is not None:
colnames.intersection_update(self.fill_include_names)
if self.fill_exclude_names is not None:
colnames.difference_update(self.fill_exclude_names)
# Step 2a: Find out which columns are affected by this tuple
# iterate over reversed order, so last condition is set first and
# overwritten by earlier conditions
for replacement in reversed(self.fill_values):
if len(replacement) < 2:
raise ValueError("Format of fill_values must be "
"(<bad>, <fill>, <optional col1>, ...)")
elif len(replacement) == 2:
affect_cols = colnames
else:
affect_cols = replacement[2:]
for i, key in ((i, x) for i, x in enumerate(self.header.colnames)
if x in affect_cols):
cols[i].fill_values[replacement[0]] = str(replacement[1])
def _set_masks(self, cols):
"""Replace string values in col.str_vals and set masks"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
col.mask = numpy.zeros(len(col.str_vals), dtype=numpy.bool)
for i, str_val in ((i, x) for i, x in enumerate(col.str_vals)
if x in col.fill_values):
col.str_vals[i] = col.fill_values[str_val]
col.mask[i] = True
def _replace_vals(self, cols):
"""Replace string values in col.str_vals"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
for i, str_val in ((i, x) for i, x in enumerate(col.str_vals)
if x in col.fill_values):
col.str_vals[i] = col.fill_values[str_val]
if masked in col.fill_values and hasattr(col, 'mask'):
mask_val = col.fill_values[masked]
for i in col.mask.nonzero()[0]:
col.str_vals[i] = mask_val
def str_vals(self):
'''convert all values in table to a list of lists of strings'''
self._set_fill_values(self.cols)
self._set_col_formats()
for col in self.cols:
col.str_vals = list(col.info.iter_str_vals())
self._replace_vals(self.cols)
return [col.str_vals for col in self.cols]
def write(self, lines):
if hasattr(self.start_line, '__call__'):
raise TypeError('Start_line attribute cannot be callable for write()')
else:
data_start_line = self.start_line or 0
while len(lines) < data_start_line:
lines.append(itertools.cycle(self.write_spacer_lines))
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
lines.append(self.splitter.join(vals))
def _set_col_formats(self):
"""
"""
for col in self.cols:
if col.info.name in self.formats:
col.info.format = self.formats[col.info.name]
def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://docs.scipy.org/doc/numpy/user/basics.types.html>`_,
e.g. numpy.int, numpy.uint, numpy.int8, numpy.int64, numpy.float,
numpy.float64, numpy.str.
Returns
-------
(converter, converter_type) : (function, generic data-type)
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
``converter_type`` tracks the generic data type produced by the converter
function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if 'int' in type_name:
converter_type = IntType
elif 'float' in type_name:
converter_type = FloatType
elif 'bool' in type_name:
converter_type = BoolType
elif 'str' in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all((svals == 'False') | (svals == 'True')):
raise ValueError('bool input strings must be only False or True')
vals = numpy.asarray(vals)
trues = vals == 'True'
falses = vals == 'False'
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False or True')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
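# Illustrative sketch (not in the original source): convert_numpy returns a
# (converter, type) pair; the converter turns a list of strings into a numpy
# array of the requested dtype.
def _demo_convert_numpy():
    converter, ctype = convert_numpy(numpy.float64)
    return converter(['1.5', '2.5']), ctype is FloatType   # (array([1.5, 2.5]), True)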
class BaseOutputter:
"""Output table as a dict of column objects keyed on column name. The
table data are stored as plain python lists within the column objects.
"""
converters = {}
# Derived classes must define default_converters and __call__
@staticmethod
def _validate_and_copy(col, converters):
"""Validate the format for the type converters and then copy those
which are valid converters for this column (i.e. converter type is
a subclass of col.type)"""
converters_out = []
try:
for converter in converters:
converter_func, converter_type = converter
if not issubclass(converter_type, NoType):
raise ValueError()
if issubclass(converter_type, col.type):
converters_out.append((converter_func, converter_type))
except (ValueError, TypeError):
raise ValueError('Error: invalid format for converters, see '
'documentation\n{}'.format(converters))
return converters_out
def _convert_vals(self, cols):
for col in cols:
# If a specific dtype was specified for a column, then use that
# to set the defaults, otherwise use the generic defaults.
default_converters = ([convert_numpy(col.dtype)] if col.dtype
else self.default_converters)
# If the user supplied a specific convert then that takes precedence over defaults
converters = self.converters.get(col.name, default_converters)
col.converters = self._validate_and_copy(col, converters)
# Catch the last error in order to provide additional information
# in case all attempts at column conversion fail. The initial
            # value of last_err will apply if no converters are defined
# and the first col.converters[0] access raises IndexError.
last_err = 'no converters defined'
while not hasattr(col, 'data'):
try:
converter_func, converter_type = col.converters[0]
if not issubclass(converter_type, col.type):
raise TypeError('converter type does not match column type')
col.data = converter_func(col.str_vals)
col.type = converter_type
except (TypeError, ValueError) as err:
col.converters.pop(0)
last_err = err
except OverflowError as err:
# Overflow during conversion (most likely an int that doesn't fit in native C long).
# Put string at the top of the converters list for the next while iteration.
warnings.warn("OverflowError converting to {} in column {}, reverting to String."
.format(converter_type.__name__, col.name), AstropyWarning)
col.converters.insert(0, convert_numpy(numpy.str))
last_err = err
except IndexError:
raise ValueError(f'Column {col.name} failed to convert: {last_err}')
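    # Added note: the ``converters`` attribute is expected to map column names to
    # lists of (converter_function, converter_type) tuples such as those returned
    # by ``convert_numpy``, e.g. (column names here are illustrative only):
    #
    #     outputter.converters = {'flux': [convert_numpy(numpy.float64)],
    #                             'flag': [convert_numpy(bool)]}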
class TableOutputter(BaseOutputter):
"""
Output the table as an astropy.table.Table object.
"""
default_converters = [convert_numpy(numpy.int),
convert_numpy(numpy.float),
convert_numpy(numpy.str)]
def __call__(self, cols, meta):
# Sets col.data to numpy array and col.type to io.ascii Type class (e.g.
# FloatType) for each col.
self._convert_vals(cols)
t_cols = [numpy.ma.MaskedArray(x.data, mask=x.mask)
if hasattr(x, 'mask') and numpy.any(x.mask)
else x.data for x in cols]
out = Table(t_cols, names=[x.name for x in cols], meta=meta['table'])
for col, out_col in zip(cols, out.columns.values()):
for attr in ('format', 'unit', 'description'):
if hasattr(col, attr):
setattr(out_col, attr, getattr(col, attr))
if hasattr(col, 'meta'):
out_col.meta.update(col.meta)
return out
class MetaBaseReader(type):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
format = dct.get('_format_name')
if format is None:
return
fast = dct.get('_fast')
if fast is not None:
FAST_CLASSES[format] = cls
FORMAT_CLASSES[format] = cls
io_formats = ['ascii.' + format] + dct.get('_io_registry_format_aliases', [])
if dct.get('_io_registry_suffix'):
func = functools.partial(connect.io_identify, dct['_io_registry_suffix'])
connect.io_registry.register_identifier(io_formats[0], Table, func)
for io_format in io_formats:
func = functools.partial(connect.io_read, io_format)
header = f"ASCII reader '{io_format}' details\n"
func.__doc__ = (inspect.cleandoc(READ_DOCSTRING).strip() + '\n\n' +
header + re.sub('.', '=', header) + '\n')
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_reader(io_format, Table, func)
if dct.get('_io_registry_can_write', True):
func = functools.partial(connect.io_write, io_format)
header = f"ASCII writer '{io_format}' details\n"
func.__doc__ = (inspect.cleandoc(WRITE_DOCSTRING).strip() + '\n\n' +
header + re.sub('.', '=', header) + '\n')
func.__doc__ += inspect.cleandoc(cls.__doc__).strip()
connect.io_registry.register_writer(io_format, Table, func)
def _is_number(x):
with suppress(ValueError):
x = float(x)
return True
return False
def _apply_include_exclude_names(table, names, include_names, exclude_names):
"""
Apply names, include_names and exclude_names to a table.
Parameters
----------
table : `~astropy.table.Table`
Input table
names : list
List of names to override those in table (set to None to use existing names)
include_names : list
List of names to include in output
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
"""
if names is not None:
# Rename table column names to those passed by user
# Temporarily rename with names that are not in `names` or `table.colnames`.
# This ensures that rename succeeds regardless of existing names.
xxxs = 'x' * max(len(name) for name in list(names) + list(table.colnames))
for ii, colname in enumerate(table.colnames):
table.rename_column(colname, xxxs + str(ii))
for ii, name in enumerate(names):
table.rename_column(xxxs + str(ii), name)
names = set(table.colnames)
if include_names is not None:
names.intersection_update(include_names)
if exclude_names is not None:
names.difference_update(exclude_names)
if names != set(table.colnames):
remove_names = set(table.colnames) - set(names)
table.remove_columns(remove_names)
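# Added example (hypothetical column names): for a table with columns
# ['a', 'b', 'c'],
#
#     _apply_include_exclude_names(t, names=None,
#                                  include_names=['a', 'b'], exclude_names=['b'])
#
# leaves only column 'a', because ``exclude_names`` is applied after
# ``include_names``.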
class BaseReader(metaclass=MetaBaseReader):
"""Class providing methods to read and write an ASCII table using the specified
header, data, inputter, and outputter instances.
Typical usage is to instantiate a Reader() object and customize the
``header``, ``data``, ``inputter``, and ``outputter`` attributes. Each
of these is an object of the corresponding class.
There is one method ``inconsistent_handler`` that can be used to customize the
behavior of ``read()`` in the event that a data row doesn't match the header.
The default behavior is to raise an InconsistentTableError.
"""
names = None
include_names = None
exclude_names = None
strict_names = False
guessing = False
encoding = None
header_class = BaseHeader
data_class = BaseData
inputter_class = BaseInputter
outputter_class = TableOutputter
def __init__(self):
self.header = self.header_class()
self.data = self.data_class()
self.inputter = self.inputter_class()
self.outputter = self.outputter_class()
# Data and Header instances benefit from a little cross-coupling. Header may need to
# know about number of data columns for auto-column name generation and Data may
# need to know about header (e.g. for fixed-width tables where widths are spec'd in header.
self.data.header = self.header
self.header.data = self.data
# Metadata, consisting of table-level meta and column-level meta. The latter
# could include information about column type, description, formatting, etc,
# depending on the table meta format.
self.meta = OrderedDict(table=OrderedDict(),
cols=OrderedDict())
def read(self, table):
"""Read the ``table`` and return the results in a format determined by
the ``outputter`` attribute.
The ``table`` parameter is any string or object that can be processed
by the instance ``inputter``. For the base Inputter class ``table`` can be
one of:
* File name
* File-like object
* String (newline separated) with all header and data lines (must have at least 2 lines)
* List of strings
Parameters
----------
table : str, file_like, list
Input table.
Returns
-------
table : `~astropy.table.Table`
Output table
"""
# If ``table`` is a file then store the name in the ``data``
# attribute. The ``table`` is a "file" if it is a string
# without the new line specific to the OS.
with suppress(TypeError):
# Strings only
if os.linesep not in table + '':
self.data.table_name = os.path.basename(table)
# Get a list of the lines (rows) in the table
self.lines = self.inputter.get_lines(table)
# Set self.data.data_lines to a slice of lines contain the data rows
self.data.get_data_lines(self.lines)
# Extract table meta values (e.g. keywords, comments, etc). Updates self.meta.
self.header.update_meta(self.lines, self.meta)
# Get the table column definitions
self.header.get_cols(self.lines)
# Make sure columns are valid
self.header.check_column_names(self.names, self.strict_names, self.guessing)
self.cols = cols = self.header.cols
self.data.splitter.cols = cols
n_cols = len(cols)
for i, str_vals in enumerate(self.data.get_str_vals()):
if len(str_vals) != n_cols:
str_vals = self.inconsistent_handler(str_vals, n_cols)
# if str_vals is None, we skip this row
if str_vals is None:
continue
# otherwise, we raise an error only if it is still inconsistent
if len(str_vals) != n_cols:
errmsg = ('Number of header columns ({}) inconsistent with'
' data columns ({}) at data line {}\n'
'Header values: {}\n'
'Data values: {}'.format(
n_cols, len(str_vals), i,
[x.name for x in cols], str_vals))
raise InconsistentTableError(errmsg)
for j, col in enumerate(cols):
col.str_vals.append(str_vals[j])
self.data.masks(cols)
if hasattr(self.header, 'table_meta'):
self.meta['table'].update(self.header.table_meta)
table = self.outputter(cols, self.meta)
self.cols = self.header.cols
_apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names)
return table
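    # Added usage sketch (assumes ``reader`` is a configured subclass instance,
    # e.g. obtained through the get_reader machinery, for a whitespace-delimited
    # format with a header row):
    #
    #     lines = ['a b c', '1 2 3', '4 5 6']
    #     t = reader.read(lines)   # -> astropy Table with columns a, b, c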
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust or skip data entries if a row is inconsistent with the header.
The default implementation does no adjustment, and hence will always trigger
an exception in read() any time the number of data entries does not match
the header.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table. If
the length of this list does not match ``ncols``, an exception will be
raised in read(). Can also be None, in which case the row will be
skipped.
"""
# an empty list will always trigger an InconsistentTableError in read()
return str_vals
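    # Added sketch of a custom handler (not part of the original file): a
    # subclass could silently drop short rows instead of raising, e.g.
    #
    #     class LenientReader(BaseReader):
    #         def inconsistent_handler(self, str_vals, ncols):
    #             # Returning None skips the row; padding to ncols would also work.
    #             return str_vals if len(str_vals) == ncols else None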
@property
def comment_lines(self):
"""Return lines in the table that match header.comment regexp"""
if not hasattr(self, 'lines'):
raise ValueError('Table must be read prior to accessing the header comment lines')
if self.header.comment:
re_comment = re.compile(self.header.comment)
comment_lines = [x for x in self.lines if re_comment.match(x)]
else:
comment_lines = []
return comment_lines
def update_table_data(self, table):
"""
Update table columns in place if needed.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
return table
def write_header(self, lines, meta):
self.header.write_comments(lines, meta)
self.header.write(lines)
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data.
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, False)
# In-place update of columns in input ``table`` to reflect column
# filtering. Note that ``table`` is guaranteed to be a copy of the
# original user-supplied table.
_apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names)
# This is a hook to allow updating the table columns after name
# filtering but before setting up to write the data. This is currently
# only used by ECSV and is otherwise just a pass-through.
table = self.update_table_data(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
self.header.table_meta = table.meta
# Write header and data to lines list
lines = []
self.write_header(lines, table.meta)
self.data.write(lines)
return lines
class ContinuationLinesInputter(BaseInputter):
"""Inputter where lines ending in ``continuation_char`` are joined
with the subsequent line. Example::
col1 col2 col3
1 \
2 3
4 5 \
6
"""
continuation_char = '\\'
replace_char = ' '
# If no_continue is not None then lines matching this regex are not subject
# to line continuation. The initial use case here is Daophot. In this
# case the continuation character is just replaced with replace_char.
no_continue = None
def process_lines(self, lines):
re_no_continue = re.compile(self.no_continue) if self.no_continue else None
parts = []
outlines = []
for line in lines:
if re_no_continue and re_no_continue.match(line):
line = line.replace(self.continuation_char, self.replace_char)
if line.endswith(self.continuation_char):
parts.append(line.replace(self.continuation_char, self.replace_char))
else:
parts.append(line)
outlines.append(''.join(parts))
parts = []
return outlines
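    # Added note: given ['1 2 \\', '3', '4 5 6'] (where '\\' denotes a single
    # trailing backslash), process_lines() replaces the continuation character
    # with ``replace_char`` and joins the first two entries into one logical
    # line, so two lines are returned in total.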
class WhitespaceSplitter(DefaultSplitter):
def process_line(self, line):
"""Replace tab with space within ``line`` while respecting quoted substrings"""
newline = []
in_quote = False
lastchar = None
for char in line:
if char == self.quotechar and (self.escapechar is None or
lastchar != self.escapechar):
in_quote = not in_quote
if char == '\t' and not in_quote:
char = ' '
lastchar = char
newline.append(char)
return ''.join(newline)
extra_reader_pars = ('Reader', 'Inputter', 'Outputter',
'delimiter', 'comment', 'quotechar', 'header_start',
'data_start', 'data_end', 'converters', 'encoding',
'data_Splitter', 'header_Splitter',
'names', 'include_names', 'exclude_names', 'strict_names',
'fill_values', 'fill_include_names', 'fill_exclude_names')
def _get_reader(Reader, Inputter=None, Outputter=None, **kwargs):
"""Initialize a table reader allowing for common customizations. See ui.get_reader()
for param docs. This routine is for internal (package) use only and is useful
because it depends only on the "core" module.
"""
from .fastbasic import FastBasic
if issubclass(Reader, FastBasic): # Fast readers handle args separately
if Inputter is not None:
kwargs['Inputter'] = Inputter
return Reader(**kwargs)
# If user explicitly passed a fast reader with enable='force'
# (e.g. by passing non-default options), raise an error for slow readers
if 'fast_reader' in kwargs:
if kwargs['fast_reader']['enable'] == 'force':
raise ParameterError('fast_reader required with ' +
'{}, but this is not a fast C reader: {}'
.format(kwargs['fast_reader'], Reader))
else:
del kwargs['fast_reader'] # Otherwise ignore fast_reader parameter
reader_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_reader_pars)
reader = Reader(**reader_kwargs)
if Inputter is not None:
reader.inputter = Inputter()
if Outputter is not None:
reader.outputter = Outputter()
# Issue #855 suggested to set data_start to header_start + default_header_length
# Thus, we need to retrieve this from the class definition before resetting these numbers.
try:
default_header_length = reader.data.start_line - reader.header.start_line
except TypeError: # Start line could be None or an instancemethod
default_header_length = None
if 'delimiter' in kwargs:
reader.header.splitter.delimiter = kwargs['delimiter']
reader.data.splitter.delimiter = kwargs['delimiter']
if 'comment' in kwargs:
reader.header.comment = kwargs['comment']
reader.data.comment = kwargs['comment']
if 'quotechar' in kwargs:
reader.header.splitter.quotechar = kwargs['quotechar']
reader.data.splitter.quotechar = kwargs['quotechar']
if 'data_start' in kwargs:
reader.data.start_line = kwargs['data_start']
if 'data_end' in kwargs:
reader.data.end_line = kwargs['data_end']
if 'header_start' in kwargs:
if (reader.header.start_line is not None):
reader.header.start_line = kwargs['header_start']
# For FixedWidthTwoLine the data_start is calculated relative to the position line.
# However, position_line is given as absolute number and not relative to header_start.
# So, ignore this Reader here.
if (('data_start' not in kwargs) and (default_header_length is not None)
and reader._format_name not in ['fixed_width_two_line', 'commented_header']):
reader.data.start_line = reader.header.start_line + default_header_length
elif kwargs['header_start'] is not None:
# User trying to set a None header start to some value other than None
raise ValueError('header_start cannot be modified for this Reader')
if 'converters' in kwargs:
reader.outputter.converters = kwargs['converters']
if 'data_Splitter' in kwargs:
reader.data.splitter = kwargs['data_Splitter']()
if 'header_Splitter' in kwargs:
reader.header.splitter = kwargs['header_Splitter']()
if 'names' in kwargs:
reader.names = kwargs['names']
if 'include_names' in kwargs:
reader.include_names = kwargs['include_names']
if 'exclude_names' in kwargs:
reader.exclude_names = kwargs['exclude_names']
# Strict names is normally set only within the guessing process to
# indicate that column names cannot be numeric or have certain
# characters at the beginning or end. It gets used in
# BaseHeader.check_column_names().
if 'strict_names' in kwargs:
reader.strict_names = kwargs['strict_names']
if 'fill_values' in kwargs:
reader.data.fill_values = kwargs['fill_values']
if 'fill_include_names' in kwargs:
reader.data.fill_include_names = kwargs['fill_include_names']
if 'fill_exclude_names' in kwargs:
reader.data.fill_exclude_names = kwargs['fill_exclude_names']
if 'encoding' in kwargs:
reader.encoding = kwargs['encoding']
reader.inputter.encoding = kwargs['encoding']
return reader
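# Added usage sketch; ``Basic`` stands in for any non-fast Reader class defined
# elsewhere in this package:
#
#     reader = _get_reader(Basic, delimiter='|', names=['a', 'b'])
#     # reader.header.splitter.delimiter == '|'; reader.names == ['a', 'b']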
extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats',
'strip_whitespace',
'names', 'include_names', 'exclude_names',
'fill_values', 'fill_include_names',
'fill_exclude_names')
def _get_writer(Writer, fast_writer, **kwargs):
"""Initialize a table writer allowing for common customizations. This
routine is for internal (package) use only and is useful because it depends
only on the "core" module. """
from .fastbasic import FastBasic
# A value of None for fill_values imply getting the default string
# representation of masked values (depending on the writer class), but the
# machinery expects a list. The easiest here is to just pop the value off,
# i.e. fill_values=None is the same as not providing it at all.
if 'fill_values' in kwargs and kwargs['fill_values'] is None:
del kwargs['fill_values']
if issubclass(Writer, FastBasic): # Fast writers handle args separately
return Writer(**kwargs)
elif fast_writer and f'fast_{Writer._format_name}' in FAST_CLASSES:
# Switch to fast writer
kwargs['fast_writer'] = fast_writer
return FAST_CLASSES[f'fast_{Writer._format_name}'](**kwargs)
writer_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_writer_pars)
writer = Writer(**writer_kwargs)
if 'delimiter' in kwargs:
writer.header.splitter.delimiter = kwargs['delimiter']
writer.data.splitter.delimiter = kwargs['delimiter']
if 'comment' in kwargs:
writer.header.write_comment = kwargs['comment']
writer.data.write_comment = kwargs['comment']
if 'quotechar' in kwargs:
writer.header.splitter.quotechar = kwargs['quotechar']
writer.data.splitter.quotechar = kwargs['quotechar']
if 'formats' in kwargs:
writer.data.formats = kwargs['formats']
if 'strip_whitespace' in kwargs:
if kwargs['strip_whitespace']:
# Restore the default SplitterClass process_val method which strips
# whitespace. This may have been changed in the Writer
# initialization (e.g. Rdb and Tab)
writer.data.splitter.process_val = operator.methodcaller('strip')
else:
writer.data.splitter.process_val = None
if 'names' in kwargs:
writer.header.names = kwargs['names']
if 'include_names' in kwargs:
writer.include_names = kwargs['include_names']
if 'exclude_names' in kwargs:
writer.exclude_names = kwargs['exclude_names']
if 'fill_values' in kwargs:
# Prepend user-specified values to the class default.
with suppress(TypeError, IndexError):
# Test if it looks like (match, replace_string, optional_colname),
# in which case make it a list
kwargs['fill_values'][1] + ''
kwargs['fill_values'] = [kwargs['fill_values']]
writer.data.fill_values = kwargs['fill_values'] + writer.data.fill_values
if 'fill_include_names' in kwargs:
writer.data.fill_include_names = kwargs['fill_include_names']
if 'fill_exclude_names' in kwargs:
writer.data.fill_exclude_names = kwargs['fill_exclude_names']
return writer
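# Added companion sketch for the writer path (``Basic`` again stands in for any
# non-fast Writer class):
#
#     writer = _get_writer(Basic, fast_writer=False, delimiter=',',
#                          formats={'flux': '%.3f'})
#
# The delimiter is pushed onto the header/data splitters and the per-column
# formats onto ``writer.data.formats``, as handled above.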
|
{
"content_hash": "8cdc99711d9e848d3421b43dec71e770",
"timestamp": "",
"source": "github",
"line_count": 1543,
"max_line_length": 106,
"avg_line_length": 37.209332469215816,
"alnum_prop": 0.5975197686975302,
"repo_name": "MSeifert04/astropy",
"id": "e4c5cb7dc228ac644c60f09bb678aab6ca923bc9",
"size": "57478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "astropy/io/ascii/core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "444651"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9891588"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
import sqlite3
from view import Frontend
from model import OssemDB
user_interface = Frontend()
member_database = OssemDB()
keep_running = ""
while keep_running != "q":
keep_running = user_interface.select_option()
if keep_running == "1":
member_database.new_member()
if keep_running == "2":
member_database.print_all_members()
|
{
"content_hash": "0035be29973a97924f919fc6ae30b90c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 49,
"avg_line_length": 27.384615384615383,
"alnum_prop": 0.6825842696629213,
"repo_name": "ossem/member_database",
"id": "8523f972ed87beccbf981ee85800d6870f82eba4",
"size": "356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5995"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_eg6_power_droid_crafted.iff"
result.attribute_template_id = 9
result.stfName("droid_name","eg_6_power_droid_base")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "422ee1c9503eed6fe40ef241b610d5f2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 69,
"avg_line_length": 23.923076923076923,
"alnum_prop": 0.6945337620578779,
"repo_name": "obi-two/Rebelion",
"id": "128bb71dd7d7e2479cd114e5753a2438b82f9e7a",
"size": "456",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_eg6_power_droid_crafted.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from sentry.cache import default_cache
from .base import BaseAttachmentCache
class DefaultAttachmentCache(BaseAttachmentCache):
def __init__(self, **options):
super(DefaultAttachmentCache, self).__init__(default_cache, **options)
|
{
"content_hash": "a2b1dbcb059f89ef8be5ec700db647c6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 78,
"avg_line_length": 28.5,
"alnum_prop": 0.7508771929824561,
"repo_name": "mvaled/sentry",
"id": "4386f3bbc404557840ab32d48743366b0befa143",
"size": "285",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/attachments/default.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
}
|
import pymongo
import ujson as json
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from datetime import datetime as dt
consumer_key = "cadDGYYIDjUz2aCzBqcy1Ff99"
consumer_secret = "Xi02ZDEvmWKMVw20WKoRmnUJlkiem3SbKeCRRuCfo8Oop8KF4n"
access_token = "363711223-HEyPCS2ediosYWHLsE0IFy9kWgHyjDLLIFY4L76T"
access_token_secret = "yRuW7kuRLmro52lVU3XnEW4bynWRij3eXUD4Np5ZozhoI"
# access_token = "ENTER YOUR ACCESS TOKEN"
# access_token_secret = "ENTER YOUR ACCESS TOKEN SECRET"
# consumer_key = "ENTER YOUR API KEY"
# consumer_secret = "ENTER YOUR API SECRET"
DB = 'twitter_stream'
def convert_tweet(d):
mentions = d["entities"]['user_mentions']
return {
'message_id': d['id'],
'subject': '',
'body': d['text'],
'sender_id': d['user']['id'],
'recipient_ids': [m['id'] for m in mentions],
'datetime': dt.fromtimestamp(float(d['timestamp_ms']) / 1000),
'mentions': [m['screen_name'] for m in mentions],
"hashtags": [h['text'] for h in d['entities']["hashtags"]],
'urls': [u['url'] for u in d['entities']["urls"]]
}
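# Added note: the record produced above has this shape (values are invented):
#
#     {'message_id': 123, 'subject': '', 'body': 'hi @bob', 'sender_id': 42,
#      'recipient_ids': [7], 'datetime': dt(2015, 6, 1, 12, 0),
#      'mentions': ['bob'], 'hashtags': [], 'urls': []}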
class StdOutListener(StreamListener):
def __init__(self, mongo_col):
self.collection = pymongo.MongoClient()[DB][mongo_col]
self.collection.remove()
def on_data(self, raw_data):
try:
data = json.loads(raw_data)
if data.get("lang") == 'en':
if data.get("entities") and \
data.get("entities").get('user_mentions') and \
data["entities"]['user_mentions']:
tweet = convert_tweet(data)
# print tweet
self.collection.insert(tweet)
        except Exception:
            import sys
            import traceback
            traceback.print_exc(file=sys.stdout)
return True
def on_error(self, status):
print status
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser('')
parser.add_argument('--terms', nargs='*')
parser.add_argument('--mongo_col')
args = parser.parse_args()
l = StdOutListener(args.mongo_col)
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
if args.terms:
stream.filter(track=args.terms)
else:
stream.sample()
|
{
"content_hash": "d7d78df2d9c81da12481fa3ad7181d8b",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 71,
"avg_line_length": 31.05128205128205,
"alnum_prop": 0.6131296449215524,
"repo_name": "xiaohan2012/lst",
"id": "3ca0b4040b5450228dbd8080a828859a53aeaec6",
"size": "2422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitter_stream_listener.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "23944"
},
{
"name": "JavaScript",
"bytes": "12920"
},
{
"name": "Makefile",
"bytes": "480"
},
{
"name": "Python",
"bytes": "326635"
},
{
"name": "Shell",
"bytes": "27798"
}
],
"symlink_target": ""
}
|
import doctest
import pickle
import unittest
from genshi import core
from genshi.core import Markup, Attrs, Namespace, QName, escape, unescape
from genshi.input import XML, ParseError
from genshi.compat import StringIO, BytesIO
class StreamTestCase(unittest.TestCase):
def test_render_utf8(self):
xml = XML('<li>Über uns</li>')
self.assertEqual(u'<li>Über uns</li>'.encode('utf-8'), xml.render(encoding='utf-8'))
def test_render_unicode(self):
xml = XML('<li>Über uns</li>')
self.assertEqual(u'<li>Über uns</li>', xml.render())
self.assertEqual(u'<li>Über uns</li>', xml.render(encoding=None))
def test_render_ascii(self):
xml = XML('<li>Über uns</li>')
self.assertEqual(u'<li>Über uns</li>'.encode('ascii'), xml.render(encoding='ascii'))
def test_render_output_stream_utf8(self):
xml = XML('<li>Über uns</li>')
strio = BytesIO()
self.assertEqual(None, xml.render(encoding='utf-8', out=strio))
self.assertEqual(u'<li>Über uns</li>'.encode('utf-8'), strio.getvalue())
def test_render_output_stream_unicode(self):
xml = XML('<li>Über uns</li>')
strio = StringIO()
self.assertEqual(None, xml.render(encoding=None, out=strio))
self.assertEqual(u'<li>Über uns</li>', strio.getvalue())
def test_pickle(self):
xml = XML('<li>Foo</li>')
buf = BytesIO()
pickle.dump(xml, buf, 2)
buf.seek(0)
xml = pickle.load(buf)
self.assertEquals('<li>Foo</li>', xml.render(encoding=None))
class MarkupTestCase(unittest.TestCase):
def test_new_with_encoding(self):
markup = Markup(u'Döner'.encode('utf-8'), encoding='utf-8')
# mimic Markup.__repr__ when constructing output for Python 2/3 compatibility
self.assertEquals("<Markup %r>" % u'D\u00f6ner', repr(markup))
def test_repr(self):
markup = Markup('foo')
self.assertEquals("<Markup u'foo'>", repr(markup))
def test_escape(self):
markup = escape('<b>"&"</b>')
assert type(markup) is Markup
self.assertEquals('<b>"&"</b>', markup)
def test_escape_noquotes(self):
markup = escape('<b>"&"</b>', quotes=False)
assert type(markup) is Markup
self.assertEquals('<b>"&"</b>', markup)
def test_unescape_markup(self):
string = '<b>"&"</b>'
markup = Markup.escape(string)
assert type(markup) is Markup
self.assertEquals(string, unescape(markup))
def test_Markup_escape_None_noquotes(self):
markup = Markup.escape(None, False)
assert type(markup) is Markup
self.assertEquals('', markup)
def test_add_str(self):
markup = Markup('<b>foo</b>') + '<br/>'
assert type(markup) is Markup
self.assertEquals('<b>foo</b><br/>', markup)
def test_add_markup(self):
markup = Markup('<b>foo</b>') + Markup('<br/>')
assert type(markup) is Markup
self.assertEquals('<b>foo</b><br/>', markup)
def test_add_reverse(self):
markup = '<br/>' + Markup('<b>bar</b>')
assert type(markup) is Markup
self.assertEquals('<br/><b>bar</b>', markup)
def test_mod(self):
markup = Markup('<b>%s</b>') % '&'
assert type(markup) is Markup
self.assertEquals('<b>&</b>', markup)
def test_mod_multi(self):
markup = Markup('<b>%s</b> %s') % ('&', 'boo')
assert type(markup) is Markup
self.assertEquals('<b>&</b> boo', markup)
def test_mod_mapping(self):
markup = Markup('<b>%(foo)s</b>') % {'foo': '&'}
assert type(markup) is Markup
self.assertEquals('<b>&</b>', markup)
def test_mod_noescape(self):
markup = Markup('<b>%(amp)s</b>') % {'amp': Markup('&')}
assert type(markup) is Markup
self.assertEquals('<b>&</b>', markup)
def test_mul(self):
markup = Markup('<b>foo</b>') * 2
assert type(markup) is Markup
self.assertEquals('<b>foo</b><b>foo</b>', markup)
def test_mul_reverse(self):
markup = 2 * Markup('<b>foo</b>')
assert type(markup) is Markup
self.assertEquals('<b>foo</b><b>foo</b>', markup)
def test_join(self):
markup = Markup('<br />').join(['foo', '<bar />', Markup('<baz />')])
assert type(markup) is Markup
self.assertEquals('foo<br /><bar /><br /><baz />', markup)
def test_join_over_iter(self):
items = ['foo', '<bar />', Markup('<baz />')]
markup = Markup('<br />').join(i for i in items)
self.assertEquals('foo<br /><bar /><br /><baz />', markup)
def test_stripentities_all(self):
markup = Markup('& j').stripentities()
assert type(markup) is Markup
self.assertEquals('& j', markup)
def test_stripentities_keepxml(self):
markup = Markup('& j').stripentities(keepxmlentities=True)
assert type(markup) is Markup
self.assertEquals('& j', markup)
def test_striptags_empty(self):
markup = Markup('<br />').striptags()
assert type(markup) is Markup
self.assertEquals('', markup)
def test_striptags_mid(self):
markup = Markup('<a href="#">fo<br />o</a>').striptags()
assert type(markup) is Markup
self.assertEquals('foo', markup)
def test_pickle(self):
markup = Markup('foo')
buf = BytesIO()
pickle.dump(markup, buf, 2)
buf.seek(0)
self.assertEquals("<Markup u'foo'>", repr(pickle.load(buf)))
class AttrsTestCase(unittest.TestCase):
def test_pickle(self):
attrs = Attrs([("attr1", "foo"), ("attr2", "bar")])
buf = BytesIO()
pickle.dump(attrs, buf, 2)
buf.seek(0)
unpickled = pickle.load(buf)
self.assertEquals("Attrs([('attr1', 'foo'), ('attr2', 'bar')])",
repr(unpickled))
def test_non_ascii(self):
attrs_tuple = Attrs([("attr1", u"föö"), ("attr2", u"bär")]).totuple()
self.assertEqual(u'fööbär', attrs_tuple[1])
class NamespaceTestCase(unittest.TestCase):
def test_repr(self):
self.assertEqual("Namespace('http://www.example.org/namespace')",
repr(Namespace('http://www.example.org/namespace')))
def test_repr_eval(self):
ns = Namespace('http://www.example.org/namespace')
self.assertEqual(eval(repr(ns)), ns)
def test_repr_eval_non_ascii(self):
ns = Namespace(u'http://www.example.org/nämespäcé')
self.assertEqual(eval(repr(ns)), ns)
def test_pickle(self):
ns = Namespace('http://www.example.org/namespace')
buf = BytesIO()
pickle.dump(ns, buf, 2)
buf.seek(0)
unpickled = pickle.load(buf)
self.assertEquals("Namespace('http://www.example.org/namespace')",
repr(unpickled))
self.assertEquals('http://www.example.org/namespace', unpickled.uri)
class QNameTestCase(unittest.TestCase):
def test_pickle(self):
qname = QName('http://www.example.org/namespace}elem')
buf = BytesIO()
pickle.dump(qname, buf, 2)
buf.seek(0)
unpickled = pickle.load(buf)
self.assertEquals('{http://www.example.org/namespace}elem', unpickled)
self.assertEquals('http://www.example.org/namespace',
unpickled.namespace)
self.assertEquals('elem', unpickled.localname)
def test_repr(self):
self.assertEqual("QName('elem')", repr(QName('elem')))
self.assertEqual("QName('http://www.example.org/namespace}elem')",
repr(QName('http://www.example.org/namespace}elem')))
def test_repr_eval(self):
qn = QName('elem')
self.assertEqual(eval(repr(qn)), qn)
def test_repr_eval_non_ascii(self):
qn = QName(u'élem')
self.assertEqual(eval(repr(qn)), qn)
def test_leading_curly_brace(self):
qname = QName('{http://www.example.org/namespace}elem')
self.assertEquals('http://www.example.org/namespace', qname.namespace)
self.assertEquals('elem', qname.localname)
def test_curly_brace_equality(self):
qname1 = QName('{http://www.example.org/namespace}elem')
qname2 = QName('http://www.example.org/namespace}elem')
self.assertEqual(qname1.namespace, qname2.namespace)
self.assertEqual(qname1.localname, qname2.localname)
self.assertEqual(qname1, qname2)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(StreamTestCase, 'test'))
suite.addTest(unittest.makeSuite(MarkupTestCase, 'test'))
suite.addTest(unittest.makeSuite(NamespaceTestCase, 'test'))
suite.addTest(unittest.makeSuite(AttrsTestCase, 'test'))
suite.addTest(unittest.makeSuite(QNameTestCase, 'test'))
suite.addTest(doctest.DocTestSuite(core))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
{
"content_hash": "952a545a0a30439effb66b88e6110582",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 97,
"avg_line_length": 35.90909090909091,
"alnum_prop": 0.5964777105118327,
"repo_name": "hodgestar/genshi",
"id": "7b2f8a77beed22185dc52fee6bddfbc61ff0f48d",
"size": "9603",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "genshi/tests/core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18893"
},
{
"name": "HTML",
"bytes": "550"
},
{
"name": "Python",
"bytes": "858169"
},
{
"name": "Shell",
"bytes": "728"
}
],
"symlink_target": ""
}
|
import sqlalchemy
from keystone.common.sql import migration_helpers
def list_constraints(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
user_table = sqlalchemy.Table('user', meta, autoload=True)
group_table = sqlalchemy.Table('group', meta, autoload=True)
domain_table = sqlalchemy.Table('domain', meta, autoload=True)
constraints = [{'table': user_table,
'fk_column': 'domain_id',
'ref_column': domain_table.c.id},
{'table': group_table,
'fk_column': 'domain_id',
'ref_column': domain_table.c.id}]
return constraints
def upgrade(migrate_engine):
# SQLite does not support constraints, and querying the constraints
# raises an exception
if migrate_engine.name == 'sqlite':
return
migration_helpers.remove_constraints(list_constraints(migrate_engine))
def downgrade(migrate_engine):
if migrate_engine.name == 'sqlite':
return
migration_helpers.add_constraints(list_constraints(migrate_engine))
|
{
"content_hash": "5b4a18586c841ac594c04bbac6eb0654",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 74,
"avg_line_length": 33.15151515151515,
"alnum_prop": 0.6471663619744058,
"repo_name": "UTSA-ICS/keystone-kerberos",
"id": "bca00902cb94f05b31954daeb68fd2ef31e824b4",
"size": "1640",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3521321"
},
{
"name": "Shell",
"bytes": "4861"
}
],
"symlink_target": ""
}
|
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.created_date_v30 import CreatedDateV30 # noqa: F401,E501
from orcid_api_v3.models.external_i_ds_v30 import ExternalIDsV30 # noqa: F401,E501
from orcid_api_v3.models.fuzzy_date_v30 import FuzzyDateV30 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30 import LastModifiedDateV30 # noqa: F401,E501
from orcid_api_v3.models.organization_v30 import OrganizationV30 # noqa: F401,E501
from orcid_api_v3.models.source_v30 import SourceV30 # noqa: F401,E501
from orcid_api_v3.models.url_v30 import UrlV30 # noqa: F401,E501
class EducationSummaryV30(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created_date': 'CreatedDateV30',
'last_modified_date': 'LastModifiedDateV30',
'source': 'SourceV30',
'put_code': 'int',
'department_name': 'str',
'role_title': 'str',
'start_date': 'FuzzyDateV30',
'end_date': 'FuzzyDateV30',
'organization': 'OrganizationV30',
'url': 'UrlV30',
'external_ids': 'ExternalIDsV30',
'display_index': 'str',
'visibility': 'str',
'path': 'str'
}
attribute_map = {
'created_date': 'created-date',
'last_modified_date': 'last-modified-date',
'source': 'source',
'put_code': 'put-code',
'department_name': 'department-name',
'role_title': 'role-title',
'start_date': 'start-date',
'end_date': 'end-date',
'organization': 'organization',
'url': 'url',
'external_ids': 'external-ids',
'display_index': 'display-index',
'visibility': 'visibility',
'path': 'path'
}
def __init__(self, created_date=None, last_modified_date=None, source=None, put_code=None, department_name=None, role_title=None, start_date=None, end_date=None, organization=None, url=None, external_ids=None, display_index=None, visibility=None, path=None): # noqa: E501
"""EducationSummaryV30 - a model defined in Swagger""" # noqa: E501
self._created_date = None
self._last_modified_date = None
self._source = None
self._put_code = None
self._department_name = None
self._role_title = None
self._start_date = None
self._end_date = None
self._organization = None
self._url = None
self._external_ids = None
self._display_index = None
self._visibility = None
self._path = None
self.discriminator = None
if created_date is not None:
self.created_date = created_date
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if source is not None:
self.source = source
if put_code is not None:
self.put_code = put_code
if department_name is not None:
self.department_name = department_name
if role_title is not None:
self.role_title = role_title
if start_date is not None:
self.start_date = start_date
if end_date is not None:
self.end_date = end_date
if organization is not None:
self.organization = organization
if url is not None:
self.url = url
if external_ids is not None:
self.external_ids = external_ids
if display_index is not None:
self.display_index = display_index
if visibility is not None:
self.visibility = visibility
if path is not None:
self.path = path
@property
def created_date(self):
"""Gets the created_date of this EducationSummaryV30. # noqa: E501
:return: The created_date of this EducationSummaryV30. # noqa: E501
:rtype: CreatedDateV30
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""Sets the created_date of this EducationSummaryV30.
:param created_date: The created_date of this EducationSummaryV30. # noqa: E501
:type: CreatedDateV30
"""
self._created_date = created_date
@property
def last_modified_date(self):
"""Gets the last_modified_date of this EducationSummaryV30. # noqa: E501
:return: The last_modified_date of this EducationSummaryV30. # noqa: E501
:rtype: LastModifiedDateV30
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this EducationSummaryV30.
:param last_modified_date: The last_modified_date of this EducationSummaryV30. # noqa: E501
:type: LastModifiedDateV30
"""
self._last_modified_date = last_modified_date
@property
def source(self):
"""Gets the source of this EducationSummaryV30. # noqa: E501
:return: The source of this EducationSummaryV30. # noqa: E501
:rtype: SourceV30
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this EducationSummaryV30.
:param source: The source of this EducationSummaryV30. # noqa: E501
:type: SourceV30
"""
self._source = source
@property
def put_code(self):
"""Gets the put_code of this EducationSummaryV30. # noqa: E501
:return: The put_code of this EducationSummaryV30. # noqa: E501
:rtype: int
"""
return self._put_code
@put_code.setter
def put_code(self, put_code):
"""Sets the put_code of this EducationSummaryV30.
:param put_code: The put_code of this EducationSummaryV30. # noqa: E501
:type: int
"""
self._put_code = put_code
@property
def department_name(self):
"""Gets the department_name of this EducationSummaryV30. # noqa: E501
:return: The department_name of this EducationSummaryV30. # noqa: E501
:rtype: str
"""
return self._department_name
@department_name.setter
def department_name(self, department_name):
"""Sets the department_name of this EducationSummaryV30.
:param department_name: The department_name of this EducationSummaryV30. # noqa: E501
:type: str
"""
self._department_name = department_name
@property
def role_title(self):
"""Gets the role_title of this EducationSummaryV30. # noqa: E501
:return: The role_title of this EducationSummaryV30. # noqa: E501
:rtype: str
"""
return self._role_title
@role_title.setter
def role_title(self, role_title):
"""Sets the role_title of this EducationSummaryV30.
:param role_title: The role_title of this EducationSummaryV30. # noqa: E501
:type: str
"""
self._role_title = role_title
@property
def start_date(self):
"""Gets the start_date of this EducationSummaryV30. # noqa: E501
:return: The start_date of this EducationSummaryV30. # noqa: E501
:rtype: FuzzyDateV30
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this EducationSummaryV30.
:param start_date: The start_date of this EducationSummaryV30. # noqa: E501
:type: FuzzyDateV30
"""
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this EducationSummaryV30. # noqa: E501
:return: The end_date of this EducationSummaryV30. # noqa: E501
:rtype: FuzzyDateV30
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this EducationSummaryV30.
:param end_date: The end_date of this EducationSummaryV30. # noqa: E501
:type: FuzzyDateV30
"""
self._end_date = end_date
@property
def organization(self):
"""Gets the organization of this EducationSummaryV30. # noqa: E501
:return: The organization of this EducationSummaryV30. # noqa: E501
:rtype: OrganizationV30
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this EducationSummaryV30.
:param organization: The organization of this EducationSummaryV30. # noqa: E501
:type: OrganizationV30
"""
self._organization = organization
@property
def url(self):
"""Gets the url of this EducationSummaryV30. # noqa: E501
:return: The url of this EducationSummaryV30. # noqa: E501
:rtype: UrlV30
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this EducationSummaryV30.
:param url: The url of this EducationSummaryV30. # noqa: E501
:type: UrlV30
"""
self._url = url
@property
def external_ids(self):
"""Gets the external_ids of this EducationSummaryV30. # noqa: E501
:return: The external_ids of this EducationSummaryV30. # noqa: E501
:rtype: ExternalIDsV30
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this EducationSummaryV30.
:param external_ids: The external_ids of this EducationSummaryV30. # noqa: E501
:type: ExternalIDsV30
"""
self._external_ids = external_ids
@property
def display_index(self):
"""Gets the display_index of this EducationSummaryV30. # noqa: E501
:return: The display_index of this EducationSummaryV30. # noqa: E501
:rtype: str
"""
return self._display_index
@display_index.setter
def display_index(self, display_index):
"""Sets the display_index of this EducationSummaryV30.
:param display_index: The display_index of this EducationSummaryV30. # noqa: E501
:type: str
"""
self._display_index = display_index
@property
def visibility(self):
"""Gets the visibility of this EducationSummaryV30. # noqa: E501
:return: The visibility of this EducationSummaryV30. # noqa: E501
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""Sets the visibility of this EducationSummaryV30.
:param visibility: The visibility of this EducationSummaryV30. # noqa: E501
:type: str
"""
allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"] # noqa: E501
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}" # noqa: E501
.format(visibility, allowed_values)
)
self._visibility = visibility
@property
def path(self):
"""Gets the path of this EducationSummaryV30. # noqa: E501
:return: The path of this EducationSummaryV30. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this EducationSummaryV30.
:param path: The path of this EducationSummaryV30. # noqa: E501
:type: str
"""
self._path = path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EducationSummaryV30, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EducationSummaryV30):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
{
"content_hash": "3119ac8cd292e098db4b4fe61e2afeeb",
"timestamp": "",
"source": "github",
"line_count": 460,
"max_line_length": 276,
"avg_line_length": 29.86304347826087,
"alnum_prop": 0.5961272475795297,
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"id": "9de3dd3c32fe90ea48646795f30aa47553a827f0",
"size": "13754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcid_api_v3/models/education_summary_v30.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20266"
},
{
"name": "Dockerfile",
"bytes": "3303"
},
{
"name": "HTML",
"bytes": "239338"
},
{
"name": "JavaScript",
"bytes": "2240"
},
{
"name": "Makefile",
"bytes": "600"
},
{
"name": "PLpgSQL",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "7935510"
},
{
"name": "Shell",
"bytes": "12088"
}
],
"symlink_target": ""
}
|
from __future__ import division
from .processor import Processor
import os
class OpenPdf(Processor):
def open_pdf(self, root):
"""
Open the generated pdf file.
"""
pdf_name = root + os.path.extsep + 'pdf'
self.logger.info('Opening "{0}"...'.format(pdf_name))
os.system('/usr/bin/open "{0}"'.format(pdf_name))
|
{
"content_hash": "09bd62889e10e172060a132c955c12d9",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 55,
"avg_line_length": 21.733333333333334,
"alnum_prop": 0.6595092024539877,
"repo_name": "olivierverdier/pydflatex",
"id": "ba3c61c6fa8b3b8cee0a829578160727ced16eef",
"size": "364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pydflatex/open_pdf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "35459"
},
{
"name": "TeX",
"bytes": "986"
}
],
"symlink_target": ""
}
|
"""
CrowdFlower Search Relevance Challenge (Kaggle)
multi_svm_model.py: build one SVM model for each query (train and test queries are the same).
__author__: gbakie
"""
import re
import numpy as np
from collections import defaultdict
from operator import itemgetter
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem.porter import *
from sklearn import svm
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from cf_io import load_training, load_testing, write_output
# Model parameters and constants
SEED = 7
TITLE_WEIGHT = 2
SVM_C = 2.
CUSTOM_STOP_WORDS = ['http','www','img','border','color','style','padding','table','font','inch','width','height','td', 'tr', 'nbsp', 'strong', 'li', 'ul']
stop_words = set(stopwords.words('english'))
stop_words.update(CUSTOM_STOP_WORDS)
stemmer = PorterStemmer()
def group_products_by_queries(data, test=False):
dict_queries = defaultdict(list)
for line in data:
id = line["id"]
query = line["query"].lower()
title = line["product_title"].lower()
title = re.sub("[^a-zA-Z0-9]"," ", title)
desc = line["product_description"].lower()
if '<' in desc or '{' in desc:
desc = ""
else:
desc = re.sub("[^a-zA-Z0-9]"," ", desc)
        # Repeat the title TITLE_WEIGHT times (space-separated) so its terms are
        # weighted more heavily than the description.
        prod_text = " ".join([title] * TITLE_WEIGHT + [desc])
prod = [id,prod_text,preprocess(title)]
#prod = [id,prod_text,title]
if test == False:
rate = line["median_relevance"]
var = line["relevance_variance"]
if var == 0:
var = 4.
else:
var = 1./var
prod.extend([rate,var])
dict_queries[query].append(prod)
return dict_queries
# used for DEBUG
def dump_queries(train, test):
for query,prods_train in train.items():
print "Query: %s" % query
print "-------TRAIN--------"
for prod in prods_train:
print prod[2]
print "\n\n"
print "-------TEST---------"
prods_test = test[query]
for prod in prods_test:
print prod[2]
print "\n\n"
def get_product_vocab(dict_queries):
tok = RegexpTokenizer(r'\w+')
vocab = {}
for query,v in dict_queries.items():
words = defaultdict(int)
for prod in v:
w_prod = tok.tokenize(prod[1])
for w in w_prod:
#wt = stem(wt)
if not re.match(r'\d+$', w) and \
len(w) > 1 and \
w not in stop_words:
words[w] += 1
vocab[query] = words.keys()
#vocab[query] = [k for (k, v) in words.iteritems() if v > 1]
"""
print "Query: " + query
sorted_w = sorted(words.items(), key=lambda x:x[1], reverse=True)
print sorted_w
"""
return vocab
# tokenize sentence and apply stemmer to each token
def preprocess(sent):
tok = RegexpTokenizer(r'\w+')
words = tok.tokenize(sent)
new_words = []
for w in words:
new_words.append(stemmer.stem(w))
return " ".join(new_words)
def build_features(dict_queries, vocab, test=False):
features = {}
for query,v in dict_queries.items():
q_vocab = vocab[query]
prod_texts = []
Y = []
weights = []
ids = []
titles = []
if test == False:
for prod in v:
prod_texts.append(prod[1])
titles.append(prod[2])
Y.append(prod[3])
weights.append(prod[4])
else:
for prod in v:
ids.append(prod[0])
prod_texts.append(prod[1])
titles.append(prod[2])
vec_query = TfidfVectorizer(max_df=0.8,
min_df=1, stop_words=None,
use_idf=False, smooth_idf=False,
strip_accents='unicode',
analyzer='word',
vocabulary=q_vocab,
#sublinear_tf=True,
)
X = vec_query.fit_transform(prod_texts).todense()
cos_sim = cosine_sim(preprocess(query), titles)
X = np.hstack((X,cos_sim))
if test == False:
Y = np.asarray(Y)
features[query] = (X, Y, weights)
else:
ids = np.asarray(ids)
features[query] = (X, ids)
return features
# calculate cosine similarity between query and title
def cosine_sim(query, titles):
vec = TfidfVectorizer(max_df=1.,
min_df=1, stop_words='english',
use_idf=False, smooth_idf=False,
binary=True,
strip_accents='unicode',
analyzer='word',
)
    # Fit on the titles plus the query string itself so the query's terms are in
    # the learned vocabulary.
    full_data = titles + [query]
vec.fit(full_data)
X_query = vec.transform([query])
X_title = vec.transform(titles)
scores = cosine_similarity(X_query, X_title)
return scores.T
# train a SVM model for each one of the training queries
# use models to predict relevance on test dataset
def train_and_classify(train, test):
prods_train = group_products_by_queries(train)
vocab = get_product_vocab(prods_train)
f = build_features(prods_train, vocab)
clfs = {}
for query,products in f.items():
(X, Y, weights) = products
print "query: %s. number of products: %d" % (query, X.shape[0])
clf = svm.SVC(C=SVM_C, random_state=SEED, kernel='linear')
clf.fit(X,Y)
#clf.fit(X,Y,weights)
clfs[query] = clf
prods_test = group_products_by_queries(test,True)
f_t = build_features(prods_test,vocab,True)
res = {}
for query,vs in f_t.items():
clf = clfs[query]
(v,id) = vs
pred = clf.predict(v)
for i,p in zip(id,pred):
res[i] = p
pred = []
for k in sorted(res.iterkeys()):
pred.append(res[k])
return pred
# used for DEBUG
def analyse(train, test):
prods_train = group_products_by_queries(train)
prods_test = group_products_by_queries(test,True)
dump_queries(prods_train, prods_test)
def main():
train = load_training()
test = load_testing()
#analyse(train, test)
pred = train_and_classify(train,test)
write_output(test,pred)
if __name__ == '__main__':
main()
|
{
"content_hash": "5a6787bc10272a564f2c599c2fe1281b",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 155,
"avg_line_length": 27.752100840336134,
"alnum_prop": 0.5364118092354278,
"repo_name": "gbakie/kaggle-cf-search",
"id": "a89240362d080a164e81430d6f0eeed93febc66f",
"size": "6605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/multi_svm_model.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9211"
}
],
"symlink_target": ""
}
|
import tempfile
from contextlib import contextmanager
import os
import zlib
from mock import MagicMock
from ... import core
from ...core.application_base import Application
@contextmanager
def make_file(contents, suffix, decompress=False):
"""Context manager to write data to a temporary file,
and delete on exit
:param contents: Data to write. string
:param suffix: File suffix. string
"""
if decompress:
contents = zlib.decompress(contents)
try:
_, fname = tempfile.mkstemp(suffix=suffix)
with open(fname, 'wb') as infile:
infile.write(contents)
yield fname
finally:
os.unlink(fname)
@contextmanager
def simple_catalog():
    """Context manager to create a temporary CSV data file,
    and delete on exit
    """
with make_file('#a, b\n1, 2\n3, 4', '.csv') as result:
yield result
def simple_session():
collect = core.data_collection.DataCollection()
hub = core.hub.Hub()
result = core.Session(data_collection=collect, hub=hub,
application=MagicMock(Application),
command_stack=core.CommandStack())
result.command_stack.session = result
return result
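# Added usage sketch for these helpers (illustrative only):
#
#     with simple_catalog() as path:
#         pass                      # ``path`` names a temporary two-column CSV
#     session = simple_session()    # Session with a mocked Application attached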
|
{
"content_hash": "03db970a6ed6b1815909c8f5396c34cb",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 61,
"avg_line_length": 25.367346938775512,
"alnum_prop": 0.6532582461786002,
"repo_name": "bsipocz/glue",
"id": "c94587c4121db821996199d18fa1eca1200f0b0c",
"size": "1243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/core/tests/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""The Gumbel distribution."""
from equadratures.distributions.template import Distribution
from equadratures.distributions.recurrence_utils import jacobi_recurrence_coefficients
import numpy as np
from scipy.stats import gumbel_r
RECURRENCE_PDF_SAMPLES = 50000
class Gumbel(Distribution):
"""
The class defines a Gumbel object. It is the child of Distribution.
:param int shape_parameter:
The shape parameter associated with the Gumbel distribution.
"""
def __init__(self, location, scale_parameter):
self.scale_parameter = scale_parameter
self.location = location
if self.scale_parameter is not None:
self.bounds = np.array([-np.inf, np.inf])
if self.scale_parameter > 0:
mean, var, skew, kurt = gumbel_r.stats(loc=self.location, scale=self.scale_parameter, moments='mvsk')
self.parent = gumbel_r(loc=self.location, scale=self.scale_parameter)
self.mean = mean
self.variance = var
self.skewness = skew
self.kurtosis = kurt
self.x_range_for_pdf = np.linspace(self.location - 10.0, 20.0 + self.location, RECURRENCE_PDF_SAMPLES)
def get_description(self):
"""
A description of the Gumbel distribution.
:param Gumbel self:
An instance of the Gumbel class.
:return:
A string describing the Gumbel distribution.
"""
text = "is a Gumbel distribution is characterised by its scale parameter, which here is"+str(self.scale_parameter)+" and its location, given by "+str(self.location)+"."
return text
def get_pdf(self, points=None):
"""
A Gumbel probability density function.
:param Gumbel self:
An instance of the Gumbel class.
:param points:
Matrix of points for defining the probability density function.
:return:
An array of N equidistant values over the support of the Gumbel distribution.
:return:
Probability density values along the support of the Gumbel distribution.
"""
if points is not None:
return self.parent.pdf(points)
else:
            raise ValueError('Please provide an input for the get_pdf method.')
def get_cdf(self, points=None):
"""
A Gumbel cumulative density function.
:param Gumbel self:
An instance of the Gumbel class.
:param matrix points:
Matrix of points for defining the cumulative density function.
:return:
An array of N equidistant values over the support of the Gumbel distribution.
:return:
Cumulative density values along the support of the Gumbel distribution.
"""
if points is not None:
return self.parent.cdf(points)
else:
            raise ValueError('Please provide an input for the get_cdf method.')
def get_icdf(self, xx):
"""
A Gumbel inverse cumulative density function.
:param Gumbel:
An instance of Gumbel class
:param matrix xx:
A matrix of points at which the inverse cumulative density function need to be evaluated.
:return:
Inverse cumulative density function values of the Gumbel distribution.
"""
return self.parent.ppf(xx)
def get_samples(self, m=None):
"""
Generates samples from the Gumbel distribution.
:param Gumbel self:
An instance of Gumbel class
:param integer m:
Number of random samples. If no value is provided, a default of 5e05 is assumed.
:return:
A N-by-1 vector that contains the samples.
"""
if m is not None:
number = m
else:
number = 500000
        return self.parent.rvs(size=number)
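# Usage sketch (added for illustration; the parameter values below are
# assumptions, not part of the library).
if __name__ == '__main__':
    dist = Gumbel(location=0.0, scale_parameter=1.0)
    print(dist.get_description())
    print(dist.get_samples(m=5))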
|
{
"content_hash": "d64c4abe3ac9bd4c7bf8384c7b1ad323",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 176,
"avg_line_length": 39.505050505050505,
"alnum_prop": 0.6169777550498594,
"repo_name": "psesh/Effective-Quadratures",
"id": "0de30a51110ff36445d8a5146ad4316fcbe3de2e",
"size": "3911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "equadratures/distributions/gumbel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1773160"
},
{
"name": "Python",
"bytes": "177611"
},
{
"name": "TeX",
"bytes": "1327"
}
],
"symlink_target": ""
}
|
from project.library import sub
def main(condition):
if condition:
return sub.sub_func(condition)
else:
y = 0
return sub.sub_func(y)
|
{
"content_hash": "1827ef400f8d507f4106427dad9af32b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 38,
"avg_line_length": 18.555555555555557,
"alnum_prop": 0.6107784431137725,
"repo_name": "Robpol86/coveralls-multi-ci",
"id": "64d5c1aa146169f4eddf03a0c5bd8756e2fc395d",
"size": "167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sample_project/project/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47869"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
425: FF auto encoder with single appliance (Fridge)
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 5000
N_SEQ_PER_BATCH = 64
SEQ_LENGTH = 2048
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['washer dryer', 'washing machine'],
'hair straighteners',
'television',
'dish washer',
['fridge freezer', 'fridge', 'freezer']
],
max_appliance_powers=[2400, 500, 200, 2500, 200],
# max_input_power=200,
max_diff=200,
on_power_thresholds=[5] * 5,
min_on_durations=[1800, 60, 60, 1800, 60],
min_off_durations=[600, 12, 12, 1800, 12],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.75,
skip_probability_for_first_appliance=0.2,
one_target_per_seq=False,
n_seq_per_batch=N_SEQ_PER_BATCH,
# subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=False,
target_is_prediction=False,
# independently_center_inputs=True,
standardise_input=True,
standardise_targets=True,
# unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False,
# two_pass=True,
# clock_type='ramp',
# clock_period=SEQ_LENGTH
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
2000: 1e-3,
10000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
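    # Feed-forward autoencoder: two conv blocks over time, then dense layers
    # narrowing to a 32-unit bottleneck and expanding back to SEQ_LENGTH.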
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': identity,
'b': None,
'border_mode': 'same'
},
{
'type': BatchNormLayer,
'axes': (0, 2),
'nonlinearity': rectify
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': identity,
'b': None,
'border_mode': 'same'
},
{
'type': BatchNormLayer,
'axes': (0, 2),
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': identity, 'b': None
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': identity, 'b': None
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': identity, 'b': None
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 32,
'nonlinearity': identity, 'b': None
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': identity, 'b': None
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': identity, 'b': None
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def main():
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e440.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
|
{
"content_hash": "be22f9839e25335df384f590002e8931",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 133,
"avg_line_length": 30.638686131386862,
"alnum_prop": 0.5612864800476474,
"repo_name": "mmottahedi/neuralnilm_prototype",
"id": "f2835ffabd9483ee3a768beacaaf3f2dc8d6aec2",
"size": "8395",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/e440.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4536723"
}
],
"symlink_target": ""
}
|
"""Python file with invalid syntax, used by scripts/linters/
python_linter_test. This file is using str() which is not allowed.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import python_utils
class FakeClass(python_utils.OBJECT):
"""This is a fake docstring for invalid syntax purposes."""
def __init__(self, fake_arg):
self.fake_arg = fake_arg
def fake_method(self, num):
"""This doesn't do anything.
Args:
            num: int. Means nothing.
Yields:
tuple(str, str). The argument passed in but twice in a tuple.
"""
num = str(num) # Use of str is not allowed.
yield (num, num)
|
{
"content_hash": "389c68aeaa29df6b6413165a2dd9e814",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 29.037037037037038,
"alnum_prop": 0.6377551020408163,
"repo_name": "prasanna08/oppia",
"id": "126b66f02f1c2ab7f3cf49af81ed28b39be8c3c0",
"size": "1407",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/linters/test_files/invalid_str.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "97795"
},
{
"name": "HTML",
"bytes": "1128491"
},
{
"name": "JavaScript",
"bytes": "733121"
},
{
"name": "Python",
"bytes": "9362251"
},
{
"name": "Shell",
"bytes": "10639"
},
{
"name": "TypeScript",
"bytes": "6077851"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import sys
from itertools import count
from unittest.mock import MagicMock, Mock, patch
import pytest
from kombu import Connection
from kombu.transport import pyamqp
def test_amqps_connection():
conn = Connection('amqps://')
assert conn.transport # evaluate transport, don't connect
assert conn.ssl
class MockConnection(dict):
def __setattr__(self, key, value):
self[key] = value
def connect(self):
pass
class test_Channel:
def setup(self):
class Channel(pyamqp.Channel):
wait_returns = []
def _x_open(self, *args, **kwargs):
pass
def wait(self, *args, **kwargs):
return self.wait_returns
def _send_method(self, *args, **kwargs):
pass
self.conn = Mock()
self.conn._get_free_channel_id.side_effect = count(0).__next__
self.conn.channels = {}
self.channel = Channel(self.conn, 0)
def test_init(self):
assert not self.channel.no_ack_consumers
def test_prepare_message(self):
assert self.channel.prepare_message(
'foobar', 10, 'application/data', 'utf-8',
properties={},
)
def test_message_to_python(self):
message = Mock()
message.headers = {}
message.properties = {}
assert self.channel.message_to_python(message)
def test_close_resolves_connection_cycle(self):
assert self.channel.connection is not None
self.channel.close()
assert self.channel.connection is None
def test_basic_consume_registers_ack_status(self):
self.channel.wait_returns = ['my-consumer-tag']
self.channel.basic_consume('foo', no_ack=True)
assert 'my-consumer-tag' in self.channel.no_ack_consumers
self.channel.wait_returns = ['other-consumer-tag']
self.channel.basic_consume('bar', no_ack=False)
assert 'other-consumer-tag' not in self.channel.no_ack_consumers
self.channel.basic_cancel('my-consumer-tag')
assert 'my-consumer-tag' not in self.channel.no_ack_consumers
class test_Transport:
def setup(self):
self.connection = Connection('pyamqp://')
self.transport = self.connection.transport
def test_create_channel(self):
connection = Mock()
self.transport.create_channel(connection)
connection.channel.assert_called_with()
def test_ssl_cert_passed(self):
ssl_dict = {
'ca_certs': '/etc/pki/tls/certs/something.crt',
'cert_reqs': "ssl.CERT_REQUIRED",
}
ssl_dict_copy = {k: ssl_dict[k] for k in ssl_dict}
connection = Connection('amqps://', ssl=ssl_dict_copy)
assert connection.transport.client.ssl == ssl_dict
def test_driver_version(self):
assert self.transport.driver_version()
def test_drain_events(self):
connection = Mock()
self.transport.drain_events(connection, timeout=10.0)
connection.drain_events.assert_called_with(timeout=10.0)
def test_dnspython_localhost_resolve_bug(self):
class Conn:
def __init__(self, **kwargs):
vars(self).update(kwargs)
def connect(self):
pass
self.transport.Connection = Conn
self.transport.client.hostname = 'localhost'
conn1 = self.transport.establish_connection()
assert conn1.host == '127.0.0.1:5672'
self.transport.client.hostname = 'example.com'
conn2 = self.transport.establish_connection()
assert conn2.host == 'example.com:5672'
def test_close_connection(self):
connection = Mock()
connection.client = Mock()
self.transport.close_connection(connection)
assert connection.client is None
connection.close.assert_called_with()
@pytest.mark.masked_modules('ssl')
def test_import_no_ssl(self, mask_modules):
pm = sys.modules.pop('amqp.connection')
try:
from amqp.connection import SSLError
assert SSLError.__module__ == 'amqp.connection'
finally:
if pm is not None:
sys.modules['amqp.connection'] = pm
class test_pyamqp:
def test_default_port(self):
class Transport(pyamqp.Transport):
Connection = MockConnection
c = Connection(port=None, transport=Transport).connect()
assert c['host'] == f'127.0.0.1:{Transport.default_port}'
def test_custom_port(self):
class Transport(pyamqp.Transport):
Connection = MockConnection
c = Connection(port=1337, transport=Transport).connect()
assert c['host'] == '127.0.0.1:1337'
def test_ssl(self):
# Test setting TLS by ssl=True.
class Transport(pyamqp.Transport):
Connection = MagicMock()
Connection(transport=Transport, ssl=True).connect()
Transport.Connection.assert_called_once()
_, kwargs = Transport.Connection.call_args
assert kwargs['ssl'] is True
def test_ssl_dict(self):
# Test setting TLS by setting ssl as dict.
class Transport(pyamqp.Transport):
Connection = MagicMock()
Connection(transport=Transport, ssl={'a': 1, 'b': 2}).connect()
Transport.Connection.assert_called_once()
_, kwargs = Transport.Connection.call_args
assert kwargs['ssl'] == {'a': 1, 'b': 2}
@pytest.mark.parametrize(
'hostname',
[
'broker.example.com',
'amqp://broker.example.com/0',
'amqps://broker.example.com/0',
'amqp://guest:guest@broker.example.com/0',
'amqp://broker.example.com;broker2.example.com'
])
def test_ssl_server_hostname(self, hostname):
# Test setting server_hostname from URI.
class Transport(pyamqp.Transport):
Connection = MagicMock()
Connection(
hostname, transport=Transport, ssl={'server_hostname': None}
).connect()
Transport.Connection.assert_called_once()
_, kwargs = Transport.Connection.call_args
assert kwargs['ssl'] == {'server_hostname': 'broker.example.com'}
def test_register_with_event_loop(self):
t = pyamqp.Transport(Mock())
conn = Mock(name='conn')
loop = Mock(name='loop')
t.register_with_event_loop(conn, loop)
loop.add_reader.assert_called_with(
conn.sock, t.on_readable, conn, loop,
)
def test_heartbeat_check(self):
t = pyamqp.Transport(Mock())
conn = Mock()
t.heartbeat_check(conn, rate=4.331)
conn.heartbeat_tick.assert_called_with(rate=4.331)
def test_get_manager(self):
with patch('kombu.transport.pyamqp.get_manager') as get_manager:
t = pyamqp.Transport(Mock())
t.get_manager(1, kw=2)
get_manager.assert_called_with(t.client, 1, kw=2)
|
{
"content_hash": "570c4481316abf4d3f6d73f19a5be585",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 73,
"avg_line_length": 31.008849557522122,
"alnum_prop": 0.6078767123287672,
"repo_name": "celery/kombu",
"id": "bd40239574cdda5e49875828d221e40de755b83a",
"size": "7008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "t/unit/transport/test_pyamqp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1844"
},
{
"name": "Makefile",
"bytes": "3788"
},
{
"name": "Python",
"bytes": "1218137"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from sys import *
if version_info[0] == 2:
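    # Re-export the Python 2 builtin so ``from pies.sys import intern`` works;
    # on Python 3, sys.intern is already provided by the star import above.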
intern = intern
|
{
"content_hash": "9f3914b78539e219bab391dcb0aa0db0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 38,
"avg_line_length": 17.333333333333332,
"alnum_prop": 0.6730769230769231,
"repo_name": "AbsoluteMSTR/pies",
"id": "64087b099a4d0fe8f15f2a0e1a14efa530b11bc1",
"size": "104",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "pies/sys.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31432"
},
{
"name": "Shell",
"bytes": "1169"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ShapeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="shape", parent_name="scattercarpet.line", **kwargs):
super(ShapeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
values=kwargs.pop("values", ["linear", "spline"]),
**kwargs,
)
|
{
"content_hash": "5fcad2d2efc4d8d40c2fabf31b93d036",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 88,
"avg_line_length": 39.083333333333336,
"alnum_prop": 0.6140724946695096,
"repo_name": "plotly/plotly.py",
"id": "bf3b7d18d1d1c63f1e62fde9954cafc00f674e27",
"size": "469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattercarpet/line/_shape.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
import platform
import os
print(os.name)
print(platform.system())
print(platform.release())
|
{
"content_hash": "6f1c0f1689ae3d8ddfeca60b1c4de076",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 25,
"avg_line_length": 18.4,
"alnum_prop": 0.782608695652174,
"repo_name": "dadavidson/Python_Lab",
"id": "fafedaaf98e4d8f588a020250626c383f2c9af6a",
"size": "219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python-w3resource/Python_Basic/ex43.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2047495"
},
{
"name": "Python",
"bytes": "106265"
}
],
"symlink_target": ""
}
|
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
|
{
"content_hash": "4ab1b8b1b1f6403022e6e5c6454ea33d",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 50,
"avg_line_length": 33,
"alnum_prop": 0.6363636363636364,
"repo_name": "OpenTouch/night-watch",
"id": "391e772e3159805db715b03f2f8144ed6ea1e37d",
"size": "711",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/nw/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "108181"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0' # numpydoc requires sphinx >= 1.0
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.append(os.path.abspath('sphinxext'))
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'math_dollar', # has to go before numpydoc
'numpydoc',
'github']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'arghphot'
copyright = '2016, Eduardo Balbinot'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'arghphotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'arghphot.tex', 'arghphot Documentation',
'Eduardo Balbinot', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'arghphot', 'arghphot Documentation',
['Eduardo Balbinot'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'arghphot', 'arghphot Documentation',
'Eduardo Balbinot', 'arghphot', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
texinfo_domain_indices = False
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
{
"content_hash": "22a75f0de6398885db140daa0752886f",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 79,
"avg_line_length": 32.083969465648856,
"alnum_prop": 0.6950987389959553,
"repo_name": "balbinot/arghphot",
"id": "b6caef5f48e2c44b6a18674aa68d4b4ef93b7c59",
"size": "8850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "40948"
},
{
"name": "Python",
"bytes": "44833"
}
],
"symlink_target": ""
}
|
"""
Test for emptynet.py
"""
import unittest
import pexpect
class testEmptyNet( unittest.TestCase ):
prompt = 'mininet>'
def testEmptyNet( self ):
"Run simple CLI tests: pingall (verify 0% drop) and iperf (sanity)"
p = pexpect.spawn( 'python -m mininet.examples.emptynet' )
p.expect( self.prompt )
# pingall test
p.sendline( 'pingall' )
        p.expect( r'(\d+)% dropped' )
percent = int( p.match.group( 1 ) ) if p.match else -1
self.assertEqual( percent, 0 )
p.expect( self.prompt )
# iperf test
p.sendline( 'iperf' )
p.expect( "Results: \['[\d.]+ .bits/sec', '[\d.]+ .bits/sec'\]" )
p.expect( self.prompt )
p.sendline( 'exit' )
p.wait()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "84065aaa283692a426ee38161086d2d8",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 75,
"avg_line_length": 27.1,
"alnum_prop": 0.5473554735547356,
"repo_name": "5GExchange/escape",
"id": "0d4d01ddaab52bb1fc19bd26c850ad1cc337dcce",
"size": "836",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mininet/examples/test/test_emptynet.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "381"
},
{
"name": "C",
"bytes": "9773701"
},
{
"name": "C++",
"bytes": "1144774"
},
{
"name": "Dockerfile",
"bytes": "4497"
},
{
"name": "HTML",
"bytes": "423218"
},
{
"name": "JavaScript",
"bytes": "9048"
},
{
"name": "Makefile",
"bytes": "121260"
},
{
"name": "Objective-C",
"bytes": "2964"
},
{
"name": "Python",
"bytes": "2856844"
},
{
"name": "Roff",
"bytes": "80820"
},
{
"name": "Shell",
"bytes": "190566"
}
],
"symlink_target": ""
}
|
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score, confusion_matrix, precision_score, accuracy_score, recall_score, roc_auc_score
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
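# NOTE: the magic command below assumes this script is run under IPython/Jupyter.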
%matplotlib inline
# directory of kaggle prediction files
kagloc = r"C:\Users\mworley\Dropbox\capstone\data\external\kaggle_predictions"
data_path = r"C:\Users\mworley\Dropbox\capstone\data\processed"
# kaggle prediction scores
kagscores = pd.read_csv(kagloc + r'\kaggle_scores.csv')
# get training set, features and labels
x_train = pd.read_csv(data_path + r'\xtrain_1.csv')
y_train = pd.read_csv(data_path + r'\ytrain_1.csv', header=None)
x_train = x_train.drop('season', axis=1)
y_train = y_train.iloc[:, 0]
# standardize the training features
sc = StandardScaler()
sc.fit(x_train)
x_trainst = sc.transform(x_train)
# train using best model from machine learning phase
clf = LogisticRegression(penalty='l2', C=0.0001, random_state=0, n_jobs=-1)
clf.fit(x_trainst, y_train)
# get 2017 matchups, clean for feature and label sets
test2017 = pd.read_csv(data_path + r'\matchups_2017.csv')
test2017 = test2017[test2017.upsetpot == 1]
test2017 = test2017.drop('season', axis=1)
y_test = test2017['upset']
x_test = test2017.drop(['daynum', 'upset', 'upsetpot', 'win_t2', 't1_team_id', 't2_team_id', 'Win'], axis=1)
# standardize test features
x_testst = sc.transform(x_test)
# get predicted labels for 2017 test set
preds = clf.predict(x_testst)
probas = clf.predict_proba(x_testst)
probas = probas[:, 1]
# get classification scores for 2017 test set
accuracy = accuracy_score(y_test, preds)
roc_auc = roc_auc_score(y_test, probas)
precision = precision_score(y_test, preds)
recall = recall_score(y_test, preds)
f1 = f1_score(y_test, preds)
my_scores = {'accuracy': accuracy,
'roc_auc': roc_auc,
'precision': precision,
'recall': recall,
'f1': f1}
scores = ['accuracy', 'roc_auc', 'precision', 'recall', 'f1']
df = pd.DataFrame({'Label': ['L2 logistic regression', 'percentile of my model', 'kaggle mean', 'kaggle max', 'kaggle 25th percentile', 'kaggle 75th percentile']})
for x in scores:
s = kagscores[x]
score = my_scores[x]
x1 = stats.percentileofscore(s, score)
x2 = s.mean()
x3 = s.max()
x4 = s.quantile(q=0.25)
x5 = s.quantile(q=0.75)
c = pd.Series([score, x1, x2, x3, x4, x5]).rename(x)
c = c.round(3)
df[x] = c
# save kaggle comparison to reports directory
rpath = r'C:\Users\mworley\Dropbox\capstone\reports'
df.to_csv(rpath + r'\kaggle_compare.csv', index=False)
|
{
"content_hash": "70263f1f9d244d259f2395f07a21531f",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 163,
"avg_line_length": 34.73076923076923,
"alnum_prop": 0.6995201181247693,
"repo_name": "mworles/capstone_one",
"id": "2ba2cc4a9a45da09cba7afdd02b724d75702f56e",
"size": "2709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/reports/kaggle_compare.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2210124"
},
{
"name": "Python",
"bytes": "94698"
}
],
"symlink_target": ""
}
|
"""
Script to check for new clusterfuzz issues since the last rolled v8 revision.
Returns a json list with test case IDs if any.
Security considerations: The security key and request data must never be
written to public logs. Public automated callers of this script should
suppress stdout and stderr and only process contents of the results_file.
"""
import argparse
import httplib
import json
import os
import re
import sys
import urllib
import urllib2
# Constants to git repos.
BASE_URL = "https://chromium.googlesource.com"
DEPS_LOG = BASE_URL + "/chromium/src/+log/master/DEPS?format=JSON"
# Constants for retrieving v8 rolls.
CRREV = "https://cr-rev.appspot.com/_ah/api/crrev/v1/commit/%s"
V8_COMMIT_RE = re.compile(
r"^Update V8 to version \d+\.\d+\.\d+ \(based on ([a-fA-F0-9]+)\)\..*")
# Constants for the clusterfuzz backend.
HOSTNAME = "backend-dot-cluster-fuzz.appspot.com"
# Crash patterns.
V8_INTERNAL_RE = re.compile(r"^v8::internal.*")
ANY_RE = re.compile(r".*")
# List of all api requests.
BUG_SPECS = [
{
"args": {
"job_type": "linux_asan_chrome_v8",
"reproducible": "True",
"open": "True",
"bug_information": "",
},
"crash_state": V8_INTERNAL_RE,
},
{
"args": {
"job_type": "linux_asan_d8",
"reproducible": "True",
"open": "True",
"bug_information": "",
},
"crash_state": ANY_RE,
},
{
"args": {
"job_type": "linux_asan_d8_dbg",
"reproducible": "True",
"open": "True",
"bug_information": "",
},
"crash_state": ANY_RE,
},
{
"args": {
"job_type": "linux_asan_d8_ignition_dbg",
"reproducible": "True",
"open": "True",
"bug_information": "",
},
"crash_state": ANY_RE,
},
{
"args": {
"job_type": "linux_asan_d8_v8_arm_dbg",
"reproducible": "True",
"open": "True",
"bug_information": "",
},
"crash_state": ANY_RE,
},
{
"args": {
"job_type": "linux_asan_d8_ignition_v8_arm_dbg",
"reproducible": "True",
"open": "True",
"bug_information": "",
},
"crash_state": ANY_RE,
},
{
"args": {
"job_type": "linux_asan_d8_v8_arm64_dbg",
"reproducible": "True",
"open": "True",
"bug_information": "",
},
"crash_state": ANY_RE,
},
{
"args": {
"job_type": "linux_asan_d8_v8_mipsel_dbg",
"reproducible": "True",
"open": "True",
"bug_information": "",
},
"crash_state": ANY_RE,
},
]
def GetRequest(url):
url_fh = urllib2.urlopen(url, None, 60)
try:
return url_fh.read()
finally:
url_fh.close()
def GetLatestV8InChromium():
"""Returns the commit position number of the latest v8 roll in chromium."""
# Check currently rolled v8 revision.
result = GetRequest(DEPS_LOG)
if not result:
return None
# Strip security header and load json.
commits = json.loads(result[5:])
git_revision = None
for commit in commits["log"]:
# Get latest commit that matches the v8 roll pattern. Ignore cherry-picks.
match = re.match(V8_COMMIT_RE, commit["message"])
if match:
git_revision = match.group(1)
break
else:
return None
# Get commit position number for v8 revision.
result = GetRequest(CRREV % git_revision)
if not result:
return None
commit = json.loads(result)
assert commit["repo"] == "v8/v8"
return commit["number"]
def APIRequest(key, **params):
"""Send a request to the clusterfuzz api.
Returns a json dict of the response.
"""
params["api_key"] = key
params = urllib.urlencode(params)
headers = {"Content-type": "application/x-www-form-urlencoded"}
try:
conn = httplib.HTTPSConnection(HOSTNAME)
conn.request("POST", "/_api/", params, headers)
response = conn.getresponse()
# Never leak "data" into public logs.
data = response.read()
except:
raise Exception("ERROR: Connection problem.")
try:
return json.loads(data)
except:
raise Exception("ERROR: Could not read response. Is your key valid?")
return None
def Main():
parser = argparse.ArgumentParser()
parser.add_argument("-k", "--key-file", required=True,
help="A file with the clusterfuzz api key.")
parser.add_argument("-r", "--results-file",
help="A file to write the results to.")
options = parser.parse_args()
# Get api key. The key's content must never be logged.
assert options.key_file
with open(options.key_file) as f:
key = f.read().strip()
assert key
revision_number = GetLatestV8InChromium()
results = []
for spec in BUG_SPECS:
args = dict(spec["args"])
    # Use incremented revision as we're interested in all revisions greater
    # than what's currently rolled into chromium.
if revision_number:
args["revision_greater_or_equal"] = str(int(revision_number) + 1)
# Never print issue details in public logs.
issues = APIRequest(key, **args)
assert issues is not None
for issue in issues:
if re.match(spec["crash_state"], issue["crash_state"]):
results.append(issue["id"])
if options.results_file:
with open(options.results_file, "w") as f:
f.write(json.dumps(results))
else:
print results
if __name__ == "__main__":
sys.exit(Main())
|
{
"content_hash": "25fd92d59e4d33aec4a95012f82f1899",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 79,
"avg_line_length": 23.829596412556054,
"alnum_prop": 0.6140383891607075,
"repo_name": "hkernbach/arangodb",
"id": "0fdffd93ac2756cb5640dd6fe00304b422ab8dd9",
"size": "5501",
"binary": false,
"copies": "9",
"ref": "refs/heads/devel",
"path": "3rdParty/V8/v5.7.492.77/tools/release/check_clusterfuzz.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89079"
},
{
"name": "Assembly",
"bytes": "391227"
},
{
"name": "Awk",
"bytes": "7502"
},
{
"name": "Batchfile",
"bytes": "62496"
},
{
"name": "C",
"bytes": "9184899"
},
{
"name": "C#",
"bytes": "96431"
},
{
"name": "C++",
"bytes": "278343201"
},
{
"name": "CMake",
"bytes": "664691"
},
{
"name": "CSS",
"bytes": "650173"
},
{
"name": "CWeb",
"bytes": "174166"
},
{
"name": "Cuda",
"bytes": "52444"
},
{
"name": "DIGITAL Command Language",
"bytes": "259402"
},
{
"name": "Emacs Lisp",
"bytes": "14637"
},
{
"name": "Fortran",
"bytes": "1856"
},
{
"name": "Groovy",
"bytes": "51836"
},
{
"name": "HTML",
"bytes": "2415724"
},
{
"name": "Java",
"bytes": "1048556"
},
{
"name": "JavaScript",
"bytes": "54219725"
},
{
"name": "LLVM",
"bytes": "24019"
},
{
"name": "Lex",
"bytes": "1231"
},
{
"name": "Lua",
"bytes": "17899"
},
{
"name": "M4",
"bytes": "658700"
},
{
"name": "Makefile",
"bytes": "522586"
},
{
"name": "Max",
"bytes": "36857"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "42998"
},
{
"name": "Objective-C",
"bytes": "98866"
},
{
"name": "Objective-C++",
"bytes": "2503"
},
{
"name": "PHP",
"bytes": "118092"
},
{
"name": "Pascal",
"bytes": "150599"
},
{
"name": "Perl",
"bytes": "906737"
},
{
"name": "Perl 6",
"bytes": "25883"
},
{
"name": "PowerShell",
"bytes": "20434"
},
{
"name": "Python",
"bytes": "4557865"
},
{
"name": "QMake",
"bytes": "16692"
},
{
"name": "R",
"bytes": "5123"
},
{
"name": "Rebol",
"bytes": "354"
},
{
"name": "Roff",
"bytes": "1089418"
},
{
"name": "Ruby",
"bytes": "1141022"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "508528"
},
{
"name": "Swift",
"bytes": "116"
},
{
"name": "Tcl",
"bytes": "1172"
},
{
"name": "TeX",
"bytes": "32117"
},
{
"name": "Visual Basic",
"bytes": "11568"
},
{
"name": "XSLT",
"bytes": "567028"
},
{
"name": "Yacc",
"bytes": "53063"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
# x, y_ and lambda1 were undefined in this snippet; minimal stand-ins added.
x = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')
lambda1 = 0.5  # assumed L2 regularisation weight
w = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))
y = tf.matmul(x, w)
loss = tf.reduce_mean(tf.square(y_ - y)) + tf.contrib.layers.l2_regularizer(lambda1)(w)
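# Evaluation sketch (illustrative; the toy feed values are assumptions):
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(loss, feed_dict={x: [[0.7, 0.9]], y_: [[1.0]]}))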
|
{
"content_hash": "9f02459a7ff5a46ef0776abc79ebe080",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 87,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.6804123711340206,
"repo_name": "pearpai/TensorFlow-action",
"id": "4f5f9b226924fbdf83265b1ea5528e1799923cf8",
"size": "194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_learning_with_tensorFlow/Chapter04/p8801.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "193301"
}
],
"symlink_target": ""
}
|
import argparse
import json
import os
import re
import urllib.request
_REPO_URL = 'https://dl.google.com/dl/android/maven2'
_GROUP_NAME = 'com/google/firebase'
_MODULE_NAME = 'firebase-iid'
_FILE_EXT = 'aar'
_OVERRIDE_LATEST = None
_PATCH_VERSION = 'cr1'
def do_latest():
if _OVERRIDE_LATEST is not None:
print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
return
maven_metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
_REPO_URL, _GROUP_NAME, _MODULE_NAME)
metadata = urllib.request.urlopen(maven_metadata_url).read().decode(
'utf-8')
# Do not parse xml with the python included parser since it is susceptible
# to maliciously crafted xmls. Only use regular expression parsing to be
# safe. RE should be enough to handle what we need to extract.
match = re.search('<latest>([^<]+)</latest>', metadata)
if match:
latest = match.group(1)
else:
# if no latest info was found just hope the versions are sorted and the
# last one is the latest (as is commonly the case).
latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
print(latest + f'.{_PATCH_VERSION}')
def get_download_url(version):
# Remove the patch version when getting the download url
version_no_patch, patch = version.rsplit('.', 1)
if patch.startswith('cr'):
version = version_no_patch
file_url = '{0}/{1}/{2}/{3}/{2}-{3}.{4}'.format(_REPO_URL, _GROUP_NAME,
_MODULE_NAME, version,
_FILE_EXT)
file_name = file_url.rsplit('/', 1)[-1]
partial_manifest = {
'url': [file_url],
'name': [file_name],
'ext': '.' + _FILE_EXT,
}
print(json.dumps(partial_manifest))
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser('latest')
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser('get_url')
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
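# Invocation sketch (matches the argparse subcommands above; version values
# are illustrative, not real releases):
#   python fetch.py latest                          -> prints e.g. "21.1.0.cr1"
#   _3PP_VERSION=21.1.0.cr1 python fetch.py get_url -> prints a JSON manifest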
|
{
"content_hash": "c8344e63de273d46a68da00e9e5cd72d",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 31.542857142857144,
"alnum_prop": 0.5942028985507246,
"repo_name": "nwjs/chromium.src",
"id": "f4f2531790f18df54ae45a74441fc1db03eec08f",
"size": "2497",
"binary": false,
"copies": "2",
"ref": "refs/heads/nw70",
"path": "third_party/android_deps/libs/com_google_firebase_firebase_iid/3pp/fetch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
all dj-webmachine resources should inherit from the Resource object:
.. code-block:: python
from webmachine import Resource
class MyResource(Resource):
pass
All Resource methods are of the signature:
.. code-block:: python
def f(self, req, resp):
return result
``req`` is a :class:`django.http.HttpRequest` instance, and ``resp`` a
:class:`django.http.HttpResponse` instance. These instances have been
:ref:`improved to support more HTTP semantics <http>`. At any time you
can manipulate these objects to return the response you want or pass
values to other methods.
There are over 30 Resource methods you can define, but any of them can
be omitted as they have reasonable defaults.
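For example, a minimal read-only resource overrides a single method
(a sketch; ``to_html`` is an illustrative handler name):
.. code-block:: python
    class Hello(Resource):
        def content_types_provided(self, req, resp):
            return [("text/html", self.to_html)]
        def to_html(self, req, resp):
            return "<html><body>Hello!</body></html>"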
"""
from __future__ import with_statement
from datetime import datetime
import os
import re
import sys
import traceback
import types
try:
import json
except ImportError:
import django.utils.simplejson as json
from django.utils.translation import activate, deactivate_all, get_language, \
string_concat
from django.utils.encoding import smart_str, force_unicode
from webmachine.exc import HTTPException, HTTPInternalServerError
from webmachine.wrappers import WMRequest, WMResponse
from webmachine.decisions import b13, TRANSITIONS, first_match
CHARSET_RE = re.compile(r';\s*charset=([^;]*)', re.I)
get_verbose_name = lambda class_name: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name).lower().strip()
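# e.g. get_verbose_name('MyFancyResource') -> 'my fancy resource'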
DEFAULT_NAMES = ('verbose_name', 'app_label', 'resource_path')
def update_trace(resource, state, req, resp, trace):
if not resource.trace:
# do nothing
return
infos = {
"request": {
"headers": req.headers.items(),
"get": [(k, req.GET.getlist(k)) for k in req.GET],
"post": [(k, req.POST.getlist(k)) for k in req.POST],
"cookies": [(k, req.COOKIES.get(k)) for k in req.COOKIES],
"url_args": req.url_args,
"url_kwarg": req.url_kwargs
},
"response": {
"code": resp.status_code,
"headers": resp.headerlist
}
}
if hasattr(req, 'session'):
infos['request'].update({
'session': [(k, req.session.get(k)) for k in \
req.session.keys()]
})
if isinstance(state, int):
name = str(state)
else:
name = state.__name__
trace.append((name, infos))
def update_ex_trace(trace, e):
trace.append(("error", traceback.format_exc()))
def write_trace(res, trace):
if not res.trace:
return
    trace_path = res.trace_path or "/tmp"
now = datetime.now().replace(microsecond=0).isoformat() + 'Z'
fname = os.path.join(os.path.abspath(trace_path),
"wmtrace-%s-%s.json" % (res.__class__.__name__, now))
with open(fname, "w+b") as f:
f.write(json.dumps(trace))
class Options(object):
""" class based on django.db.models.options. We only keep
useful bits."""
def __init__(self, meta, app_label=None):
self.module_name, self.verbose_name = None, None
self.verbose_name_plural = None
self.resource_path = None
self.object_name, self.app_label = None, app_label
self.meta = meta
def contribute_to_class(self, cls, name):
cls._meta = self
# First, construct the default values for these options.
self.object_name = cls.__name__
self.module_name = self.object_name.lower()
self.verbose_name = get_verbose_name(self.object_name)
self.resource_path = self.module_name
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
# verbose_name_plural is a special case because it uses a 's'
# by default.
setattr(self, 'verbose_name_plural', meta_attrs.pop('verbose_name_plural',
string_concat(self.verbose_name, 's')))
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
def __str__(self):
return "%s.%s" % (smart_str(self.app_label), smart_str(self.module_name))
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
lang = get_language()
deactivate_all()
raw = force_unicode(self.verbose_name)
activate(lang)
return raw
verbose_name_raw = property(verbose_name_raw)
class ResourceMeta(type):
def __new__(cls, name, bases, attrs):
super_new = super(ResourceMeta, cls).__new__
parents = [b for b in bases if isinstance(b, ResourceMeta)]
if not parents:
return super_new(cls, name, bases, attrs)
new_class = super_new(cls, name, bases, attrs)
attr_meta = attrs.pop('Meta', None)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
if getattr(meta, 'app_label', None) is None:
document_module = sys.modules[new_class.__module__]
app_label = document_module.__name__.split('.')[-2]
else:
app_label = getattr(meta, 'app_label')
new_class.add_to_class('_meta', Options(meta, app_label=app_label))
return new_class
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
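# Names of all the overridable Resource callbacks defined on the class below.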
RESOURCE_METHODS = ["allowed_methods", "allow_missing_post",
"auth_required", "charsets_provided", "content_types_accepted",
"content_types_provided", "created_location", "delete_completed",
"delete_resource", "encodings_provided", "expires", "finish_request",
"forbidden", "format_suffix_accepted", "generate_etag", "is_authorized",
"is_conflict", "known_content_type", "known_methods",
"languages_provided", "last_modified", "malformed_request",
"moved_permanently", "moved_temporarily", "multiple_choices", "options",
"ping", "post_is_create", "previously_existed", "process_post",
"resource_exists", "service_available", "uri_too_long",
"valid_content_headers", "valid_entity_length", "variances"]
# FIXME: we should probably wrap the full HttpRequest object instead of
# adding properties to it in __call__ . Also datetime_utils surely has an
# equivalent in Django.
class Resource(object):
__metaclass__ = ResourceMeta
base_url = None
csrf_exempt = True
url_regexp = r"^$"
trace = False
trace_path = None
def allowed_methods(self, req, resp):
"""
If a Method not in this list is requested, then a
405 Method Not Allowed will be sent. Note that
these are all-caps and are string.
:return: [Method]
"""
return ["GET", "HEAD"]
def allow_missing_post(self, req, resp):
"""
If the resource accepts POST requests to nonexistent resources,
then this should return True.
:return: True or False
"""
return False
def auth_required(self, req, resp):
"""
:return: True or False
"""
return True
def charsets_provided(self, req, resp):
"""
If this is anything other than None, it must be a list of pairs
where each pair is of the form Charset, Converter where Charset
is a string naming a charset and Converter is a callable function
in the resource which will be called on the produced body in a GET
and ensure that it is in Charset.
Ex:
return [("iso-8859-1", lambda x: x)]
Returning None prevents the character set negotiation
logic.
:return: [(Charset, Handler)]
"""
return None
def content_types_accepted(self, req, resp):
"""
This is used similarly to content_types_provided,
except that it is for incoming resource representations
-- for example, PUT requests.
:return: [(MediaType, Handler)] or None
"""
return []
def content_types_provided(self, req, resp):
"""
This should return a list of pairs where each pair is of the form
(Mediatype, Handler) where Mediatype is a string of content-type
format and the Handler is an callable function which can provide
a resource representation in that media type. Content negotiation
is driven by this return value. For example, if a client request
includes an Accept header with a value that does not appear as a
first element in any of the return tuples, then a 406 Not Acceptable
will be sent.
:return: [(MediaType, Handler)] or None
"""
return [
("text/html", self.to_html)
]
def created_location(self, req, resp):
"""
:return: Path or None
"""
return None
def delete_completed(self, req, resp):
"""
This is only called after a successful delete_resource
call, and should return false if the deletion was accepted
but cannot yet be guaranteed to have finished.
:return: True or False
"""
return True
def delete_resource(self, req, resp):
"""
This is called when a DELETE request should be enacted,
and should return true if the deletion succeeded.
:return: True or False
"""
return False
def encodings_provided(self, req, resp):
"""\
This must be a list of pairs where in each pair Encoding
is a string naming a valid content encoding and Encoder
is a callable function in the resource which will be
called on the produced body in a GET and ensure that it
is so encoded. One useful setting is to have the function
check on method, and on GET requests return [("identity", lambda x: x)]
as this is all that is needed to support identity encoding.
return [("identity", lambda x: x)]
Returning None prevents the encoding negotiation logic.
:return: [(Encoding, Encoder)]
"""
return None
def expires(self, req, resp):
"""
        :return: None or Date string
"""
return None
def finish_request(self, req, resp):
"""
This function, if defined, is called just before the final
response is constructed and sent. The Result is ignored, so
any effect of this function must be by returning a modified
request.
:return: True or False
"""
return True
def forbidden(self, req, resp):
"""
:return: True or False
"""
return False
def format_suffix_accepted(self, req, resp):
"""
Allows you to force the accepted format depending on path
suffix.
Ex: return [("json", "application/json")]
will allows to force `Accept` header to `application/json` on
url `/some/url.json`
:return: [(Suffix, MediaType)] or None
"""
return []
def generate_etag(self, req, resp):
"""
If this returns a value, it will be used as the value of the ETag
header and for comparison in conditional requests.
:return: Str or None
"""
return None
def is_authorized(self, req, resp):
"""
If this returns anything other than true, the response will
be 401 Unauthorized. The AuthHead return value will be used
as the value in the WWW-Authenticate header.
:return: True or False
"""
return True
def is_conflict(self, req, resp):
"""
If this returns true, the client will receive a 409 Conflict.
:return: True or False
"""
return False
def known_content_type(self, req, resp):
"""
:return: True or False
"""
return True
def known_methods(self, req, resp):
"""
:return: set([Method])
"""
return set([
"GET", "HEAD", "POST", "PUT", "DELETE",
"TRACE", "CONNECT", "OPTIONS"
])
def languages_provided(self, req, resp):
"""\
return ["en", "es", "en-gb"]
returning None short circuits the language negotiation
:return: [Language]
"""
return None
def last_modified(self, req, resp):
"""
:return: DateString or None
"""
return None
def malformed_request(self, req, resp):
"""
:return: True or False
"""
return False
def moved_permanently(self, req, resp):
"""
:return: True Or False
"""
return False
def moved_temporarily(self, req, resp):
"""
:return: True or False
"""
return False
def multiple_choices(self, req, resp):
"""
If this returns true, then it is assumed that multiple
representations of the response are possible and a single
one cannot be automatically chosen, so a 300 Multiple Choices
will be sent instead of a 200.
:return: True or False
"""
return False
def options(self, req, resp):
"""
If the OPTIONS method is supported and is used, the return
value of this function is expected to be a list of pairs
representing header names and values that should appear
in the response.
:return: [(HeaderName, Value)]
"""
return []
def ping(self, req, resp):
"""
:return: True or False
"""
return True
def post_is_create(self, req, resp):
"""
If POST requests should be treated as a request to put content
into a (potentially new) resource as opposed to being a generic
submission for processing, then this function should return true.
If it does return true, then created_location will be called and the
rest of the request will be treated much like a PUT to the Path
entry returned by that call.
:return: True or False
"""
return False
def previously_existed(self, req, resp):
"""
:return: True or False
"""
return False
def process_post(self, req, resp):
"""
If post_is_create returns false, then this will be called to process
any POST requests. If it succeeds, it should return True.
:return: True or False
"""
return False
def resource_exists(self, req, resp):
"""
Returning non-true values will result in 404 Not Found.
:return: True or False
"""
return True
def service_available(self, req, resp):
"""
:return: True or False
"""
return True
def uri_too_long(self, req, resp):
"""
:return: True or False
"""
return False
def valid_content_headers(self, req, resp):
"""
:return: True or False
"""
return True
def valid_entity_length(self, req, resp):
"""
:return: True or False
"""
return True
def variances(self, req, resp):
"""
If this function is implemented, it should return a list
of strings with header names that should be included in
a given response's Vary header. The standard conneg headers
(Accept, Accept-Encoding, Accept-Charset, Accept-Language)
do not need to be specified here as Webmachine will add the
correct elements of those automatically depending on resource
behavior.
        :return: [HeaderName]
"""
return []
def get_urls(self):
"""
        Method used to register urls in Django URL routing.
:return: urlpattern
"""
from django.conf.urls.defaults import patterns, url
regexp = getattr(self, "url_regexp") or r'^$'
urlpatterns = patterns('',
url(regexp, self, name="%s_index" % self.__class__.__name__),
)
return urlpatterns
###################
# PRIVATE METHODS #
###################
def _process(self, req, *args, **kwargs):
""" Process request and return the response """
req = WMRequest(req.environ, *args, **kwargs)
# initialize response object
resp = WMResponse(request=req)
# force format ?
url_parts = req.path.rsplit(".", 1)
try:
fmt = url_parts[1]
fctype = first_match(self.format_suffix_accepted, req, resp,
fmt)
if fctype is not None:
req.META['HTTP_ACCEPT'] = fctype
except IndexError:
pass
ctypes = [ct for (ct, func) in (self.content_types_provided(req, resp) or [])]
if len(ctypes):
ctype = ctypes[0]
if not ctype:
ctype = resp.default_content_type
resp.content_type = ctype
trace = []
try:
state = b13
while not isinstance(state, int):
if state(self, req, resp):
state = TRANSITIONS[state][0]
else:
state = TRANSITIONS[state][1]
if not isinstance(state, (int, types.FunctionType)):
raise HTTPInternalServerError("Invalid state: %r" % state)
update_trace(self, state, req, resp, trace)
resp.status_code = state
except HTTPException, e:
# Error while processing request
# Return HTTP response
update_ex_trace(trace, e)
return e
self.finish_request(req, resp)
# write the trace if needed
write_trace(self, trace)
# hack, django try to cache all the response and put it in
# pickle rather than just caching needed infos.
# since request object isn't pickable, remove it before
# returning.
del resp.request
# return final response.
return resp
def __call__(self, request, *args, **kwargs):
return self._process(request, *args, **kwargs)
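def _example_hello_resource():
    """A minimal usage sketch, not part of the original module.

    It assumes the class defined above is exported as ``webmachine.Resource``
    (as in the dj-webmachine README); the ``Hello`` name and the response
    body are illustrative only.
    """
    from webmachine import Resource

    class Hello(Resource):
        # Only one representation is provided, so the default
        # content_types_provided() -> [("text/html", to_html)] applies and
        # GET requests are answered by to_html().
        def to_html(self, req, resp):
            return "<html><body>Hello world!</body></html>"

    # In a Django urlconf the resource registers itself via get_urls():
    #     urlpatterns = Hello().get_urls()
    return Hello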
|
{
"content_hash": "9e2e89bb6ffb093cae2e4d0a47595775",
"timestamp": "",
"source": "github",
"line_count": 640,
"max_line_length": 122,
"avg_line_length": 30.5578125,
"alnum_prop": 0.57646878355576,
"repo_name": "benoitc/dj-webmachine",
"id": "cc4519b7a240c1375fa14a424f3cd8f73db57e00",
"size": "19692",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "webmachine/resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1515"
},
{
"name": "HTML",
"bytes": "2446"
},
{
"name": "JavaScript",
"bytes": "18922"
},
{
"name": "Python",
"bytes": "120249"
}
],
"symlink_target": ""
}
|
from rna_tools.tools.simrna_trajectory.simrna_trajectory import SimRNATrajectory
s = SimRNATrajectory()
s.load_from_file('rp14_aa22-6d8fb934_ALL.trafl', top_level=True)
s.plot_energy('plot1.png')
s.sort()
s.plot_energy('plot2.png')
|
{
"content_hash": "05c73929c6ef6886a0f618b8ac01e7bc",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 80,
"avg_line_length": 38.666666666666664,
"alnum_prop": 0.7758620689655172,
"repo_name": "m4rx9/rna-pdb-tools",
"id": "bc6cfea4233ac54a58e0bc05e668c550464732fa",
"size": "254",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "notes/SimRNA_and_SimRNAweb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34107"
},
{
"name": "Shell",
"bytes": "1130"
}
],
"symlink_target": ""
}
|
"""Configuration structure and functionality for testing config validity."""
import argparse
import getpass
import grp
import inspect
import logging
import multiprocessing
import os
import pathlib
import pwd
import socket
import tempfile
from .exceptions import ConfigException
from .utils import Singleton, get_version, mailname
__all__ = ("parse_cmd_args", "warn_options", "config_test", "Config")
"""Tuple all the things."""
def parse_cmd_args(args):
"""
Parse arguments from the command line.
https://kura.gg/blackhole/configuration.html#command-line-options
:param list args: Command line arguments.
:returns: Parsed command line arguments.
:rtype: :py:class:`argparse.Namespace`
"""
ls_help = (
"Disable ssl.OP_SINGLE_DH_USE and ssl.OP_SINGLE_ECDH_USE. "
"Reduces CPU overhead at the expense of security -- Don't "
"use this option unless you really need to."
)
description = (
"Blackhole is an MTA (mail transfer agent) that "
"(figuratively) pipes all mail to /dev/null. Blackhole is "
"built on top of asyncio and utilises async def and await "
"statements available in Python 3.5 and above."
)
epilog = (
"An explanation of all command line and all configuration "
"options is provided here -- "
"https://kura.gg/blackhole/configuration.html"
)
parser = argparse.ArgumentParser(
"blackhole",
description=description,
epilog=epilog,
)
parser.add_argument(
"-v",
"--version",
action="version",
version=get_version(),
)
parser.add_argument(
"-c",
"--conf",
type=str,
dest="config_file",
metavar="FILE",
help="override the default configuration options",
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-t",
"--test",
dest="test",
action="store_true",
help="perform a configuration test",
)
group.add_argument(
"-d",
"--debug",
dest="debug",
action="store_true",
help="enable debugging mode",
)
group.add_argument(
"-b",
"--background",
dest="background",
action="store_true",
help="run in the background",
)
group.add_argument(
"-q",
"--quiet",
dest="quiet",
action="store_true",
help="Suppress warnings",
)
parser.add_argument(
"-ls",
"--less-secure",
dest="less_secure",
action="store_true",
help=ls_help,
)
return parser.parse_args(args)
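def _example_parse_cmd_args():
    """Usage sketch for parse_cmd_args (the file name below is hypothetical)."""
    args = parse_cmd_args(["-c", "blackhole.conf", "-t"])
    # argparse returns a Namespace whose attribute names match the ``dest``
    # values declared above.
    assert args.config_file == "blackhole.conf"
    assert args.test is True
    return args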
def warn_options(config):
"""
Warn the user when using certain options.
:param Config config: The configuration.
"""
logger = logging.getLogger("blackhole.warnings")
if config.args.less_secure:
logger.warning(
"Using -ls or --less-secure reduces security on "
"SSL/TLS connections.",
)
if not config.tls_dhparams and len(config.tls_listen) > 0:
logger.warning(
"TLS is enabled but no Diffie Hellman ephemeral "
"parameters file was provided.",
)
_compare_uid_and_gid(config)
def config_test(args):
"""
Test the validity of the configuration file content.
:param argparse.Namespace args: Parsed arguments.
:raises SystemExit: Exit code :py:obj:`os.EX_OK` when config is valid
or :py:obj:`os.EX_USAGE` when config is invalid.
.. note::
Problems with the configuration will be written to the console using
the :py:mod:`logging` module.
"""
logger = logging.getLogger("blackhole.config.test")
logger.setLevel(logging.INFO)
try:
conf = Config(args.config_file).load().test()
conf.args = args
except ConfigException:
logger.critical("Config error")
raise SystemExit(os.EX_USAGE)
logger.info(f"blackhole: {args.config_file} syntax is OK.")
logger.info(f"blackhole: {args.config_file} test was successful.")
warn_options(conf)
raise SystemExit(os.EX_OK)
def _compare_uid_and_gid(config):
"""
Compare the current user and group and conf settings.
:param Config config: The configuration.
"""
logger = logging.getLogger("blackhole.warnings")
uid, gid = os.getuid(), os.getgid()
user, group = config.user, config.group
if (uid == 0 and gid == 0) and (user == "root" and group == "root"):
logger.warning(
"It is unsafe to run Blackhole as root without setting "
"a user and group for privilege separation.",
)
class Config(metaclass=Singleton):
"""
Configuration module.
Default values are provided as well as self-test functionality
to sanity check configuration.
https://kura.gg/blackhole/configuration.html#configuration-options
"""
args = None
"""Arguments parsed from the command line."""
config_file = None
"""A file containing configuration values."""
_workers = 1
_listen = []
_tls_listen = []
_user = None
_group = None
_timeout = 60
_tls_key = None
_tls_cert = None
_tls_dhparams = None
_pidfile = os.path.join(tempfile.gettempdir(), "blackhole.pid")
_delay = None
_mode = "accept"
_max_message_size = 512000
_dynamic_switch = None
def __init__(self, config_file=None):
"""
Initialise the configuration.
:param str config_file: The configuration file path. Default: ``None``
"""
if config_file:
self.config_file = config_file
self.user = getpass.getuser()
self.group = grp.getgrgid(os.getgid()).gr_name
# this has to be cached here due to the socket.getfqdn call failing
# in os.fork
self.mailname = mailname()
def load(self):
"""
Load the configuration file and parse.
:raises ConfigException: When the configuration is invalid.
:returns: :class:`Config`.
.. note::
           Spaces, single and double quotes will be stripped. Lines beginning
           with # will be ignored. In-line # comments will be stripped out
           and ignored.
i.e.
# listen = :1025, :::1025 ->
listen = :25, :::25 # IPv4 & IPv6 -> listen = :25, :::25
"""
if self.config_file is None:
return self
if not os.access(self.config_file, os.R_OK):
msg = "Config file does not exist or is not readable."
raise ConfigException(msg)
with open(self.config_file, "r") as _conf_file:
for line in _conf_file.readlines():
line = line.strip()
if line.startswith("#"):
continue
if line.strip() == "":
continue
if "#" in line:
line = line.split("#")[0]
if line.count("=") >= 1:
key, value = line.split("=", 1)
key, value = key.strip(), value.strip()
self.validate_option(key)
value = value.replace('"', "").replace("'", "")
setattr(self, key, value)
else:
self.validate_option(line)
return self
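    # Illustrative configuration file sketch (hypothetical values) showing the
    # parsing rules documented above -- quotes are stripped, "#" starts a
    # comment and in-line comments are discarded:
    #
    #     # blackhole.conf (example)
    #     listen = :25, :::25        # IPv4 and IPv6 on port 25
    #     workers = 2
    #     mode = "bounce"            # quotes are removed before assignment
    #     delay = 5
    #
    # Loading it is then a one-liner: ``conf = Config("blackhole.conf").load()``.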
def validate_option(self, key):
"""
Validate config option is actually... valid...
https://kura.gg/blackhole/configuration.html#configuration-options
:param str key: Configuration option.
:raises ConfigException: When an invalid option is configured.
"""
if key == "":
return
attributes = inspect.getmembers(
self,
lambda a: not (inspect.isroutine(a)),
)
attrs = [
a[0][1:]
for a in attributes
if not (a[0].startswith("__") and a[0].endswith("__"))
and a[0].startswith("_")
]
if key not in attrs:
_attrs = "', '".join(attrs[:-1])
valid_attrs = f"'{_attrs}' and '{attrs[-1]}'"
msg = (
f"Invalid configuration option '{key}'.\n\nValid options "
f"are: {valid_attrs}"
)
raise ConfigException(msg)
@property
def workers(self):
"""
How many workers to spawn to handle incoming connections.
https://kura.gg/blackhole/configuration.html#workers
:returns: Number of workers. Default: ``1``
:rtype: :py:obj:`int`
.. note::
Default value is 1.
A supervisor process will always exist separately from the workers.
"""
return int(self._workers) or 1
@workers.setter
def workers(self, workers):
self._workers = workers
@property
def listen(self):
"""
Address, port and socket family.
https://kura.gg/blackhole/configuration.html#listen
:returns: Listeners.
:rtype: :py:obj:`list`
.. note::
           Default IPv4:
           [('127.0.0.1', 25, socket.AF_INET, {}),
            ('127.0.0.1', 587, socket.AF_INET, {})]
           Default IPv6:
           [('::', 25, socket.AF_INET6, {}),
            ('::', 587, socket.AF_INET6, {})]
"""
ipv4 = [
("127.0.0.1", 25, socket.AF_INET, {}),
("127.0.0.1", 587, socket.AF_INET, {}),
]
ipv6 = [
("::", 25, socket.AF_INET6, {}),
("::", 587, socket.AF_INET6, {}),
]
default = ipv4 + ipv6 if socket.has_ipv6 else ipv4
return self._listen or default
@listen.setter
def listen(self, addrs):
self._listen = self._listeners(addrs)
@property
def tls_listen(self):
"""
Address and port and socket family for SSL/TLS connections.
https://kura.gg/blackhole/configuration.html#tls-listen
:returns: TLS listeners. Default: ``[]``
:rtype: :py:obj:`list`
"""
return self._tls_listen or []
@tls_listen.setter
def tls_listen(self, addrs):
self._tls_listen = self._listeners(addrs)
@property
def user(self):
"""
UNIX user.
https://kura.gg/blackhole/configuration.html#user
:returns: User name.
:rtype: :py:obj:`str`
.. note::
Defaults to the current user.
"""
return self._user
@user.setter
def user(self, user):
self._user = user
@property
def group(self):
"""
UNIX group.
https://kura.gg/blackhole/configuration.html#group
:returns: Group name.
:rtype: :py:obj:`str`
.. note::
Defaults to the current group.
"""
return self._group
@group.setter
def group(self, group):
self._group = group
@property
def timeout(self):
"""
Timeout in seconds.
https://kura.gg/blackhole/configuration.html#timeout
:returns: Timeout in seconds. Default: ``60``
:rtype: :py:obj:`int`
.. note::
Defaults to 60 seconds.
           Cannot be more than 180 seconds for security (denial of service).
"""
return int(self._timeout)
@timeout.setter
def timeout(self, timeout):
self._timeout = timeout
@property
def tls_key(self):
"""
TLS key file.
https://kura.gg/blackhole/configuration.html#tls-key
:returns: Path to a TLS key file. Default: ``None``
:rtype: :py:obj:`str`
"""
return self._tls_key
@tls_key.setter
def tls_key(self, tls_key):
if tls_key is not None:
self._tls_key = tls_key
@property
def tls_cert(self):
"""
TLS certificate file.
https://kura.gg/blackhole/configuration.html#tls-cert
:returns: Path to a TLS certificate. Default: ``None``
:rtype: :py:obj:`str`
"""
return self._tls_cert
@tls_cert.setter
def tls_cert(self, tls_cert):
if tls_cert is not None:
self._tls_cert = tls_cert
@property
def tls_dhparams(self):
"""
Diffie Hellman ephemeral parameters.
https://kura.gg/blackhole/configuration.html#tls-dhparams
:returns: Path to a file containing dhparams. Default: ``None``
:rtype: :py:obj:`str`
"""
return self._tls_dhparams
@tls_dhparams.setter
def tls_dhparams(self, tls_dhparams):
if tls_dhparams is not None:
self._tls_dhparams = tls_dhparams
@property
def pidfile(self):
"""
Path to store the pid.
https://kura.gg/blackhole/configuration.html#pidfile
:returns: Path to a pid file. Default: ``/tmp/blackhole.pid``.
:rtype: :py:obj:`str`
"""
return self._pidfile
@pidfile.setter
def pidfile(self, pidfile):
if pidfile is not None:
self._pidfile = pidfile
@property
def delay(self):
"""
Delay in seconds.
https://kura.gg/blackhole/configuration.html#delay
:returns: Delay in seconds. Default: ``None``
:rtype: :py:obj:`int` or :py:obj:`None`
.. note::
Defaults to :py:obj:`None`.
Cannot be higher than 60 seconds for security (denial of service).
"""
if self._delay is not None:
return int(self._delay)
return None
@delay.setter
def delay(self, delay):
self._delay = delay
@property
def mode(self):
"""
Mode with which to respond.
https://kura.gg/blackhole/configuration.html#mode
:returns: A response mode. Default: ``accept``.
:rtype: :py:obj:`str`
.. note::
Defaults to 'accept'.
Options: 'accept', 'bounce' and 'random'.
"""
return self._mode
@mode.setter
def mode(self, mode):
self._mode = mode.lower()
@property
def max_message_size(self):
"""
Maximum size, in bytes, of a message.
https://kura.gg/blackhole/configuration.html#max-message-size
:returns: Maximum message size in bytes. Default: ``512000``.
:rtype: :py:obj:`int`
.. note::
Default 512000 bytes (512 KB).
"""
if self._max_message_size is not None:
return int(self._max_message_size)
return None
@max_message_size.setter
def max_message_size(self, size):
self._max_message_size = size
@property
def dynamic_switch(self):
"""
Enable or disable dynamic switches.
https://kura.gg/blackhole/configuration.html#dynamic-switch
:returns: Whether dynamic switches are enabled or not. Default:
``True``.
:rtype: :py:obj:`bool`
.. note::
Allowed values are :py:obj:`True` and :py:obj:`False`.
Default: :py:obj:`True`
"""
if self._dynamic_switch is None:
return True
return self._dynamic_switch
@dynamic_switch.setter
def dynamic_switch(self, switch):
if switch.lower() == "false":
self._dynamic_switch = False
elif switch.lower() == "true":
self._dynamic_switch = True
else:
msg = f"{switch} is not valid. Options are true or false."
raise ConfigException(msg)
def _convert_port(self, port):
"""
Convert a port from the configuration files' string to an integer.
:param str port: A port number.
:type port: :py:obj:`str`
        :raises ConfigException: If an invalid port number is provided.
:returns: A port number.
:rtype: :py:obj:`int`
"""
try:
return int(port)
except ValueError:
msg = f"{port} is not a valid port number."
raise ConfigException(msg)
def _listeners(self, listeners):
"""
Convert listeners lines from the configuration to usable values.
:param str listeners: A list of addresses and ports, separated by
commas.
-- e.g. '127.0.0.1:25, 10.0.0.1:25, :25, :::25'
:returns: List of addresses and sockets to listen on.
:rtype: :py:obj:`list` or :py:obj:`None`
"""
clisteners = []
_listeners = listeners.split(",")
if len(_listeners) == 0:
return None
for listener in _listeners:
listener = listener.strip()
parts = listener.split(" ")
addr_port = parts[0]
port = addr_port.split(":")[-1].strip()
addr = addr_port.replace(f":{port}", "").strip()
family = socket.AF_INET
if ":" in addr:
family = socket.AF_INET6
flags = {}
if len(parts) > 1:
flags = self.create_flags(parts[1:])
host = (addr, self._convert_port(port), family, flags)
clisteners.append(host)
return clisteners
def flags_from_listener(self, addr, port):
"""
Get a list of flags defined for the provided listener.
Scope: ``listen``, ``tls_listen``.
:param str addr: The listener host address.
:param int port: The listener port.
:returns: Flags defined for this socket. Default: ``{}``.
:rtype: :py:obj:`dict`
.. note::
If multiple modes or delays are listed in a single listener, the
last definition will be used:
``listen = :25 mode=bounce mode=random`` -> ``mode=random``
           A mode and delay can be used in tandem:
``listen = :25 mode=bounce delay=10``
The delay can also be specified as a range:
``listen = :25 delay=5-10``
Using a delay range will cause the server to choose a random value
within that range per connection.
Mode and delay can be defined for each address/port in a listen
directive:
``listen = :25 mode=bounce, :::25 delay=10, :587 mode=random``
"""
if addr in ("127.0.0.1", "0.0.0.0"): # nosec
addr = ""
elif addr in ("::1",):
addr = "::"
listeners = self.listen + self.tls_listen
for laddr, lport, __, lflags in listeners:
if laddr == addr and lport == port:
return lflags
return {}
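    # Sketch of how a listen directive maps onto parsed listeners and flags
    # (values are illustrative), via _listeners() and create_flags() below:
    #
    #     listen = :25 mode=bounce delay=5-10, :::587 mode=random
    #
    # roughly yields:
    #
    #     [("", 25, socket.AF_INET, {"mode": "bounce", "delay": ("5", "10")}),
    #      ("::", 587, socket.AF_INET6, {"mode": "random"})]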
def create_flags(self, parts):
"""
Create a set of flags from a listener directive.
:param list parts: Parts of the listener definition.
:returns: Flags for a listener. Default: ``{}``.
:rtype: :py:obj:`dict`
"""
flags = {}
for part in parts:
if part.count("=") == 1:
flag, value = part.split("=")
flag, value = flag.strip(), value.strip()
if flag in ("mode", "delay"):
if flag == "mode":
flags.update(self._flag_mode(flag, value))
elif flag == "delay":
flags.update(self._flag_delay(flag, value))
return flags
def _flag_mode(self, flag, value):
"""
Create a flag for the mode directive.
:param str flag: The flag name.
:param str value: The value of the flag.
:returns: Mode flag for a listener.
:rtype: :py:obj:`dict`
:raises ConfigException: If an invalid mode is provided.
"""
if value in ("accept", "bounce", "random"):
return {flag: value}
else:
raise ConfigException(
f"'{value}' is not a valid mode. Valid options "
"are: 'accept', 'bounce' and 'random'.",
)
def _flag_delay(self, flag, value):
"""
Create a delay flag, delay can be an int or a range.
:param str flag: The flag name.
:param str value: The value of the flag.
:returns: Delay flag for a listener.
:rtype: :py:obj:`dict`
:raises ConfigException: If an invalid delay is provided.
"""
if value.count("-") == 0:
if value.isdigit() and int(value) < 60:
return {flag: value}
else:
raise ConfigException(
f"'{value}' is not a valid delay. Delay is "
"in seconds and must be below 60.",
)
if value.count("-") == 1:
start, end = value.split("-")
start, end = start.strip(), end.strip()
if (
start.isdigit()
and end.isdigit()
and int(start) < 60
and int(end) < 60
and int(end) > int(start)
):
return {flag: (start, end)}
raise ConfigException(
f"'{value}' is not a valid delay value. It must be "
"either a single value or a range i.e. 5-10 and "
"must be less than 60.",
)
def test(self):
r"""
Test configuration validity.
:returns: The configuration object.
:rtype: :class:`Config`
.. note::
Uses the magic of :py:func:`inspect.getmembers` to introspect
methods beginning with \'test\_\' and calling them.
"""
members = inspect.getmembers(self, predicate=inspect.ismethod)
for name, _ in members:
if name.startswith("test_"):
getattr(self, name)()
return self
def test_workers(self):
"""
Validate the number of workers.
:raises ConfigException: If an invalid number of workers is provided.
.. note::
Cannot have more workers than number of processors or cores.
"""
cpus = multiprocessing.cpu_count()
if self.workers > cpus:
msg = (
"Cannot have more workers than number of processors or "
f"cores. {self.workers} workers > {cpus} processors/cores."
)
raise ConfigException(msg)
def test_ipv6_support(self):
"""
If an IPv6 listener is configured, confirm IPv6 is supported.
:raises ConfigException: If IPv6 is configured but is not supported by
the operating system.
"""
for address, __, family, ___ in self.listen:
if ":" in address:
if not socket.has_ipv6 and family == socket.AF_UNSPEC:
msg = (
"An IPv6 listener is configured but IPv6 is not "
"available on this platform."
)
raise ConfigException(msg)
def test_tls_ipv6_support(self):
"""
If an IPv6 listener is configured, confirm IPv6 is supported.
:raises ConfigException: If IPv6 is configured but is not supported by
the operating system.
"""
for address, __, family, ___ in self.tls_listen:
if ":" in address:
if not socket.has_ipv6 and family == socket.AF_UNSPEC:
msg = (
"An IPv6 listener is configured but IPv6 is not "
"available on this platform."
)
raise ConfigException(msg)
def test_same_listeners(self):
"""
Test that multiple listeners are not configured on the same port.
:raises ConfigException: When multiple listeners are configured on the
same port.
.. note::
IPv4 and IPv6 addresses are different sockets so they can listen on
the same port because they have different addresses.
"""
if len(self.listen) == 0 and len(self.tls_listen) == 0:
return
listen, tls_listen = [], []
for llisten in self.listen:
addr, port, family, flags = llisten
listen.append((addr, port, family))
for llisten in self.tls_listen:
addr, port, family, flags = llisten
tls_listen.append((addr, port, family))
if set(listen).intersection(tls_listen):
msg = (
"Cannot have multiple listeners on the same address and "
"port."
)
raise ConfigException(msg)
def test_no_listeners(self):
"""
Test that at least one listener is configured.
:raises ConfigException: When no listeners are configured.
"""
if not len(self.listen) > 0 and not len(self.tls_listen) > 0:
msg = "You need to define at least one listener."
raise ConfigException(msg)
def _min_max_port(self, port):
"""
Minimum and maximum allowed port.
:param int port: The port to test for validity.
:raises ConfigException: When port is outside of the allowed range.
.. note::
On Linux the minimum is 1 and maximum is 65535.
"""
min_port, max_port = 1, 65535
if port < min_port:
msg = (
f"Port number {port} is not usable because it is less than "
f"{min_port} which is the lowest available port."
)
raise ConfigException(msg)
if port > max_port:
msg = (
f"Port number {port} is not usable because it is less than "
f"{max_port} which is the highest available port."
)
raise ConfigException(msg)
def test_port(self):
"""
Validate port number.
:raises ConfigException: When a port is configured that we have no
permissions for.
"""
for host, port, family, __ in self.listen:
self._port_permissions(host, port, family)
def _port_permissions(self, address, port, family):
"""
Validate that we have permission to use the port and it's not in use.
:param str address: The address to use.
:param int port: The port to use.
:param family: The type of socket to use.
:type family: :py:obj:`socket.AF_INET` or :py:obj:`socket.AF_INET6`
:raises ConfigException: When a port is supplied that we have no
permissions for.
"""
self._min_max_port(port)
if os.getuid() != 0 and port < 1024:
msg = f"You do not have permission to use port {port}"
raise ConfigException(msg)
sock = socket.socket(family, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except (AttributeError, OSError):
pass
if family == socket.AF_INET6:
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
try:
sock.bind((address, port))
except OSError as err:
errmsg = err.strerror
msg = f"Could not use port {port}, {errmsg}"
raise ConfigException(msg)
finally:
sock.close()
del sock
def test_user(self):
"""
Validate user exists in UNIX password database.
:raises ConfigException: When the user cannot be accessed on the
operating system.
.. note::
Defaults to :py:func:`getpass.getuser` if no user is specified.
"""
try:
pwd.getpwnam(self.user)
except KeyError:
msg = f"{self._user} is not a valid user."
raise ConfigException(msg)
def test_group(self):
"""
Validate group exists in UNIX group database.
:raises ConfigException: When the group cannot be accessed on the
operating system.
.. note::
Defaults to :py:attr:`grp.getgrgid.gr_name` if no group is
specified.
"""
try:
grp.getgrnam(self.group)
except KeyError:
msg = f"{self._group} is a not a valid group."
raise ConfigException(msg)
def test_timeout(self):
"""
Validate timeout - only allow a valid integer value in seconds.
:raises ConfigException: When the timeout is not a number or is above
the maximum allowed value of 180.
"""
try:
__ = self.timeout # NOQA
except ValueError:
msg = f"{self._timeout} is not a valid number of seconds."
raise ConfigException(msg)
if self.timeout and self.timeout > 180:
msg = (
"Timeout must be at least 180 seconds or less for security "
"(denial of service)."
)
raise ConfigException(msg)
def test_tls_port(self):
"""
Validate TLS port number.
:raises ConfigException: When a port is configured that we have no
permissions for.
"""
if len(self.tls_listen) == 0:
return
for host, port, af, __ in self.tls_listen:
self._port_permissions(host, port, af)
def test_tls_settings(self):
"""
Validate TLS configuration.
:raises ConfigException: When the TLS configuration is invalid.
.. note::
Verifies if you provide all TLS settings, not just some.
"""
port = True
if not len(self.tls_listen) > 0:
port = False
cert = os.access(self.tls_cert, os.R_OK) if self.tls_cert else False
key = os.access(self.tls_key, os.R_OK) if self.tls_key else False
if (port, cert, key) == (False, False, False):
return
if not all((port, cert, key)):
msg = (
"To use TLS you must supply a port, certificate file "
"and key file."
)
raise ConfigException(msg)
def test_tls_dhparams(self):
"""
Validate Diffie Hellman ephemeral parameters.
:raises ConfigException: When the dhparams file is invalid.
.. note::
Verifies Diffie Hellman ephemeral parameters are readable.
"""
if self.tls_dhparams and not os.access(self.tls_dhparams, os.R_OK):
msg = (
"To use Diffie Hellman ephemeral params you must supply a "
"valid dhparams file."
)
raise ConfigException(msg)
def test_delay(self):
"""
Validate the delay period.
:raises ConfigException: When the delay is not a number or is above
the maximum allowed value of 60.
.. note::
Delay must be lower than the timeout.
"""
if self.delay and self.delay >= self.timeout:
msg = "Delay must be lower than timeout."
raise ConfigException(msg)
if self.delay and self.delay > 60:
msg = (
"Delay must be 60 seconds or less for security (denial of "
"service)."
)
raise ConfigException(msg)
def test_mode(self):
"""
Validate the response mode.
:raise ConfigException: When an invalid mode is configured.
.. note::
Valid options are: 'accept', 'bounce' and 'random'.
"""
if self.mode not in ("accept", "bounce", "random"):
msg = "Mode must be accept, bounce or random."
raise ConfigException(msg)
def test_max_message_size(self):
"""
Validate max_message size is an integer.
:raise ConfigException: When the maximum message size is not a number.
"""
try:
__ = self.max_message_size # NOQA
except ValueError:
size = self._max_message_size
msg = f"{size} is not a valid number of bytes."
raise ConfigException(msg)
def test_pidfile(self):
"""
Validate that the pidfile can be written to.
:raises ConfigException: When the pidfile is invalid.
"""
if not self.pidfile:
return
pidfile = pathlib.Path(self.pidfile)
if not os.access(pidfile.parent, os.W_OK):
msg = "You do not have permission to write to the pidfile."
raise ConfigException(msg)
def test_dynamic_switch(self):
"""
Validate that the dynamic_switch value is correct.
:raises ConfigException: When the dynamic switch value is invalid.
"""
if self._dynamic_switch is None:
return
if self._dynamic_switch not in (True, False):
msg = "Allowed dynamic_switch values are true and false."
raise ConfigException(msg)
|
{
"content_hash": "bffe6f26b83decd12c2ff76ae47598a0",
"timestamp": "",
"source": "github",
"line_count": 1106,
"max_line_length": 78,
"avg_line_length": 30.084086799276673,
"alnum_prop": 0.5405283563249481,
"repo_name": "kura/blackhole",
"id": "8b67afd79d441fcc79dd6f6674ffcac9e217fc11",
"size": "34409",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "blackhole/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "Python",
"bytes": "273799"
},
{
"name": "Shell",
"bytes": "5175"
}
],
"symlink_target": ""
}
|
"""
accessor.py contains base classes for implementing accessor properties
that can be mixed into or pinned onto other pandas classes.
"""
from typing import Set
import warnings
from pandas.util._decorators import Appender
class DirNamesMixin:
_accessors = set() # type: Set[str]
_deprecations = frozenset(
["asobject", "base", "data", "flags", "itemsize", "strides"]
)
def _dir_deletions(self):
"""
Delete unwanted __dir__ for this object.
"""
return self._accessors | self._deprecations
def _dir_additions(self):
"""
Add additional __dir__ for this object.
"""
rv = set()
for accessor in self._accessors:
try:
getattr(self, accessor)
rv.add(accessor)
except AttributeError:
pass
return rv
def __dir__(self):
"""
Provide method name lookup and completion
Only provide 'public' methods.
"""
rv = set(dir(type(self)))
rv = (rv - self._dir_deletions()) | self._dir_additions()
return sorted(rv)
class PandasDelegate:
"""
An abstract base class for delegating methods/properties.
"""
def _delegate_property_get(self, name, *args, **kwargs):
raise TypeError("You cannot access the " "property {name}".format(name=name))
def _delegate_property_set(self, name, value, *args, **kwargs):
raise TypeError("The property {name} cannot be set".format(name=name))
def _delegate_method(self, name, *args, **kwargs):
raise TypeError("You cannot call method {name}".format(name=name))
@classmethod
def _add_delegate_accessors(cls, delegate, accessors, typ, overwrite=False):
"""
Add accessors to cls from the delegate class.
Parameters
----------
cls : the class to add the methods/properties to
delegate : the class to get methods/properties & doc-strings
accessors : string list of accessors to add
typ : 'property' or 'method'
overwrite : boolean, default False
overwrite the method/property in the target class if it exists.
"""
def _create_delegator_property(name):
def _getter(self):
return self._delegate_property_get(name)
def _setter(self, new_values):
return self._delegate_property_set(name, new_values)
_getter.__name__ = name
_setter.__name__ = name
return property(
fget=_getter, fset=_setter, doc=getattr(delegate, name).__doc__
)
def _create_delegator_method(name):
def f(self, *args, **kwargs):
return self._delegate_method(name, *args, **kwargs)
f.__name__ = name
f.__doc__ = getattr(delegate, name).__doc__
return f
for name in accessors:
if typ == "property":
f = _create_delegator_property(name)
else:
f = _create_delegator_method(name)
# don't overwrite existing methods/properties
if overwrite or not hasattr(cls, name):
setattr(cls, name, f)
def delegate_names(delegate, accessors, typ, overwrite=False):
"""
Add delegated names to a class using a class decorator. This provides
an alternative usage to directly calling `_add_delegate_accessors`
below a class definition.
Parameters
----------
delegate : object
the class to get methods/properties & doc-strings
accessors : Sequence[str]
List of accessor to add
typ : {'property', 'method'}
overwrite : boolean, default False
overwrite the method/property in the target class if it exists
Returns
-------
callable
A class decorator.
Examples
--------
@delegate_names(Categorical, ["categories", "ordered"], "property")
class CategoricalAccessor(PandasDelegate):
[...]
"""
def add_delegate_accessors(cls):
cls._add_delegate_accessors(delegate, accessors, typ, overwrite=overwrite)
return cls
return add_delegate_accessors
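def _example_delegate_names():
    """Illustrative sketch of ``delegate_names`` with hypothetical names.

    ``_Backend`` plays the role of the delegate class; the decorator copies
    the listed property onto the accessor and forwards reads to
    ``_delegate_property_get``.
    """

    class _Backend:
        flavour = "plain"

    @delegate_names(_Backend, ["flavour"], typ="property")
    class _Accessor(PandasDelegate):
        def __init__(self, backend):
            self._backend = backend

        def _delegate_property_get(self, name, *args, **kwargs):
            # Called whenever the generated ``flavour`` property is read.
            return getattr(self._backend, name)

    return _Accessor(_Backend()).flavour  # -> "plain"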
# Ported with modifications from xarray
# https://github.com/pydata/xarray/blob/master/xarray/core/extensions.py
# 1. We don't need to catch and re-raise AttributeErrors as RuntimeErrors
# 2. We use a UserWarning instead of a custom Warning
class CachedAccessor:
"""
Custom property-like object (descriptor) for caching accessors.
Parameters
----------
name : str
The namespace this will be accessed under, e.g. ``df.foo``
accessor : cls
The class with the extension methods. The class' __init__ method
should expect one of a ``Series``, ``DataFrame`` or ``Index`` as
the single argument ``data``
"""
def __init__(self, name, accessor):
self._name = name
self._accessor = accessor
def __get__(self, obj, cls):
if obj is None:
# we're accessing the attribute of the class, i.e., Dataset.geo
return self._accessor
accessor_obj = self._accessor(obj)
# Replace the property with the accessor object. Inspired by:
# http://www.pydanny.com/cached-property.html
# We need to use object.__setattr__ because we overwrite __setattr__ on
# NDFrame
object.__setattr__(obj, self._name, accessor_obj)
return accessor_obj
def _register_accessor(name, cls):
def decorator(accessor):
if hasattr(cls, name):
warnings.warn(
"registration of accessor {!r} under name {!r} for type "
"{!r} is overriding a preexisting attribute with the same "
"name.".format(accessor, name, cls),
UserWarning,
stacklevel=2,
)
setattr(cls, name, CachedAccessor(name, accessor))
cls._accessors.add(name)
return accessor
return decorator
_doc = """
Register a custom accessor on %(klass)s objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.
Returns
-------
callable
A class decorator.
See Also
--------
%(others)s
Notes
-----
When accessed, your accessor will be initialized with the pandas object
the user is interacting with. So the signature must be
.. code-block:: python
def __init__(self, pandas_object): # noqa: E999
...
For consistency with pandas methods, you should raise an ``AttributeError``
if the data passed to your accessor has an incorrect dtype.
>>> pd.Series(['a', 'b']).dt
Traceback (most recent call last):
...
AttributeError: Can only use .dt accessor with datetimelike values
Examples
--------
In your library code::
import pandas as pd
@pd.api.extensions.register_dataframe_accessor("geo")
class GeoAccessor:
def __init__(self, pandas_obj):
self._obj = pandas_obj
@property
def center(self):
# return the geographic center point of this DataFrame
lat = self._obj.latitude
lon = self._obj.longitude
return (float(lon.mean()), float(lat.mean()))
def plot(self):
# plot this array's data on a map, e.g., using Cartopy
pass
Back in an interactive IPython session:
>>> ds = pd.DataFrame({'longitude': np.linspace(0, 10),
... 'latitude': np.linspace(0, 20)})
>>> ds.geo.center
(5.0, 10.0)
>>> ds.geo.plot()
# plots data on a map
"""
@Appender(
_doc
% dict(
klass="DataFrame",
others=("register_series_accessor, " "register_index_accessor"),
)
)
def register_dataframe_accessor(name):
from pandas import DataFrame
return _register_accessor(name, DataFrame)
@Appender(
_doc
% dict(
klass="Series",
others=("register_dataframe_accessor, " "register_index_accessor"),
)
)
def register_series_accessor(name):
from pandas import Series
return _register_accessor(name, Series)
@Appender(
_doc
% dict(
klass="Index",
others=("register_dataframe_accessor, " "register_series_accessor"),
)
)
def register_index_accessor(name):
from pandas import Index
return _register_accessor(name, Index)
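def _example_register_series_accessor():
    """Usage sketch for the registration helpers above (names are hypothetical)."""
    import pandas as pd

    @register_series_accessor("upper")
    class UpperAccessor:
        def __init__(self, pandas_obj):
            self._obj = pandas_obj

        def shout(self):
            # Upper-case every string element of the wrapped Series.
            return self._obj.str.upper()

    # The accessor object is created lazily on first attribute access and is
    # then cached on the Series instance by CachedAccessor.
    return pd.Series(["a", "b"]).upper.shout()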
|
{
"content_hash": "757089634abb64dc20c548a453e9542e",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 85,
"avg_line_length": 27.58957654723127,
"alnum_prop": 0.6017709563164109,
"repo_name": "toobaz/pandas",
"id": "f84033e9c3c90fc7807087ab9a08d03ad3e03634",
"size": "8470",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/core/accessor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "541"
},
{
"name": "C",
"bytes": "394843"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "15031623"
},
{
"name": "Shell",
"bytes": "27585"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
try:
from collections.abc import Sequence, Mapping
except ImportError:
from collections import Sequence, Mapping
__all__ = ['BROKEN_ITERABLE', 'BROKEN_SEQUENCE', 'BROKEN_MAPPING']
class BrokenIterable:
def __iter__(self):
yield 'x'
raise ValueError(type(self).__name__)
def __getitem__(self, item):
return item
def __len__(self):
return 2
class BrokenSequence(BrokenIterable, Sequence):
pass
class BrokenMapping(BrokenIterable, Mapping):
pass
BROKEN_ITERABLE = BrokenIterable()
BROKEN_SEQUENCE = BrokenSequence()
BROKEN_MAPPING = BrokenMapping()
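# Behaviour sketch (illustrative): these objects look like ordinary
# containers until iteration moves past the first element, e.g.
#
#     len(BROKEN_SEQUENCE)    # -> 2
#     list(BROKEN_SEQUENCE)   # -> ValueError: BrokenSequence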
|
{
"content_hash": "3f5cb9e7bc4d0c93b0c1f9fa116827b6",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 66,
"avg_line_length": 18.235294117647058,
"alnum_prop": 0.6725806451612903,
"repo_name": "robotframework/robotframework",
"id": "2f808768dc45fdc880d76cd278f360964d34ad60",
"size": "620",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "atest/testdata/standard_libraries/builtin/broken_containers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44632"
},
{
"name": "HTML",
"bytes": "86871"
},
{
"name": "JavaScript",
"bytes": "162950"
},
{
"name": "Python",
"bytes": "2764220"
},
{
"name": "RobotFramework",
"bytes": "1260097"
}
],
"symlink_target": ""
}
|
"""Extend functionality from webob.dec.wsgify for Placement API."""
import webob
from oslo_log import log as logging
from webob.dec import wsgify
from nova.api.openstack.placement import util
LOG = logging.getLogger(__name__)
class PlacementWsgify(wsgify):
def call_func(self, req, *args, **kwargs):
"""Add json_error_formatter to any webob HTTPExceptions."""
try:
super(PlacementWsgify, self).call_func(req, *args, **kwargs)
except webob.exc.HTTPException as exc:
LOG.debug("Placement API returning an error response: %s", exc)
exc.json_formatter = util.json_error_formatter
raise
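def _example_placement_handler():
    """Sketch of wrapping a handler (the handler body is hypothetical).

    ``PlacementWsgify`` is used exactly like ``webob.dec.wsgify``; the only
    difference is that HTTPExceptions leaving the handler get
    ``util.json_error_formatter`` attached before they propagate.
    """
    @PlacementWsgify
    def handler(req):
        raise webob.exc.HTTPNotFound(explanation="no such resource")

    return handler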
|
{
"content_hash": "be2d7d9053f1dbfc84a1ae90ae2a37d0",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 75,
"avg_line_length": 30.227272727272727,
"alnum_prop": 0.6736842105263158,
"repo_name": "vmturbo/nova",
"id": "4aa8b789ba5f9051305aba53d9ef68cc224624e2",
"size": "1237",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/api/openstack/placement/wsgi_wrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "18983608"
},
{
"name": "Shell",
"bytes": "31813"
},
{
"name": "Smarty",
"bytes": "307089"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
TIMESERIES_COL = "height"
N_OUTPUTS = 1 # in each sequence, 1-49 are features, and 50 is label
SEQ_LEN = None
DEFAULTS = None
N_INPUTS = None
def init(hparams):
global SEQ_LEN, DEFAULTS, N_INPUTS
SEQ_LEN = hparams["sequence_length"]
DEFAULTS = [[0.0] for x in range(0, SEQ_LEN)]
N_INPUTS = SEQ_LEN - N_OUTPUTS
def linear_model(hparams):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape = [N_INPUTS], name = TIMESERIES_COL))
model.add(tf.keras.layers.Dense(units = 1, activation = None))
return model
def dnn_model(hparams):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape = [N_INPUTS], name = TIMESERIES_COL))
model.add(tf.keras.layers.Dense(units = 30, activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(units = 10, activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(units = 1, activation = None))
return model
def cnn_model(hparams):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape = [N_INPUTS], name = TIMESERIES_COL))
model.add(tf.keras.layers.Reshape(target_shape = [N_INPUTS, 1]))
model.add(tf.keras.layers.Conv1D(filters = N_INPUTS // 2, kernel_size = 3, padding = "same", activation = tf.nn.relu))
model.add(tf.keras.layers.MaxPooling1D(pool_size = 2, strides = 2))
model.add(tf.keras.layers.Conv1D(filters = N_INPUTS // 2, kernel_size = 3, padding = "same", activation = tf.nn.relu))
model.add(tf.keras.layers.MaxPooling1D(pool_size = 2, strides = 2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units = 3, activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(units = 1, activation = None))
return model
def rnn_model(hparams):
CELL_SIZE = N_INPUTS // 3 # size of the internal state in each of the cells
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape = [N_INPUTS], name = TIMESERIES_COL))
model.add(tf.keras.layers.Reshape(target_shape = [N_INPUTS, 1]))
model.add(tf.keras.layers.LSTM(units = CELL_SIZE))
model.add(tf.keras.layers.Dense(units = N_INPUTS // 2, activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(units = 1, activation = None))
return model
# 2-layer RNN
def rnn2_model(hparams):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape = [N_INPUTS], name = TIMESERIES_COL))
model.add(tf.keras.layers.Reshape(target_shape = [N_INPUTS, 1]))
model.add(tf.keras.layers.LSTM(units = N_INPUTS * 2, return_sequences = True))
model.add(tf.keras.layers.LSTM(units = N_INPUTS // 2))
model.add(tf.keras.layers.Dense(units = (N_INPUTS // 2) // 2, activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(units = 1, activation = None))
return model
# Read data and convert to needed format
def read_dataset(filename, mode, batch_size = 512):
def _input_fn():
def decode_csv(row):
# Row is a string tensor containing the contents of one row
features = tf.decode_csv(records = row, record_defaults = DEFAULTS) # string tensor -> list of 50 rank 0 float tensors
label = features.pop() # remove last feature and use as label
features = tf.stack(values = features, axis = 0) # list of rank 0 tensors -> single rank 1 tensor
return {TIMESERIES_COL: features}, label
# Create list of file names that match "glob" pattern (i.e. data_file_*.csv)
dataset = tf.data.Dataset.list_files(file_pattern = filename)
# Read in data from files
dataset = dataset.flat_map(map_func = tf.data.TextLineDataset)
# Parse text lines as comma-separated values (CSV)
dataset = dataset.map(map_func = decode_csv)
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # loop indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(count = num_epochs).batch(batch_size = batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
def serving_input_fn():
feature_placeholders = {
TIMESERIES_COL: tf.placeholder(dtype = tf.float32, shape = [None, N_INPUTS])
}
features = {
key: tf.expand_dims(input = tensor, axis = -1)
for key, tensor in feature_placeholders.items()
}
features[TIMESERIES_COL] = tf.squeeze(input = features[TIMESERIES_COL], axis = 2)
return tf.estimator.export.ServingInputReceiver(features = features, receiver_tensors = feature_placeholders)
# Wrapper function to build selected Keras model type
def sequence_regressor(hparams):
# 1. Run the appropriate model
model_functions = {
"linear": linear_model,
"dnn": dnn_model,
"cnn": cnn_model,
"rnn": rnn_model,
"rnn2": rnn2_model}
# Get function pointer for selected model type
model_function = model_functions[hparams["model"]]
# Build selected Keras model
model = model_function(hparams)
return model
def train_and_evaluate(output_dir, hparams):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
# Build Keras model
model = sequence_regressor(hparams)
# Compile Keras model with optimizer, loss function, and eval metrics
model.compile(
optimizer = "adam",
loss = "mse",
metrics = ["mse"])
# Convert Keras model to an Estimator
estimator = tf.keras.estimator.model_to_estimator(
keras_model = model,
model_dir = output_dir,
config = tf.estimator.RunConfig(save_checkpoints_secs = hparams["min_eval_frequency"]))
# Set estimator's train_spec to use train_input_fn and train for so many steps
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset(
filename = hparams['train_data_path'],
mode = tf.estimator.ModeKeys.TRAIN,
batch_size = hparams['train_batch_size']),
max_steps = hparams['train_steps'])
# Create exporter that uses serving_input_fn to create saved_model for serving
exporter = tf.estimator.LatestExporter(name = 'exporter', serving_input_receiver_fn = serving_input_fn)
# Set estimator's eval_spec to use eval_input_fn and export saved_model
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset(
filename = hparams['eval_data_path'],
mode = tf.estimator.ModeKeys.EVAL,
batch_size = 1000),
steps = None,
exporters = exporter,
start_delay_secs = hparams['eval_delay_secs'],
throttle_secs = hparams['min_eval_frequency'])
# Run train_and_evaluate loop
tf.estimator.train_and_evaluate(
estimator = estimator,
train_spec = train_spec,
eval_spec = eval_spec)
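def _example_run():
    """Illustrative driver; bucket paths and hyperparameter values are made up.

    ``init`` must run first so SEQ_LEN, DEFAULTS and N_INPUTS are populated
    before any input function or Keras model is built.
    """
    hparams = {
        "sequence_length": 50,
        "model": "rnn",
        "train_data_path": "gs://my-bucket/train*.csv",
        "eval_data_path": "gs://my-bucket/eval*.csv",
        "train_batch_size": 512,
        "train_steps": 1000,
        "eval_delay_secs": 10,
        "min_eval_frequency": 60,
    }
    init(hparams)
    train_and_evaluate("gs://my-bucket/trained_model", hparams)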
|
{
"content_hash": "3d125ceaed3c6f2c8415ec577bb63571",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 131,
"avg_line_length": 39.89010989010989,
"alnum_prop": 0.6535812672176309,
"repo_name": "turbomanage/training-data-analyst",
"id": "4146bca4030b0e41e0cb8e896f248806f0d2a19e",
"size": "7881",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "courses/machine_learning/deepdive/09_sequence_keras/sinemodel/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "19768"
},
{
"name": "C++",
"bytes": "30926"
},
{
"name": "CSS",
"bytes": "13208"
},
{
"name": "Dockerfile",
"bytes": "35682"
},
{
"name": "HTML",
"bytes": "2069111"
},
{
"name": "Java",
"bytes": "1539437"
},
{
"name": "JavaScript",
"bytes": "2540305"
},
{
"name": "Jsonnet",
"bytes": "5696"
},
{
"name": "Jupyter Notebook",
"bytes": "61371931"
},
{
"name": "Makefile",
"bytes": "4118"
},
{
"name": "PLpgSQL",
"bytes": "5868"
},
{
"name": "PigLatin",
"bytes": "393"
},
{
"name": "Python",
"bytes": "9553863"
},
{
"name": "R",
"bytes": "68"
},
{
"name": "Shell",
"bytes": "390786"
},
{
"name": "TSQL",
"bytes": "34160"
}
],
"symlink_target": ""
}
|
from iptest.assert_util import skiptest
skiptest("win32", "cli64", "posix")
from iptest.cominterop_util import *
from iptest.file_util import file_exists, delete_files
import nt
#------------------------------------------------------------------------------
#--SANITY CHECK
if not IsWordInstalled():
from sys import exit
print("Word is not installed. Cannot run this test!")
exit(1)
else:
TryLoadWordInteropAssembly()
from Microsoft.Office.Interop import Word
#------------------------------------------------------------------------------
#--HELPERS
def IsWordPIAInstalled():
from Microsoft.Win32 import Registry
word_pia_registry = None
wordapp_pia_registry = Registry.ClassesRoot.OpenSubKey("CLSID\\{000209FF-0000-0000-C000-000000000046}\\InprocServer32")
#worddoc_pia_registry = Registry.ClassesRoot.OpenSubKey("CLSID\\{00020906-0000-0000-C000-000000000046}\\InprocServer32")
return wordapp_pia_registry != None
def wd_selection_change_eventhandler(range):
global SELECTION_COUNTER
SELECTION_COUNTER = SELECTION_COUNTER + 1
#print "selected range - ", range.Start, range.End
def add_wordapp_event(wdapp):
wdapp.WindowSelectionChange += wd_selection_change_eventhandler
def remove_wordapp_event(wdapp):
wdapp.WindowSelectionChange -= wd_selection_change_eventhandler
def get_range(doc, start, end):
return doc.Range(start, end)
def quit_word(wd):
if IS_PIA_INSTALLED :
wd.Quit(clr.Reference[System.Object](0))
else:
wd.Quit(0)
#------------------------------------------------------------------------------
#--GLOBALS
IS_PIA_INSTALLED = IsWordPIAInstalled()
SELECTION_COUNTER = 0
word = CreateWordApplication()
doc = word.Documents.Add()
#------------------------------------------------------------------------------
#--TEST CASES
def test_word_typelibsupport():
# load Word namespace directly from the TypeLib
typeLib = clr.LoadTypeLibrary(System.Guid("00020905-0000-0000-C000-000000000046"))
    # we can get some information about the typelib
Assert( typeLib.Name == 'Word')
Assert( System.String.ToUpper(typeLib.Guid.ToString()) == '00020905-0000-0000-C000-000000000046')
# check version information is available and does not throw
typeLib.VersionMajor
typeLib.VersionMinor
# check typeLib exposes only those discoverable methods
Assert( dir(typeLib).__len__() == 5 )
Assert( 'Word' in dir(typeLib) );
Assert( 'Name' in dir(typeLib) );
Assert( 'Guid' in dir(typeLib) );
Assert( 'VersionMajor' in dir(typeLib) );
Assert( 'VersionMinor' in dir(typeLib) );
# check some coclasses are present in Word's namespace
Assert('Application' in dir(typeLib.Word))
Assert('Document' in dir(typeLib.Word))
# check some enums are present in Word's namespace
Assert('WdCountry' in dir(typeLib.Word))
Assert('WdSaveFormat' in dir(typeLib.Word))
Assert('WdXMLNodeType' in dir(typeLib.Word))
# check we can explore the content of enums
Assert('wdFormatXML' in dir(typeLib.Word.WdSaveFormat))
Assert('wdUS' in dir(typeLib.Word.WdCountry))
#check we can access enums' values
Assert(typeLib.Word.WdCountry.wdUS == 1)
# verify namespace Word is not yet available
try:
Word.__class__
except NameError: pass
else: Fail("namespace Word has not been imported yet")
# Now let's do above tests but with imported namespace
clr.AddReferenceToTypeLibrary(typeLib.Guid)
# verify namespace Word is not yet available
try:
Word.__class__
except NameError: pass
else: Fail("namespace Word has not been imported yet")
from . import Word
# check __class__ extension is available
Word.__class__
# check some coclasses are present in Word's namespace
Assert('Application' in dir(Word))
Assert('Document' in dir(Word))
# check some expected enums are present in Word's namespace
Assert('WdCountry' in dir(Word))
Assert('WdSaveFormat' in dir(Word))
Assert('WdXMLNodeType' in dir(Word))
# check we can explore the content of enums
Assert('wdFormatXML' in dir(Word.WdSaveFormat))
Assert('wdUS' in dir(Word.WdCountry))
#check we can access enums' values
Assert(Word.WdCountry.wdUS == 1)
def test_wordevents():
global SELECTION_COUNTER
SELECTION_COUNTER = 0
if IS_PIA_INSTALLED:
print("Found PIAs for Word")
else:
print("No PIAs for Word were Found!!!!")
doc.Range().Text = "test"
add_wordapp_event(word)
get_range(doc, 1, 1).Select()
AreEqual(SELECTION_COUNTER, 1)
add_wordapp_event(word)
get_range(doc, 1, 2).Select()
AreEqual(SELECTION_COUNTER, 3)
remove_wordapp_event(word)
get_range(doc, 2, 2).Select()
AreEqual(SELECTION_COUNTER, 4)
remove_wordapp_event(word)
get_range(doc, 2, 3).Select()
AreEqual(SELECTION_COUNTER, 4)
def test_spellChecker():
suggestions = word.GetSpellingSuggestions("waht")
Assert(suggestions.Count > 5)
# This tests for enumeration support over COM objects
suggestions = [s.Name for s in suggestions.GetEnumerator()]
# Check to see that some expected suggestions actually exist
Assert("what" in suggestions.GetEnumerator())
Assert("with" in suggestions.GetEnumerator())
def test_word_basic():
'''
http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=14166
'''
temp_file_name = nt.tempnam() + ".word_basic.doc"
word_basic = getRCWFromProgID("Word.Basic")
if is_snap:
word_basic.AppShow()
word_basic.FileNewDefault()
word_basic.Insert("some stuff...")
word_basic.FileSaveAs(temp_file_name)
if is_snap:
word_basic.AppHide()
del word_basic
Assert(file_exists(temp_file_name))
delete_files(temp_file_name)
#------------------------------------------------------------------------------
try:
run_com_test(__name__, __file__)
finally:
quit_word(word)
|
{
"content_hash": "a5738af1b21d0ae37c570c03215645ef",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 124,
"avg_line_length": 31.647368421052633,
"alnum_prop": 0.642275070680193,
"repo_name": "IronLanguages/ironpython3",
"id": "2b63ac57f8a037e9e399e21d004b2dad6f8b5ea0",
"size": "6257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/interop/com/apps/word.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6855"
},
{
"name": "C",
"bytes": "239473"
},
{
"name": "C#",
"bytes": "12619304"
},
{
"name": "C++",
"bytes": "28403"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "13157428"
},
{
"name": "Makefile",
"bytes": "332"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "84504"
},
{
"name": "Python",
"bytes": "29490541"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "4872"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from GitSearch.Indexer.NewJavaParser import parse
from GitSearch.MyUtils import remove_unified_stop_lists, write_search_log
def Generator(code):
file_content = code
    # print '1. Original Query : ', file_content
    ast = parse(file_content, resolve=False)  # parse the Java code using newJavaParser
query = add_code_keyword_into_document(file_content, ast)
# print "Query before the removing stop words : ", query
# write_search_log("\nQuery before the removing stop words : " + str(query))
# print '2. Right after the code query generator : ', query
query = remove_unified_stop_lists(query)
# print '3. Right after the stop words removing : ', query
# print "Transformed user code query : ", query
# write_search_log("\nTransformed user code query : " + str(query))
return query
def add_code_keyword_into_document(file_content, node):
query = ""
for m in node["typed_method_call"]:
if m:
query += "word:" + m + " "
for e in node["extends"]:
if e:
query += "word:" + e + " "
for c in node["used_classes"]:
if c:
query += "word:" + c + " "
for i in node["class_instance_creation"]:
if i:
query += "word:" + i + " "
for m in node["methods"]:
if m:
query += "word:" + m + " "
for m in node["methods_called"]:
if m:
query += "word:" + m + " "
for m in node["unresolved_method_calls"]:
if m:
query += "word:" + m + " "
for l in node["literals"]:
if l:
query += "word:" + l + " "
return query
if __name__ == '__main__':
code = """
package com.han.streaming;
import android.util.Log;
import org.apache.commons.net.ftp.FTP;
import org.apache.commons.net.ftp.FTPClient;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
/**
* Created by Han on 2016-07-18.
*/
public class FTPConnector {
private FTPClient ftpClient;
public FTPConnector() {
ftpClient = new FTPClient();
}
public boolean login() {
try {
ftpClient.setControlEncoding("UTF-8");
ftpClient.connect("http://192.168.0.119");
ftpClient.login("Artbit3", "artbit123");
ftpClient.enterLocalPassiveMode();
ftpClient.makeDirectory("music_upload");
ftpClient.changeWorkingDirectory("music_upload");
return true;
} catch (IOException e) {
Log.e("FTP_CONNECT", "LOGIN ERROR");
return false;
}
}
public boolean uploadFile(File file) {
boolean uploadResult = true;
if (ftpClient.isConnected()) {
Log.e("FTP_UPLOAD", "CONNECTION IS NOT OPEN");
return false;
}
FileInputStream inputStream = null;
try {
ftpClient.setFileType(FTP.BINARY_FILE_TYPE);
inputStream = new FileInputStream(file);
if (!ftpClient.storeFile(file.getName(), inputStream)) {
uploadResult = false;
Log.e("FTP_UPLOAD", "FILE SEND ERROR");
}
} catch (IOException e) {
uploadResult = false;
Log.e("FTP_UPLOAD", "FILE SEND ERROR IN CATCH");
} finally {
try {
if (inputStream != null) inputStream.close();
ftpClient.logout();
} catch (IOException e) {
uploadResult = false;
Log.e("FTP_UPLOAD", "STREAM CLOSE ERROR");
}
}
if (ftpClient.isConnected()) {
try {
ftpClient.disconnect();
} catch (IOException e) {
Log.e("FTP_UPLOAD", "DISCONNECT FAIL ERROR");
uploadResult = false;
}
}
return uploadResult;
}
}
"""
code = """Dimension screenSize = Toolkit.getDefaultToolkit().getScreenSize();
int width = (int) screenSize.getWidth();
int height = (int) screenSize.getHeight();
this.setLayout(new BorderLayout(50, 50));
this.setBounds(0, 0, width, height);
this.setExtendedState(JFrame.MAXIMIZED_BOTH);"""
user_query = Generator(code)
print user_query
|
{
"content_hash": "38cf5194cd1a0ac3883ca8c1f9ab9c68",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 81,
"avg_line_length": 27.690322580645162,
"alnum_prop": 0.5580149114631874,
"repo_name": "facoy/facoy",
"id": "4b41831e57aa23ff4f0c088f6c63ba32d7d1233f",
"size": "4314",
"binary": false,
"copies": "1",
"ref": "refs/heads/release-1.0",
"path": "FrontEnd/Generator_Code_Query_text_base.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16161"
},
{
"name": "Java",
"bytes": "1038237"
},
{
"name": "JavaScript",
"bytes": "6570"
}
],
"symlink_target": ""
}
|
"""Test of "New Hunt" wizard."""
from absl import app
from selenium.webdriver.common import keys
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import data_store
from grr_response_server import foreman
from grr_response_server import foreman_rules
from grr_response_server import hunt as lib_hunt
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import transfer
from grr_response_server.gui import gui_test_lib
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import output_plugin as rdf_output_plugin
from grr.test_lib import test_lib
class TestNewHuntWizard(gui_test_lib.GRRSeleniumHuntTest):
"""Test the "new hunt wizard" GUI."""
@staticmethod
def FindForemanRules(hunt_urn):
rules = data_store.REL_DB.ReadAllForemanRules()
return [rule for rule in rules if rule.hunt_id == hunt_urn.Basename()]
def testNewHuntWizard(self):
# Open up and click on View Hunts.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.WaitUntil(self.IsElementPresent, "css=a[grrtarget=hunts]")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsElementPresent, "css=button[name=NewHunt]")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on Filesystem item in flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
self.Click("css=#_Filesystem > i.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
# Wait for flow configuration form to be rendered (just wait for first
# input field).
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form label:contains('Paths')")
# Change "path" and "pathtype" values
self.Type(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-repeated-field:has(label:contains('Paths')) "
"input", "/tmp")
self.Select(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Pathtype')) "
"select", "NTFS")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Click on "Back" button and check that all the values in the form
# remain intact.
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form label:contains('Paths')")
self.assertEqual(
"/tmp",
self.GetValue(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-repeated-field:has(label:contains('Paths')) input"))
self.assertEqual(
"NTFS",
self.GetSelectedLabel(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Pathtype')) select"
))
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Configure the hunt to use dummy output plugin.
self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
self.Select("css=grr-new-hunt-wizard-form select", "DummyOutputPlugin")
self.Type(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Filepath Regex')) "
"input", "some regex")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Empty set of rules should be valid.
self.WaitUntil(self.IsElementPresent, "css=button.Next:not([disabled])")
# A note informs what an empty set of rules means.
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('No rules specified!')")
# Select the alternative match mode, which matches a client if
# any of the rules evaluates to true.
self.Select(
"css=grr-configure-rules-page "
"label:contains('Match mode') ~ * select", "Match any")
# The note depends on the match mode.
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('No rules specified!')")
# Create 3 foreman rules. Note that "Add" button adds rules
# to the beginning of a list. So we always use :nth(0) selector.
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-configure-rules-page div.well:nth(0) select", "Regex")
rule = foreman_rules.ForemanRegexClientRule
label = rule.ForemanStringField.SYSTEM.description
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Field') ~ * select", label)
self.Type(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Attribute regex') ~ * input", "Linux")
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-configure-rules-page div.well:nth(0) select",
"Integer")
rule = foreman_rules.ForemanIntegerClientRule
label = rule.ForemanIntegerField.CLIENT_CLOCK.description
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Field') ~ * select", label)
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Operator') ~ * select", "GREATER_THAN")
self.Type(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Value') ~ * input", "1336650631137737")
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Click("css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Os darwin') ~ * input[type=checkbox]")
# Click on "Back" button
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Click on "Next" button again and check that all the values that
# we've just entered remain intact.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Review')")
# Check that the arguments summary is present.
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Paths')")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('/tmp')")
# Check that output plugins are shown.
self.assertTrue(
self.IsElementPresent(
"css=grr-wizard-form:contains('DummyOutputPlugin')"))
self.assertTrue(
self.IsElementPresent("css=grr-wizard-form:contains('some regex')"))
# Check that there's no deprecated rules summary.
self.assertFalse(
self.IsElementPresent("css=grr-wizard-form:contains('Regex rules')"))
self.assertFalse(
self.IsElementPresent("css=grr-wizard-form:contains('Integer rules')"))
# Check that rules summary is present.
self.assertTrue(
self.IsElementPresent(
"css=grr-wizard-form:contains('Client rule set')"))
# Click on "Run" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Created Hunt')")
# Close the window and check that the hunt was created.
self.Click("css=button.Next")
# Select newly created hunt.
self.Click("css=grr-hunts-list td:contains('gui_user')")
# Check that correct details are displayed in hunt details tab.
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-inspector:contains('GenericHunt')")
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-inspector:contains('Flow Arguments')")
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('Paths')"))
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('/tmp')"))
self.assertTrue(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('DummyOutputPlugin')"))
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('some regex')"))
# Check that there's no deprecated rules summary.
self.assertFalse(
self.IsElementPresent("css=grr-hunt-inspector:contains('Regex rules')"))
self.assertFalse(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('Integer rules')"))
# Check that rules summary is present.
self.assertTrue(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('Client Rule Set')"))
# Check that the hunt object was actually created
hunts_list = sorted(
data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
key=lambda x: x.create_time)
self.assertLen(hunts_list, 1)
# Check that the hunt was created with a correct flow
hunt = hunts_list[0]
self.assertEqual(hunt.args.standard.flow_name,
file_finder.FileFinder.__name__)
args = hunt.args.standard.flow_args.Unpack(rdf_file_finder.FileFinderArgs)
self.assertEqual(args.paths[0], "/tmp")
self.assertEqual(args.pathtype, rdf_paths.PathSpec.PathType.NTFS)
# self.assertEqual(hunt.args.flow_args.ignore_errors, True)
self.assertEqual(hunt.output_plugins[0].plugin_name, "DummyOutputPlugin")
# Check that hunt was not started
self.assertEqual(hunt.hunt_state, hunt.HuntState.PAUSED)
lib_hunt.StartHunt(hunt.hunt_id)
hunt_rules = self.FindForemanRules(
rdfvalue.RDFURN("hunts").Add(hunt.hunt_id))
# Check that the hunt was created with correct rules
self.assertLen(hunt_rules, 1)
lifetime = hunt_rules[0].GetLifetime()
lifetime -= rdfvalue.Duration.From(2, rdfvalue.WEEKS)
self.assertLessEqual(lifetime, rdfvalue.Duration.From(1, rdfvalue.SECONDS))
r = hunt_rules[0].client_rule_set
self.assertEqual(r.match_mode,
foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ANY)
self.assertLen(r.rules, 3)
self.assertEqual(r.rules[0].rule_type,
foreman_rules.ForemanClientRule.Type.OS)
self.assertEqual(r.rules[0].os.os_windows, False)
self.assertEqual(r.rules[0].os.os_linux, False)
self.assertEqual(r.rules[0].os.os_darwin, True)
self.assertEqual(r.rules[1].rule_type,
foreman_rules.ForemanClientRule.Type.INTEGER)
self.assertEqual(r.rules[1].integer.field, "CLIENT_CLOCK")
self.assertEqual(
r.rules[1].integer.operator,
foreman_rules.ForemanIntegerClientRule.Operator.GREATER_THAN)
self.assertEqual(r.rules[1].integer.value, 1336650631137737)
self.assertEqual(r.rules[2].rule_type,
foreman_rules.ForemanClientRule.Type.REGEX)
self.assertEqual(r.rules[2].regex.field, "SYSTEM")
self.assertEqual(r.rules[2].regex.attribute_regex, "Linux")
def testWizardStepCounterIsShownCorrectly(self):
# Open up and click on View Hunts.
self.Open("/#/hunts")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on the FileFinder item in Filesystem flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
self.Click("css=#_Filesystem > i.jstree-icon")
self.Click("link=File Finder")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Step 1 out of 6')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Step 2 out of 6')")
def testLiteralExpressionIsProcessedCorrectly(self):
"""Literals are raw bytes. Testing that raw bytes are processed right."""
# Open up and click on View Hunts.
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on Filesystem item in flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
self.Click("css=#_Filesystem > i.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
self.Click("css=label:contains('Conditions') ~ * button")
self.Select("css=label:contains('Condition type') ~ * select",
"Contents literal match")
self.Type("css=label:contains('Literal') ~ * input", "foo\\x0d\\xc8bar")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Review')")
# Check that the arguments summary is present.
self.WaitUntil(
self.IsElementPresent,
"css=grr-wizard-form:contains('%s')" % file_finder.FileFinder.__name__)
self.WaitUntil(self.IsTextPresent, b"foo\\x0d\\xc8bar")
# Click on "Run" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Created Hunt')")
# Close the window and check that the hunt was created.
self.Click("css=button.Next")
# Check that the hunt object was actually created
hunts_list = sorted(
data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
key=lambda x: x.create_time)
self.assertLen(hunts_list, 1)
# Check that the hunt was created with a correct literal value.
hunt = hunts_list[0]
self.assertEqual(hunt.args.standard.flow_name,
file_finder.FileFinder.__name__)
args = hunt.args.standard.flow_args.Unpack(rdf_file_finder.FileFinderArgs)
self.assertEqual(args.conditions[0].contents_literal_match.literal,
b"foo\x0d\xc8bar")
def testOutputPluginsListEmptyWhenNoDefaultOutputPluginSet(self):
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# There should be no dummy output plugin visible.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.WaitUntilNot(self.IsElementPresent,
"css=grr-wizard-form:contains('Dummy do do')")
def testDefaultOutputPluginIsCorrectlyAddedToThePluginsList(self):
with test_lib.ConfigOverrider(
{"AdminUI.new_hunt_wizard.default_output_plugin": "DummyOutputPlugin"}):
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Dummy output plugin should be added by default.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('DummyOutputPlugin')")
def testLabelsHuntRuleDisplaysAvailableLabels(self):
client_id = self.SetupClient(0)
self.AddClientLabel(client_id, u"owner1", u"foo")
self.AddClientLabel(client_id, u"owner2", u"bar")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click 'Next' to go to hunt parameters page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
# Click 'Next' to go to output plugins page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
# Click 'Next' to go to hunt rules page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
# Select 'Clients With Label' rule.
self.Select("css=grr-new-hunt-wizard-form div.well select", "Label")
# Check that there's an option present for label 'bar' (this option
# should be selected) and for label 'foo'.
self.WaitUntil(
self.IsElementPresent, "css=grr-new-hunt-wizard-form div.well "
".form-group:has(label:contains('Label')) "
"select option:selected[label=bar]")
self.WaitUntil(
self.IsElementPresent, "css=grr-new-hunt-wizard-form div.well "
".form-group:has(label:contains('Label')) "
"select option:not(:selected)[label=foo]")
def testLabelsHuntRuleMatchesCorrectClients(self):
client_ids = self.SetupClients(10)
self.AddClientLabel(client_ids[1], u"owner1", u"foo")
self.AddClientLabel(client_ids[1], u"owner2", u"bar")
self.AddClientLabel(client_ids[7], u"GRR", u"bar")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click 'Next' to go to the hunt parameters page, the output plugins
# page and then to the hunt rules page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Select 'Clients With Label' rule.
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-new-hunt-wizard-form div.well select", "Label")
self.Select(
"css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Label')):nth-last-of-type(1) "
"select", "foo")
self.Click("css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Add label')) button")
self.Select(
"css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Label')):nth-last-of-type(1) "
"select", "bar")
self.Select(
"css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Match mode')) select", "Match any")
# Click 'Next' to go to the hunt overview page. Then click 'Next' to
# submit the hunt and wait until it's created.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Created Hunt')")
hunts_list = sorted(
data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
key=lambda x: x.create_time)
hunt = hunts_list[0]
lib_hunt.StartHunt(hunt.hunt_id)
foreman_obj = foreman.Foreman()
for client_id in client_ids:
tasks_assigned = foreman_obj.AssignTasksToClient(client_id)
if client_id in [client_ids[1], client_ids[7]]:
self.assertTrue(tasks_assigned)
else:
self.assertFalse(tasks_assigned)
def CreateSampleHunt(self, description, creator=None):
self.StartHunt(
description=description,
flow_runner_args=rdf_flow_runner.FlowRunnerArgs(
flow_name=transfer.GetFile.__name__),
flow_args=transfer.GetFileArgs(
pathspec=rdf_paths.PathSpec(
path="/tmp/evil.txt",
pathtype=rdf_paths.PathSpec.PathType.NTFS,
)),
client_rule_set=self._CreateForemanClientRuleSet(),
output_plugins=[
rdf_output_plugin.OutputPluginDescriptor(
plugin_name="DummyOutputPlugin",
plugin_args=gui_test_lib.DummyOutputPlugin.args_type(
filename_regex="blah!", fetch_binaries=True))
],
client_rate=60,
paused=True,
creator=creator or self.test_username)
def testPathAutocomplete(self):
# Open Hunts
self.Open("/#/hunts")
# Open "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on Filesystem item in flows list
self.Click("css=#_Filesystem > i.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
input_selector = "css=grr-form-glob-expression input[uib-typeahead]"
# Change "path"
self.Type(input_selector, "/foo/%%path")
self.WaitUntil(self.IsElementPresent,
"css=[uib-typeahead-popup]:contains('%%environ_path%%')")
self.GetElement(input_selector).send_keys(keys.Keys.ENTER)
self.WaitUntilEqual("/foo/%%environ_path%%", self.GetValue,
input_selector + ":text")
if __name__ == "__main__":
app.run(test_lib.main)
|
{
"content_hash": "d2089c91d0d4a6ceb0d2d36f7854aa26",
"timestamp": "",
"source": "github",
"line_count": 594,
"max_line_length": 80,
"avg_line_length": 39.925925925925924,
"alnum_prop": 0.6611148591668072,
"repo_name": "google/grr",
"id": "f9b29f38348c0bb6395e820ee219462050b1deef",
"size": "23738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/server/grr_response_server/gui/selenium_tests/hunt_create_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12697"
},
{
"name": "C++",
"bytes": "54814"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "366783"
},
{
"name": "JavaScript",
"bytes": "13088"
},
{
"name": "Jupyter Notebook",
"bytes": "199216"
},
{
"name": "Makefile",
"bytes": "3244"
},
{
"name": "PowerShell",
"bytes": "531"
},
{
"name": "Python",
"bytes": "8844725"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "SCSS",
"bytes": "105120"
},
{
"name": "Shell",
"bytes": "48663"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TypeScript",
"bytes": "2139377"
}
],
"symlink_target": ""
}
|
import os
import boto.swf
import json
import datetime
import time
import zipfile
import shutil
import re
from ftplib import FTP
import ftplib
import activity
import boto.s3
from boto.s3.connection import S3Connection
import provider.s3lib as s3lib
import provider.simpleDB as dblib
from elifetools import parseJATS as parser
from elifetools import xmlio
"""
PMCDeposit activity
"""
class activity_PMCDeposit(activity.activity):
def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
activity.activity.__init__(self, settings, logger, conn, token, activity_task)
self.name = "PMCDeposit"
self.version = "1"
self.default_task_heartbeat_timeout = 30
self.default_task_schedule_to_close_timeout = 60 * 30
self.default_task_schedule_to_start_timeout = 30
self.default_task_start_to_close_timeout = 60 * 15
self.description = ("Download single zip file an article, repackage it, " +
"send to PMC and notify them.")
# Local directory settings
self.TMP_DIR = self.get_tmp_dir() + os.sep + "tmp_dir"
self.INPUT_DIR = self.get_tmp_dir() + os.sep + "input_dir"
self.JUNK_DIR = self.get_tmp_dir() + os.sep + "junk_dir"
self.ZIP_DIR = self.get_tmp_dir() + os.sep + "zip_dir"
self.EPS_DIR = self.get_tmp_dir() + os.sep + "eps_dir"
self.TIF_DIR = self.get_tmp_dir() + os.sep + "tif_dir"
self.OUTPUT_DIR = self.get_tmp_dir() + os.sep + "output_dir"
# Data provider where email body is saved
self.db = dblib.SimpleDB(settings)
# Bucket settings
self.input_bucket = None
self.input_bucket_default = (settings.publishing_buckets_prefix +
settings.archive_bucket)
self.publish_bucket = settings.poa_packaging_bucket
self.published_folder = "pmc/published"
self.published_zip_folder = "pmc/zip"
# journal
self.journal = 'elife'
# Outgoing FTP settings are set later
self.FTP_URI = None
self.FTP_USERNAME = None
self.FTP_PASSWORD = None
self.FTP_CWD = None
self.FTP_SUBDIR = []
def do_activity(self, data=None):
"""
Activity, do the work
"""
if self.logger:
self.logger.info('data: %s' % json.dumps(data, sort_keys=True, indent=4))
# Data passed to this activity
self.document = data["data"]["document"]
# Custom bucket, if specified
if "bucket" in data["data"]:
self.input_bucket = data["data"]["bucket"]
else:
self.input_bucket = self.input_bucket_default
# Create output directories
self.create_activity_directories()
# Download the S3 objects
self.download_files_from_s3(self.document)
verified = None
# Check for an empty folder and respond True;
# if we do not, the workflow will continue to retry this activity
if len(self.file_list(self.INPUT_DIR)) <= 0:
if self.logger:
self.logger.info('folder was empty in PMCDeposit: ' + self.INPUT_DIR)
verified = True
folder = self.INPUT_DIR
if self.logger:
self.logger.info('processing files in folder ' + folder)
self.unzip_article_files(self.file_list(folder))
(fid, status, version, volume) = self.profile_article(self.document)
# Rename the files
file_name_map = self.rename_files_remove_version_number()
(verified, renamed_list, not_renamed_list) = self.verify_rename_files(file_name_map)
if self.logger:
self.logger.info("verified " + folder + ": " + str(verified))
self.logger.info(file_name_map)
if len(not_renamed_list) > 0:
if self.logger:
self.logger.info("not renamed " + str(not_renamed_list))
# Convert the XML
self.convert_xml(xml_file=self.article_xml_file(),
file_name_map=file_name_map)
# Get the new zip file name
# TODO - may need to take into account the r1 r2 revision numbers when replacing an article
revision = self.zip_revision_number(fid)
self.zip_file_name = self.new_zip_filename(self.journal, volume, fid, revision)
if self.logger:
self.logger.info("new PMC zip file name: " + self.zip_file_name)
self.create_new_zip(self.zip_file_name)
# Set FTP settings
self.set_ftp_settings(fid)
ftp_status = None
if verified and self.zip_file_name:
ftp_status = self.ftp_to_endpoint(self.file_list(self.ZIP_DIR), self.FTP_SUBDIR, passive=True)
if ftp_status is True:
self.upload_article_zip_to_s3()
# Send email
file_size = self.file_size(os.path.join(self.ZIP_DIR, self.zip_file_name))
self.add_email_to_queue(self.journal, volume, fid, revision, self.zip_file_name, file_size)
# Return the activity result, True or False
if verified is True and ftp_status is True:
result = True
else:
result = False
# Clean up disk
self.clean_tmp_dir()
return result
def set_ftp_settings(self, doi_id):
"""
Set the outgoing FTP server settings from the
PMC deposit settings
"""
self.FTP_URI = self.settings.PMC_FTP_URI
self.FTP_USERNAME = self.settings.PMC_FTP_USERNAME
self.FTP_PASSWORD = self.settings.PMC_FTP_PASSWORD
self.FTP_CWD = self.settings.PMC_FTP_CWD
def ftp_upload(self, ftp, file):
ext = os.path.splitext(file)[1]
#print file
uploadname = file.split(os.sep)[-1]
if ext in (".txt", ".htm", ".html"):
ftp.storlines("STOR " + file, open(file))
else:
#print "uploading " + uploadname
ftp.storbinary("STOR " + uploadname, open(file, "rb"), 1024)
#print "uploaded " + uploadname
def ftp_cwd_mkd(self, ftp, sub_dir):
"""
Given an FTP connection and a sub_dir name
try to cwd to the directory. If the directory
does not exist, create it, then cwd again
"""
cwd_success = None
try:
ftp.cwd(sub_dir)
cwd_success = True
except ftplib.error_perm:
# Directory probably does not exist, create it
ftp.mkd(sub_dir)
cwd_success = False
if cwd_success is not True:
ftp.cwd(sub_dir)
return cwd_success
def ftp_to_endpoint(self, uploadfiles, sub_dir_list=None, passive=True):
try:
for uploadfile in uploadfiles:
ftp = FTP()
if passive is False:
ftp.set_pasv(False)
ftp.connect(self.FTP_URI)
ftp.login(self.FTP_USERNAME, self.FTP_PASSWORD)
self.ftp_cwd_mkd(ftp, "/")
if self.FTP_CWD != "":
self.ftp_cwd_mkd(ftp, self.FTP_CWD)
if sub_dir_list is not None:
for sub_dir in sub_dir_list:
self.ftp_cwd_mkd(ftp, sub_dir)
self.ftp_upload(ftp, uploadfile)
ftp.quit()
return True
except Exception:
# Any FTP failure is reported to the caller as False
return False
def download_files_from_s3(self, document):
if self.logger:
self.logger.info('downloading VoR file ' + document)
subfolder_name = ""
# Connect to S3 and bucket
s3_conn = S3Connection(self.settings.aws_access_key_id, self.settings.aws_secret_access_key)
bucket = s3_conn.lookup(self.input_bucket)
s3_key_name = document
s3_key_names = [s3_key_name]
self.download_s3_key_names_to_subfolder(bucket, s3_key_names, subfolder_name)
def download_s3_key_names_to_subfolder(self, bucket, s3_key_names, subfolder_name):
for s3_key_name in s3_key_names:
# Download objects from S3 and save to disk
s3_key = bucket.get_key(s3_key_name)
filename = s3_key_name.split("/")[-1]
# Make the subfolder if it does not exist yet
try:
os.mkdir(self.INPUT_DIR + os.sep + subfolder_name)
except OSError:
# subfolder already exists
pass
filename_plus_path = (self.INPUT_DIR
+ os.sep + subfolder_name
+ os.sep + filename)
mode = "wb"
f = open(filename_plus_path, mode)
s3_key.get_contents_to_file(f)
f.close()
def upload_article_zip_to_s3(self):
"""
Upload PMC zip file to S3
"""
bucket_name = self.publish_bucket
# Connect to S3 and bucket
s3_conn = S3Connection(self.settings.aws_access_key_id, self.settings.aws_secret_access_key)
bucket = s3_conn.lookup(bucket_name)
for file_name in self.file_list(self.ZIP_DIR):
s3_key_name = self.published_zip_folder + '/' + self.file_name_from_name(file_name)
s3_key = boto.s3.key.Key(bucket)
s3_key.key = s3_key_name
s3_key.set_contents_from_filename(file_name, replace=True)
def list_dir(self, dir_name):
dir_list = os.listdir(dir_name)
dir_list = map(lambda item: dir_name + os.sep + item, dir_list)
return dir_list
def folder_list(self, dir_name):
dir_list = self.list_dir(dir_name)
return filter(lambda item: os.path.isdir(item), dir_list)
def file_list(self, dir_name):
dir_list = self.list_dir(dir_name)
return filter(lambda item: os.path.isfile(item), dir_list)
def folder_name_from_name(self, input_dir, file_name):
folder_name = file_name.split(input_dir)[1]
folder_name = folder_name.split(os.sep)[1]
return folder_name
def file_name_from_name(self, file_name):
name = file_name.split(os.sep)[-1]
return name
def file_extension(self, file_name):
name = self.file_name_from_name(file_name)
if name:
if len(name.split('.')) > 1:
return name.split('.')[-1]
else:
return None
return None
def file_size(self, file_name):
return os.path.getsize(file_name)
def unzip_or_move_file(self, file_name, to_dir, do_unzip=True):
"""
If the file extension is zip, then unzip the contents;
otherwise copy the file into to_dir unchanged
"""
if self.file_extension(file_name) == 'zip' and do_unzip is True:
# Unzip
if self.logger:
self.logger.info("going to unzip " + file_name + " to " + to_dir)
myzip = zipfile.ZipFile(file_name, 'r')
myzip.extractall(to_dir)
elif self.file_extension(file_name):
# Copy
if self.logger:
self.logger.info("going to move and not unzip " + file_name + " to " + to_dir)
shutil.copyfile(file_name, to_dir + os.sep + self.file_name_from_name(file_name))
def approve_file(self, file_name):
return True
def unzip_article_files(self, file_list):
for file_name in file_list:
if self.approve_file(file_name):
if self.logger:
self.logger.info("unzipping or moving file " + file_name)
self.unzip_or_move_file(file_name, self.TMP_DIR)
def rename_files_remove_version_number(self):
"""
Rename files so they do not include the version number, if present.
Pre-PPP files, from before PPP launched, will not have a version number.
"""
file_name_map = {}
# Get a list of all files
dirfiles = self.file_list(self.TMP_DIR)
for df in dirfiles:
filename = df.split(os.sep)[-1]
# Get the new file name
file_name_map[filename] = None
# TODO strip the -v1 from it
file_extension = filename.split('.')[-1]
if '-v' in filename:
# Use part before the -v number
part_without_version = filename.split('-v')[0]
else:
# No -v found, use the file name minus the extension
part_without_version = ''.join(filename.split('.')[0:-1])
renamed_filename = part_without_version + '.' + file_extension
if renamed_filename:
file_name_map[filename] = renamed_filename
else:
if self.logger:
self.logger.info('there is no renamed file for ' + filename)
for old_name, new_name in file_name_map.iteritems():
if new_name is not None:
shutil.move(self.TMP_DIR + os.sep + old_name, self.OUTPUT_DIR + os.sep + new_name)
return file_name_map
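# e.g. 'elife-00013-v1.pdf' is moved to the output dir as 'elife-00013.pdf';
# a file with no version suffix such as 'elife-00013.xml' keeps its name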
def verify_rename_files(self, file_name_map):
"""
Each file name key should have a non-None value;
otherwise the file did not get renamed and the
rename process was not complete
"""
verified = True
renamed_list = []
not_renamed_list = []
for k, v in file_name_map.items():
if v is None:
verified = False
not_renamed_list.append(k)
else:
renamed_list.append(k)
return (verified, renamed_list, not_renamed_list)
def convert_xml(self, xml_file, file_name_map):
# Register namespaces
xmlio.register_xmlns()
root, doctype_dict = xmlio.parse(xml_file, return_doctype_dict=True)
# Convert xlink href values
total = xmlio.convert_xlink_href(root, file_name_map)
# TODO - compare whether all file names were converted
# Start the file output
reparsed_string = xmlio.output(root, type=None, doctype_dict=doctype_dict)
f = open(xml_file, 'wb')
f.write(reparsed_string)
f.close()
def zip_revision_number(self, fid):
"""
Look at previously supplied files and determine the
next revision number
"""
revision = None
bucket_name = self.publish_bucket
prefix = self.published_zip_folder + '/'
# Connect to S3 and bucket
s3_conn = S3Connection(self.settings.aws_access_key_id, self.settings.aws_secret_access_key)
bucket = s3_conn.lookup(bucket_name)
s3_key_names = s3lib.get_s3_key_names_from_bucket(
bucket=bucket,
prefix=prefix)
s3_key_name = s3lib.latest_pmc_zip_revision(fid, s3_key_names)
if s3_key_name:
# Found an existing PMC zip file, look for a revision number
revision_match = re.match(ur'.*r(.*)\.zip$', s3_key_name)
if revision_match is None:
# There is a zip but no revision number, use 1
revision = 1
else:
# Use the latest revision plus 1
revision = int(revision_match.group(1)) + 1
return revision
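# e.g. if the latest stored zip is a key like 'pmc/zip/elife-05-00013.r2.zip'
# (hypothetical name) the next revision is 3; an existing zip with no '.r'
# suffix yields revision 1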
def new_zip_filename(self, journal, volume, fid, revision=None):
filename = journal
filename = filename + '-' + str(volume).zfill(2)
filename = filename + '-' + str(fid).zfill(5)
if revision:
filename = filename + '.r' + str(revision)
filename += '.zip'
return filename
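# e.g. new_zip_filename('elife', 5, 13, 2) -> 'elife-05-00013.r2.zip';
# with no revision -> 'elife-05-00013.zip'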
def create_new_zip(self, zip_file_name):
if self.logger:
self.logger.info("creating new PMC zip file named " + zip_file_name)
new_zipfile = zipfile.ZipFile(self.ZIP_DIR + os.sep + zip_file_name,
'w', zipfile.ZIP_DEFLATED, allowZip64=True)
dirfiles = self.file_list(self.OUTPUT_DIR)
for df in dirfiles:
filename = df.split(os.sep)[-1]
new_zipfile.write(df, filename)
new_zipfile.close()
def profile_article(self, document):
"""
Temporary, profile the article by folder names in test data set
In real code we still want this to return the same values
"""
# Temporary setting of version values from directory names
soup = self.article_soup(self.article_xml_file())
# elife id / doi id / manuscript id
fid = parser.doi(soup).split('.')[-1]
# article status
if parser.is_poa(soup) is True:
status = 'poa'
else:
status = 'vor'
# version
version = self.version_number(document)
# volume
volume = parser.volume(soup)
return (fid, status, version, volume)
def version_number(self, document):
version = None
m = re.search(ur'-v([0-9]*?)[\.|-]', document)
if m is not None:
version = m.group(1)
return version
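# e.g. version_number('elife-00013-v1.zip') -> '1'; returns None when
# there is no -v suffix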
def article_xml_file(self):
"""
Two directories the XML file might be in depending on the step;
search both and return the first .xml file found, else None
"""
for folder in (self.TMP_DIR, self.OUTPUT_DIR):
for file_name in self.file_list(folder):
if file_name.endswith('.xml'):
return file_name
return None
def article_soup(self, xml_filename):
return parser.parse_document(xml_filename)
def add_email_to_queue(self, journal, volume, fid, revision, file_name, file_size):
"""
After do_activity is finished, send emails to recipients
on the status
"""
# Connect to DB
db_conn = self.db.connect()
current_time = time.gmtime()
body = self.get_email_body(current_time, journal, volume, fid, revision,
file_name, file_size)
if revision:
subject = self.get_revision_email_subject(fid)
else:
subject = self.get_email_subject(current_time, journal, volume, fid, revision,
file_name, file_size)
sender_email = self.settings.ses_pmc_sender_email
recipient_email_list = self.email_recipients(revision)
for email in recipient_email_list:
# Add the email to the email queue
self.db.elife_add_email_to_email_queue(
recipient_email=email,
sender_email=sender_email,
email_type="PMCDeposit",
format="text",
subject=subject,
body=body)
return True
def email_recipients(self, revision):
"""
Get a list of email recipients depending on the revision number
because for PMC we will redirect a revision email to different recipients
"""
recipient_email_list = []
if revision:
settings_email_recipient = self.settings.ses_pmc_revision_recipient_email
else:
settings_email_recipient = self.settings.ses_pmc_recipient_email
# Handle multiple recipients, if specified
if type(settings_email_recipient) == list:
for email in settings_email_recipient:
recipient_email_list.append(email)
else:
recipient_email_list.append(settings_email_recipient)
return recipient_email_list
def get_revision_email_subject(self, fid):
"""
Email subject line for notifying production about a PMC revision
"""
subject = "You need to email PMC: article " + str(fid).zfill(5) + "!!!"
return subject
def get_email_subject(self, current_time, journal, volume, fid, revision,
file_name, file_size):
date_format = '%Y-%m-%d %H:%M'
datetime_string = time.strftime(date_format, current_time)
subject = (journal + " PMC deposit " + datetime_string + ", article " + str(fid).zfill(5))
if revision:
subject += ", revision " + str(revision)
return subject
def email_body_revision_header(self, revision):
header = None
if revision:
header = "Production please forward this to PMC with details of what changed"
return header
def get_email_body(self, current_time, journal, volume, fid, revision,
file_name, file_size):
body = ""
date_format = '%Y-%m-%dT%H:%M'
datetime_string = time.strftime(date_format, current_time)
# Header
if self.email_body_revision_header(revision):
body += self.email_body_revision_header(revision)
body += "\n\n"
# Include the subject line to be used
revision_email_subject = self.get_email_subject(current_time, journal, volume, fid,
revision, file_name, file_size)
body += str(revision_email_subject)
body += "\n\n"
# Bulk of body
body += "PMCDeposit activity" + "\n"
body += "\n"
body += journal + " deposit date: " + datetime_string + "\n"
body += "\n"
body += "Journal title: " + journal + "\n"
body += "Volume: " + str(volume).zfill(2) + "\n"
body += "Article: " + str(fid).zfill(2) + "\n"
if revision:
revision_text = str(revision)
else:
revision_text = "n/a"
body += "Revision: " + revision_text + "\n"
body += "\n"
body += "Zip filename: " + file_name + "\n"
body += "File size (bytes): " + str(file_size) + "\n"
body += "\n"
body += "\n\nSincerely\n\neLife bot"
return body
def create_activity_directories(self):
"""
Create the directories in the activity tmp_dir
"""
try:
os.mkdir(self.TMP_DIR)
os.mkdir(self.INPUT_DIR)
os.mkdir(self.JUNK_DIR)
os.mkdir(self.ZIP_DIR)
os.mkdir(self.EPS_DIR)
os.mkdir(self.TIF_DIR)
os.mkdir(self.OUTPUT_DIR)
except OSError:
pass
|
{
"content_hash": "3f3c1514d844a85c96debf9636414bc4",
"timestamp": "",
"source": "github",
"line_count": 678,
"max_line_length": 107,
"avg_line_length": 32.892330383480825,
"alnum_prop": 0.5633828079458321,
"repo_name": "gnott/elife-bot",
"id": "353f208edc88825f7ed8b497f884d58d8c7e3844",
"size": "22301",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "activity/activity_PMCDeposit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "53428"
},
{
"name": "HTML",
"bytes": "3975"
},
{
"name": "Python",
"bytes": "1295112"
},
{
"name": "Shell",
"bytes": "2363"
}
],
"symlink_target": ""
}
|
"""
ILCM tutorial on mnist using advbox tool.
The ILCM method extends BIM to support targeted attacks.
"""
import sys
sys.path.append("..")
import matplotlib.pyplot as plt
import paddle.fluid as fluid
import paddle
from advbox.adversary import Adversary
from advbox.attacks.gradient_method import ILCM
from advbox.models.paddle import PaddleModel
from tutorials.mnist_model import mnist_cnn_model
def main():
"""
Advbox demo which demonstrate how to use advbox.
"""
TOTAL_NUM = 500
IMG_NAME = 'img'
LABEL_NAME = 'label'
img = fluid.layers.data(name=IMG_NAME, shape=[1, 28, 28], dtype='float32')
# gradient should flow
img.stop_gradient = False
label = fluid.layers.data(name=LABEL_NAME, shape=[1], dtype='int64')
logits = mnist_cnn_model(img)
cost = fluid.layers.cross_entropy(input=logits, label=label)
avg_cost = fluid.layers.mean(x=cost)
# use CPU
place = fluid.CPUPlace()
# use GPU
# place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
BATCH_SIZE = 1
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.test(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
fluid.io.load_params(
exe, "./mnist/", main_program=fluid.default_main_program())
# advbox demo
m = PaddleModel(
fluid.default_main_program(),
IMG_NAME,
LABEL_NAME,
logits.name,
avg_cost.name, (-1, 1),
channel_axis=1)
attack = ILCM(m)
attack_config = {"epsilons": 0.1, "steps": 100}
# Run the same targeted attack over the train data and then the test data
for reader, tag in ((train_reader, "TRAIN_DATASET"),
(test_reader, "TEST_DATASET")):
total_count = 0
fooling_count = 0
for data in reader():
total_count += 1
adversary = Adversary(data[0][0], data[0][1])
tlabel = 0
adversary.set_target(is_targeted_attack=True, target_label=tlabel)
# ILCM targeted attack
adversary = attack(adversary, **attack_config)
if adversary.is_successful():
fooling_count += 1
print(
'attack success, original_label=%d, adversarial_label=%d, count=%d'
% (data[0][1], adversary.adversarial_label, total_count))
# plt.imshow(adversary.target, cmap='Greys_r')
# plt.show()
# np.save('adv_img', adversary.target)
else:
print('attack failed, original_label=%d, count=%d' %
(data[0][1], total_count))
if total_count >= TOTAL_NUM:
print(
"[%s]: fooling_count=%d, total_count=%d, fooling_rate=%f"
% (tag, fooling_count, total_count,
float(fooling_count) / total_count))
break
print("ilcm attack done")
if __name__ == '__main__':
main()
|
{
"content_hash": "7a859b57f1f5554d9c7644e9b402cb67",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 84,
"avg_line_length": 31.5,
"alnum_prop": 0.5811965811965812,
"repo_name": "kuke/models",
"id": "b12ffaab0367769d9bf9d58ec7396c8edd2487e9",
"size": "4095",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "fluid/adversarial/tutorials/mnist_tutorial_ilcm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15149"
},
{
"name": "Perl",
"bytes": "2072"
},
{
"name": "Python",
"bytes": "2905007"
},
{
"name": "Shell",
"bytes": "2506531"
}
],
"symlink_target": ""
}
|
"""
Creates an index of OBO terms in either Python Pickle format or JSON
Input:
You provide a full go.obo file. Only each term's id, name, alt_id and is_a
relationships are used; the rest is ignored.
Output:
The output is a lookup whose 'terms' key maps every GO term ID (including
alt_ids) to its namespace, graph vertex index and name. The pickle output
also stores the per-namespace igraph graphs under a 'graph' key. Stored as a
python pickled dict() file or JSON text file, depending on the option passed.
NOTES:
Terms with namespace: external are skipped.
"""
import argparse
import os
import pickle
import json
import re
import igraph
def main():
parser = argparse.ArgumentParser(description='Creates a quick-lookup index of OBO terms and their namespace graphs')
## input and output files
parser.add_argument('-i', '--obo_file', type=str, required=True, help='Input file with ALL terms, such as go.obo' )
parser.add_argument('-o', '--output_file', type=str, required=True, help='Output file to be created.' )
parser.add_argument('-f', '--output_format', type=str, required=False, default='pickle', help='Output format (pickle or json)' )
args = parser.parse_args()
# Parse the OBO file and store a term lookup as well as graphs for each namespace
full_terms, full_graph = parse_obo_graph(args.obo_file)
if args.output_format == 'pickle':
out_data = { 'terms': full_terms, 'graph': full_graph }
with open(args.output_file, 'wb') as f:
pickle.dump(out_data, f, pickle.HIGHEST_PROTOCOL)
elif args.output_format == 'json':
out_data = { 'terms': full_terms }
with open(args.output_file, 'wt') as f:
f.write(json.dumps(out_data))
else:
raise Exception("Error, --output_format must be either 'json' or 'pickle'")
print("\nDone")
def parse_obo_graph(path):
stored_pickles_found = False
g = {'biological_process': igraph.Graph(directed=True),
'cellular_component': igraph.Graph(directed=True),
'molecular_function': igraph.Graph(directed=True) }
for ns in g:
pickle_file_path = "{0}.{1}.graph".format(path, ns)
if os.path.exists(pickle_file_path):
print("Using stored ontology graph: {0}".format(pickle_file_path))
g[ns] = igraph.Graph.Read_Pickle(fname=pickle_file_path)
stored_pickles_found = True
# key: GO:ID, value = {'ns': 'biological_process', 'idx': 25}
terms = dict()
if stored_pickles_found is True:
print("Using stored terms data structure: {0}.terms".format(path))
with open("{0}.terms".format(path), 'rb') as f:
terms = pickle.load(f)
# key: namespace, value=int
next_idx = {'biological_process': 0,
'cellular_component': 0,
'molecular_function': 0 }
id = None
namespace = None
name = None
# Pass through the file once just to get all the GO terms and their namespaces
# This makes the full pass far easier, since terms can be referenced which haven't
# been seen yet.
if stored_pickles_found is False:
alt_ids = list()
for line in open(path):
line = line.rstrip()
if line.startswith('[Term]'):
if id is not None:
# error checking
if namespace is None:
raise Exception("Didn't find a namespace for term {0}".format(id))
g[namespace].add_vertices(1)
idx = next_idx[namespace]
g[namespace].vs[idx]['id'] = id
g[namespace].vs[idx]['name'] = name
next_idx[namespace] += 1
terms[id] = {'ns': namespace, 'idx': idx, 'name': name}
# duplicate this for any aliases
for alt_id in alt_ids:
terms[alt_id] = {'ns': namespace, 'idx': idx, 'name': name}
# reset for next term
id = None
namespace = None
name = None
alt_ids = list()
elif line.startswith('id:'):
id = line.split(' ')[1]
elif line.startswith('namespace:'):
namespace = line.split(' ')[1]
elif line.startswith('name:'):
m = re.match('name: (.+)', line)
if m:
name = m.group(1).rstrip()
else:
raise Exception("Failed to regex this line: {0}".format(line))
elif line.startswith('alt_id: '):
alt_ids.append(line.split(' ')[1])
id = None
namespace = None
name = None
is_obsolete = False
is_a = list()
# Now actually parse the rest of the properties
if stored_pickles_found is False:
for line in open(path):
line = line.rstrip()
if line.startswith('[Term]'):
if id is not None:
# make any edges in the graph
for is_a_id in is_a:
# these two terms should be in the same namespace
if terms[id]['ns'] != terms[is_a_id]['ns']:
raise Exception("is_a relationship found with terms in different namespaces")
g[namespace].add_edge(terms[id]['idx'], terms[is_a_id]['idx'])
# reset for this term
id = None
namespace = None
is_obsolete = False
is_a = list()
elif line.startswith('id:'):
id = line.split(' ')[1]
elif line.startswith('namespace:'):
namespace = line.split(' ')[1]
elif line.startswith('is_a:'):
is_a.append(line.split(' ')[1])
if stored_pickles_found is False:
for ns in g:
pickle_file_path = "{0}.{1}.graph".format(path, ns)
g[ns].write_pickle(fname=pickle_file_path)
## save the terms too so we don't have to redo that parse
with open("{0}.terms".format(path), 'wb') as f:
pickle.dump(terms, f, pickle.HIGHEST_PROTOCOL)
return terms, g
if __name__ == '__main__':
main()
|
{
"content_hash": "e19e26bf4cf9ae87a8708f16d84f2817",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 132,
"avg_line_length": 33.82631578947368,
"alnum_prop": 0.5506457133966081,
"repo_name": "jorvis/biocode",
"id": "e11a97d1e1efd227456aab8293c21745539433c0",
"size": "6451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "general/make_go_index.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1865"
},
{
"name": "Perl",
"bytes": "311270"
},
{
"name": "Python",
"bytes": "1124360"
},
{
"name": "R",
"bytes": "5481"
},
{
"name": "Shell",
"bytes": "9642"
}
],
"symlink_target": ""
}
|
"""A client for in-process kernels."""
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# IPython imports
from IPython.utils.traitlets import Type, Instance
from IPython.kernel.clientabc import KernelClientABC
from IPython.kernel.client import KernelClient
# Local imports
from .channels import (
InProcessShellChannel,
InProcessIOPubChannel,
InProcessHBChannel,
InProcessStdInChannel,
)
#-----------------------------------------------------------------------------
# Main kernel Client class
#-----------------------------------------------------------------------------
class InProcessKernelClient(KernelClient):
"""A client for an in-process kernel.
This class implements the interface of
`IPython.kernel.clientabc.KernelClientABC` and allows
(asynchronous) frontends to be used seamlessly with an in-process kernel.
See `IPython.kernel.client.KernelClient` for docstrings.
"""
# The classes to use for the various channels.
shell_channel_class = Type(InProcessShellChannel)
iopub_channel_class = Type(InProcessIOPubChannel)
stdin_channel_class = Type(InProcessStdInChannel)
hb_channel_class = Type(InProcessHBChannel)
kernel = Instance('IPython.kernel.inprocess.ipkernel.InProcessKernel')
#--------------------------------------------------------------------------
# Channel management methods
#--------------------------------------------------------------------------
def start_channels(self, *args, **kwargs):
super(InProcessKernelClient, self).start_channels(self)
self.kernel.frontends.append(self)
@property
def shell_channel(self):
if self._shell_channel is None:
self._shell_channel = self.shell_channel_class(self)
return self._shell_channel
@property
def iopub_channel(self):
if self._iopub_channel is None:
self._iopub_channel = self.iopub_channel_class(self)
return self._iopub_channel
@property
def stdin_channel(self):
if self._stdin_channel is None:
self._stdin_channel = self.stdin_channel_class(self)
return self._stdin_channel
@property
def hb_channel(self):
if self._hb_channel is None:
self._hb_channel = self.hb_channel_class(self)
return self._hb_channel
#-----------------------------------------------------------------------------
# ABC Registration
#-----------------------------------------------------------------------------
KernelClientABC.register(InProcessKernelClient)
|
{
"content_hash": "836ccd6a31c26a341d58ba3662c651b1",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 34.08988764044944,
"alnum_prop": 0.523731048121292,
"repo_name": "mattvonrocketstein/smash",
"id": "5f31265771c5170bf53bda4225589e358089b8a0",
"size": "3034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smashlib/ipy3x/kernel/inprocess/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162188"
},
{
"name": "HTML",
"bytes": "32106"
},
{
"name": "JavaScript",
"bytes": "1615935"
},
{
"name": "Makefile",
"bytes": "550"
},
{
"name": "Python",
"bytes": "4934398"
},
{
"name": "Shell",
"bytes": "2990"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
PACKAGE_NAME = 'cenaming'
ENCODING = 'utf-8'
local_directory = os.path.abspath(os.path.dirname(__file__))
version_path = os.path.join(local_directory, PACKAGE_NAME, '_version.py')
version_ns = {}
with open(version_path, 'r', encoding=ENCODING) as f:
exec(f.read(), {}, version_ns)
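# _version.py is expected to contain an assignment along the lines of
# __version__ = '0.1.0'; exec-ing it keeps the version in one place without
# importing the package at install time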
def get_requirements(requirement_file):
# splitlines() copes with both unix and windows line endings
requirements = list(open(requirement_file).read().strip().splitlines())
return requirements
setup(name=PACKAGE_NAME,
packages=[PACKAGE_NAME],
include_package_data=True,
license='MIT',
version=version_ns['__version__'],
description='Company legal name normalization and shortening.',
url='https://github.com/portfoliome/cenaming',
author='Philip Martin',
author_email='philip.martin@censible.co',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities'
],
keywords='companies finance names',
extras_require={
'develop': get_requirements('requirements-dev.txt'),
'test': get_requirements('requirements-test.txt')
},
zip_safe=False)
|
{
"content_hash": "6c9a42f0536f2d30e64c477b7ac50b61",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 31.372093023255815,
"alnum_prop": 0.6360266864343959,
"repo_name": "portfoliome/cenaming",
"id": "2bdcadeba29aa7e0a9ad2d27730c48a925ff0b86",
"size": "1372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7168"
}
],
"symlink_target": ""
}
|
import time
import sys
from i_o import io
from alu import ALU
from memory import Memory
class Controller:
def __init__(self):
"""Model of a standard contoller unit demonstrating the Texas 4-step (fetch,decode,execute,store) with methods for each."""
self.R1 = 0 #General purpose register to store final result.
self.R2 = 0 #General purpose register to store file length.
self.R3 = "" #Storage register to store mnemonic from memory.
self.IR = 0 #Instruction register
self.PC = 0 #Program counter/accumulator
self.running = False #Machine state
self.clock = time.time() #Start system clock
        self.ALU = ALU() #Arithmetic-logic unit
self.MEM = Memory() #System memory
def loader(self, cfile):
"""Load file data into memory prior 4-step and store it into gp register R2."""
io("Loading " + cfile + "...")
self.R2 = self.MEM.loader(cfile)
io("Processed: " + str(self.R2) + " lines.")
io("Machine is running...")
self.running = True
def fetch(self):
"""Fetch next instruction stored in memory using PC address and store it into storage register R3."""
self.R3 = self.MEM.read(self.PC)
io("|FETCH|--> address(" + str(self.PC) + ")")
io(">read(" +str(self.PC)+ "->" + self.R3 + ")")
def decode(self):
"""Decode data fetched. Determine if valid value or instruction, bump accumulator, and store in inst register IR."""
self.PC += 1
try:
self.IR = int(self.R3)
self.R3 = "push"
io("\t|DECODE|--> decoding(" + str(self.IR) + ")...")
io("\t>found operand(" + str(self.IR) + ")")
io("\t>valid instruction")
except ValueError:
self.IR = self.R3
io("\t|DECODE|--> decoding(" + self.IR + ")...")
io("\t>found operator(" + self.IR + ")")
io("\t>valid instruction")
def execute(self):
"""Execute instruction fetched from memory and operate/manage stack memory."""
op = self.R3
if op == "push":
io("\t\t|EXECUTE|--> " + op)
io("\t\t>pushed(" + str(self.IR) + ")")
self.MEM.push(self.IR)
else:
op1, op2 = self.MEM.pop()
io("\t\t|EXECUTE|--> " + op + "(" + str(op1) + "," + str(op2) + ")")
io("\t\t>pop <-(" + str(op1) + ")")
io("\t\t>pop <-(" + str(op2) + ")")
self.R1 = self.ALU.operate(op, op1, op2)
io("\t\t>push ->(" + str(self.R1) + ")")
self.MEM.push(self.R1)
def store(self):
"""Store resulting data, output value that is stored in register."""
io("\t\t\t|STORE|--> storing(" + str(self.R1) + ")")
def run(self):
"""Start steppin. Begin the Texas 4-step, calculate/display the total processing time, and display final result."""
while self.running and self.PC < self.R2:
io('=' * 62)
self.fetch()
self.decode()
self.execute()
self.store()
io('='*62 + "\nResult:\t" + str(self.R1) + "\nTime :\t"
+ str(round(time.time() - self.clock, 4)) + " s\n" +'='*62)
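# Usage sketch (hypothetical file name and token format): assuming a file
# "program.rpn" holds one RPN token per line (e.g. "3", "4", "add"), a run
# looks like this:
#
#     cpu = Controller()
#     cpu.loader("program.rpn")  # fill memory, record the line count in R2
#     cpu.run()                  # fetch/decode/execute/store until PC == R2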
|
{
"content_hash": "e3bd5518b2748db24a19470cdf7e17ee",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 131,
"avg_line_length": 41.8375,
"alnum_prop": 0.5162832387212429,
"repo_name": "yebra06/RPNv2-CPU-Simulator",
"id": "dcbf7b09a607ea09b8ada1bd45581f84f0925949",
"size": "3347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/controller.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7036"
}
],
"symlink_target": ""
}
|
import re
import time
import os
import select
from os import access
from os.path import join, exists, getmtime, getsize
from urllib import unquote
from BaseHTTPServer import BaseHTTPRequestHandler as _
from libs.git import Git
def format_date_time(timestamp):
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
_.weekdayname[wd], day, _.monthname[month], year, hh, mm, ss
)
def callback(p):
    """Incrementally yield the subprocess's stdout (and any stderr) using
    select(), so large git responses are streamed instead of fully buffered."""
ofd = p.stdout.fileno()
efd = p.stderr.fileno()
while True:
r_ready, w_ready, x_ready = select.select([ofd, efd], [], [], 30)
if ofd in r_ready:
data = os.read(ofd, 8192)
if not data:
break
yield data
if efd in r_ready:
data = os.read(efd, 8192)
yield data
break
output, err = p.communicate()
if output:
yield output
if err:
yield err
class GHTTPServer(object):
VALID_SERVICE_TYPES = ['upload-pack', 'receive-pack']
SERVICES = [
["POST", 'service_rpc', re.compile("(.*?)/git-upload-pack$"), 'upload-pack'],
["POST", 'service_rpc', re.compile("(.*?)/git-receive-pack$"), 'receive-pack'],
["GET", 'get_info_refs', re.compile("(.*?)/info/refs$")],
["GET", 'get_text_file', re.compile("(.*?)/HEAD$")],
["GET", 'get_text_file', re.compile("(.*?)/objects/info/alternates$")],
["GET", 'get_text_file', re.compile("(.*?)/objects/info/http-alternates$")],
["GET", 'get_info_packs', re.compile("(.*?)/objects/info/packs$")],
["GET", 'get_text_file', re.compile("(.*?)/objects/info/[^/]*$")],
["GET", 'get_loose_object', re.compile("(.*?)/objects/[0-9a-f]{2}/[0-9a-f]{38}$")],
["GET", 'get_pack_file', re.compile("(.*?)/objects/pack/pack-[0-9a-f]{40}\\.pack$")],
["GET", 'get_idx_file', re.compile("(.*?)/objects/pack/pack-[0-9a-f]{40}\\.idx$")],
]
def __init__(self, config=None):
self.headers = {}
self.set_config(config)
        # Avoid a hard-coded, user-specific git path; allow overriding via config.
        self.git = Git(self.config.get("git_path", "git"))
self.RE_SERVICES = []
def set_config(self, config):
self.config = config or {}
def set_config_setting(self, key, value):
self.config[key] = value
def __call__(self, environ, start_response):
self.env = environ
body = self.call()
start_response(self.status, self.headers.items())
return body
def call(self):
match = self.match_routing(self.env["PATH_INFO"].lstrip('/'), self.env["REQUEST_METHOD"])
if not match:
return self.render_not_found()
cmd, path, reqfile, rpc = match
self.rpc = rpc
self.reqfile = reqfile
if cmd == "not_allowed":
return self.render_method_not_allowed()
self.dir = self.get_git_dir(path)
if not self.dir:
return self.render_not_found()
func = getattr(self, cmd)
return func()
    def service_rpc(self):
        if not self.has_access(self.rpc, True):
            return self.render_no_access()
        body = self.read_body  # renamed from 'input' to avoid shadowing the builtin
        git_cmd = "upload_pack" if self.rpc == "upload-pack" else "receive_pack"
        self.status = "200"
        self.headers["Content-Type"] = "application/x-git-%s-result" % self.rpc
        return getattr(self.git, git_cmd)(self.dir, {"msg": body}, callback)
def get_info_refs(self):
service_name = self.get_service_type()
if self.has_access(service_name):
git_cmd = "upload_pack" if service_name == "upload-pack" else "receive_pack"
refs = getattr(self.git, git_cmd)(self.dir, {"advertise_refs": True})
self.status = "200"
self.headers["Content-Type"] = "application/x-git-%s-advertisement" % service_name
self.hdr_nocache()
def read_file():
yield self.pkt_write("# service=git-%s\n" % service_name)
yield self.pkt_flush
yield refs
return read_file()
else:
return self.dumb_info_refs()
def get_text_file(self):
return self.send_file(self.reqfile, "text/plain")
def dumb_info_refs(self):
self.update_server_info()
return self.send_file(self.reqfile, "text/plain; charset=utf-8")
def get_info_packs(self):
# objects/info/packs
return self.send_file(self.reqfile, "text/plain; charset=utf-8")
def get_loose_object(self):
return self.send_file(self.reqfile, "application/x-git-loose-object", cached=True)
def get_pack_file(self):
return self.send_file(self.reqfile, "application/x-git-packed-objects", cached=True)
def get_idx_file(self):
return self.send_file(self.reqfile, "application/x-git-packed-objects-toc", cached=True)
def get_service_type(self):
def get_param():
for query in self.env["QUERY_STRING"].split('&'):
param = tuple(query.split('='))
if param and param[0] == "service":
return param[1]
service_type = get_param()
if not service_type:
return False
        if not service_type.startswith('git-'):
            return False
        return service_type[4:]  # strip only the leading 'git-' prefix
@classmethod
def match_routing(cls, path_info, request_method):
for service in cls.SERVICES:
rpc = None
if len(service) == 4:
method, handler, re_match, rpc = service
elif len(service) == 3:
method, handler, re_match = service
m = re_match.match(path_info)
if m:
if method != request_method:
return ["not_allowed", None, None, None]
cmd = handler
path = m.group(1)
file = path_info.replace(path + '/', '')
return [cmd, path, file, rpc]
return None
def send_file(self, reqfile, content_type, cached=False):
reqfile = join(self.dir, reqfile)
if not self.is_subpath(reqfile, self.dir):
return self.render_no_access()
if not exists(reqfile) or not access(reqfile, os.R_OK):
return self.render_not_found()
self.status = "200"
self.headers["Content-Type"] = content_type
self.headers["Last-Modified"] = format_date_time(getmtime(reqfile))
if cached:
            self.hdr_cache_forever()
else:
self.hdr_nocache()
size = getsize(reqfile)
if size:
self.headers["Content-Length"] = size
def read_file():
with open(reqfile, "rb") as f:
while True:
part = f.read(8192)
if not part:
break
yield part
return read_file()
else:
with open(reqfile, "rb") as f:
part = f.read()
self.headers["Content-Length"] = str(len(part))
return [part]
def update_server_info(self):
self.git.update_server_info(self.dir)
@property
def read_body(self):
input = self.env["wsgi.input"]
return input.read()
# ------------------------------
# packet-line handling functions
# ------------------------------
@property
def pkt_flush(self):
return '0000'
    def pkt_write(self, data):
        # pkt-line format: a 4-digit lowercase hex length (which counts the
        # 4-byte header itself) followed by the payload, e.g.
        # pkt_write("# service=git-upload-pack\n") -> "001e# service=git-upload-pack\n"
        return "{0:04x}{1}".format(len(data) + 4, data)
# ------------------------
# header writing functions
# ------------------------
def hdr_nocache(self):
self.headers["Expires"] = "Fri, 01 Jan 1980 00:00:00 GMT"
self.headers["Pragma"] = "no-cache"
self.headers["Cache-Control"] = "no-cache, max-age=0, must-revalidate"
    def hdr_cache_forever(self):
        now = int(time.time())
        # HTTP date headers must be RFC 1123 formatted, not raw epoch seconds.
        self.headers["Date"] = format_date_time(now)
        self.headers["Expires"] = format_date_time(now + 31536000)
        self.headers["Cache-Control"] = "public, max-age=31536000"
# --------------------------------------
# HTTP error response handling functions
# --------------------------------------
    def render_method_not_allowed(self):
        # The original assigned env = [] and then indexed it with a string,
        # which raises TypeError; the WSGI environ is on self.env.
        if self.env["SERVER_PROTOCOL"] == "HTTP/1.1":
            self.status = "405"
            self.headers["Content-Type"] = "text/plain"
            return ["Method Not Allowed"]
        else:
            self.status = "400"
            self.headers["Content-Type"] = "text/plain"
            return ["Bad Request"]
def render_not_found(self):
self.status = "404"
self.headers["Content-Type"] = "text/plain"
return ["Not Found"]
def render_no_access(self):
self.status = "403"
self.headers["Content-Type"] = "text/plain"
return ["Forbidden"]
def has_access(self, rpc, check_content_type=False):
if check_content_type:
if self.env["CONTENT_TYPE"] != "application/x-git-%s-request" % rpc:
return False
if rpc not in self.VALID_SERVICE_TYPES:
return False
if rpc == 'receive-pack':
if "receive_pack" in self.config:
return self.config.get("receive_pack")
if rpc == 'upload-pack':
if "upload_pack" in self.config:
return self.config.get("upload_pack")
return self.get_config_setting(rpc)
def get_config_setting(self, service_name):
service_name = service_name.replace('-', '')
setting = self.git.get_config_setting(self.dir, "http.%s" % service_name)
if service_name == 'uploadpack':
return setting != 'false'
else:
return setting == 'true'
def get_git_dir(self, path):
root = self.get_project_root()
path = join(root, path)
if not self.is_subpath(path, root):
return False
if exists(path): # TODO: check is a valid git directory
return path
return False
def get_project_root(self):
root = self.config.get("project_root") or os.getcwd()
return root
    def is_subpath(self, path, checkpath):
        path = unquote(path)
        checkpath = unquote(checkpath)
        # str.replace does not understand regexes; strip trailing slashes with re.sub
        checkpath = re.sub(r'/+$', '', checkpath)
        # Escape the parent path so regex metacharacters in it cannot widen the
        # match, and return an explicit boolean.
        return bool(re.match(r"^%s(/|$)" % re.escape(checkpath), path))
|
{
"content_hash": "d64f10f9ec59623acc87b34825580fd5",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 97,
"avg_line_length": 34.35668789808917,
"alnum_prop": 0.5296625880608083,
"repo_name": "douban/gpack",
"id": "4aed6719f2edd61b3f22dc853e7d5e254e29829a",
"size": "10788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ghttp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14798"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from copy import copy
from itertools import chain
import os
import re
import sys
import shlex
try:
from urllib.request import pathname2url
from urllib.parse import urljoin
except ImportError: # Python2
from urllib import pathname2url
from urlparse import urljoin
from django.conf import settings
from django.utils import six
from .subprocess import check_output
def _options_to_args(**options):
"""Converts ``options`` into a list of command-line arguments."""
flags = []
for name in sorted(options):
value = options[name]
if value is None:
continue
flags.append('--' + name.replace('_', '-'))
if value is not True:
flags.append(six.text_type(value))
return flags
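# Illustrative example: _options_to_args(disable_javascript=True, dpi=300,
# quiet=None) returns ['--disable-javascript', '--dpi', '300']; None values
# are dropped, True becomes a bare flag, anything else becomes flag + value.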
def wkhtmltopdf(pages, output=None, **kwargs):
"""
Converts html to PDF using http://wkhtmltopdf.org/.
pages: List of file paths or URLs of the html to be converted.
output: Optional output file path. If None, the output is returned.
**kwargs: Passed to wkhtmltopdf via _extra_args() (See
https://github.com/antialize/wkhtmltopdf/blob/master/README_WKHTMLTOPDF
for acceptable args.)
Kwargs is passed through as arguments. e.g.:
{'footer_html': 'http://example.com/foot.html'}
becomes
'--footer-html http://example.com/foot.html'
Where there is no value passed, use True. e.g.:
{'disable_javascript': True}
becomes:
'--disable-javascript'
To disable a default option, use None. e.g:
    {'quiet': None}
becomes:
''
example usage:
wkhtmltopdf(pages=['/tmp/example.html'],
dpi=300,
orientation='Landscape',
disable_javascript=True)
"""
if isinstance(pages, six.string_types):
# Support a single page.
pages = [pages]
if output is None:
# Standard output.
output = '-'
# Default options:
options = getattr(settings, 'WKHTMLTOPDF_CMD_OPTIONS', None)
if options is None:
options = {'quiet': True}
else:
options = copy(options)
options.update(kwargs)
# Force --encoding utf8 unless the user has explicitly overridden this.
options.setdefault('encoding', 'utf8')
env = getattr(settings, 'WKHTMLTOPDF_ENV', None)
if env is not None:
env = dict(os.environ, **env)
cmd = 'WKHTMLTOPDF_CMD'
cmd = getattr(settings, cmd, os.environ.get(cmd, 'wkhtmltopdf'))
ck_args = list(chain(shlex.split(cmd),
_options_to_args(**options),
list(pages),
[output]))
ck_kwargs = {'env': env}
    try:
        # Probe for a real file descriptor; mod_wsgi's stderr object has no
        # fileno(), in which case stderr redirection is simply skipped.
        sys.stderr.fileno()
        ck_kwargs['stderr'] = sys.stderr
    except AttributeError:
        pass
return check_output(ck_args, **ck_kwargs)
def content_disposition_filename(filename):
"""
Sanitize a file name to be used in the Content-Disposition HTTP
header.
    Although the standard is quite permissive about which characters are
    allowed, many browsers mishandle various edge cases.
See http://greenbytes.de/tech/tc2231/#attmultinstances for more details.
"""
filename = filename.replace(';', '').replace('"', '')
return http_quote(filename)
def http_quote(string):
"""
Given a unicode string, will do its dandiest to give you back a
valid ascii charset string you can use in, say, http headers and the
like.
"""
    if isinstance(string, six.text_type):
        try:
            import unidecode
            string = unidecode.unidecode(string)
        except ImportError:
            pass
        # Always reduce to ASCII bytes before escaping; the original left text
        # input unencoded when unidecode succeeded, so the bytes-level
        # replaces below would raise TypeError on Python 3.
        string = string.encode('ascii', 'replace')
    # Escape backslashes and double-quotes, then wrap in double-quotes
    string = string.replace(b'\\', b'\\\\').replace(b'"', b'\\"')
    return '"{0!s}"'.format(string.decode())
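# Illustrative example: http_quote(u'r\xe9sum\xe9.pdf') returns '"resume.pdf"'
# when unidecode is installed, or '"r?sum?.pdf"' when it is not.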
def pathname2fileurl(pathname):
"""Returns a file:// URL for pathname. Handles OS-specific conversions."""
return urljoin('file:', pathname2url(pathname))
def make_absolute_paths(content):
"""Convert all MEDIA files into a file://URL paths in order to
correctly get it displayed in PDFs."""
overrides = [
{
'root': settings.MEDIA_ROOT,
'url': settings.MEDIA_URL,
},
{
'root': settings.STATIC_ROOT,
'url': settings.STATIC_URL,
}
]
has_scheme = re.compile(r'^[^:/]+://')
for x in overrides:
if not x['url'] or has_scheme.match(x['url']):
continue
if not x['root'].endswith('/'):
x['root'] += '/'
occur_pattern = '''["|']({0}.*?)["|']'''
occurences = re.findall(occur_pattern.format(x['url']), content)
occurences = list(set(occurences)) # Remove dups
for occur in occurences:
content = content.replace(occur,
pathname2fileurl(x['root']) +
occur[len(x['url']):])
return content
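# Illustrative example (assumed settings): with MEDIA_URL = '/media/' and
# MEDIA_ROOT = '/var/www/media/', an occurrence of "/media/logo.png" in the
# HTML becomes "file:///var/www/media/logo.png", which wkhtmltopdf can load
# directly from disk.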
|
{
"content_hash": "b55e737a0f1775f9328748a04362bee9",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 85,
"avg_line_length": 30.245714285714286,
"alnum_prop": 0.5790666918571699,
"repo_name": "unrealsolver/django-wkhtmltopdf",
"id": "f9d7ac1e59978aeb8e1b1a95b337e05f4e5913ff",
"size": "5293",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "wkhtmltopdf/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "374"
},
{
"name": "Makefile",
"bytes": "227"
},
{
"name": "Python",
"bytes": "30944"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from socialregistration.clients.oauth import OAuth2
from socialregistration.settings import SESSION_KEY
import json
class Instagram(OAuth2):
client_id = getattr(settings, 'INSTAGRAM_CLIENT_ID', '')
secret = getattr(settings, 'INSTAGRAM_CLIENT_SECRET', '')
scope = getattr(settings, 'INSTAGRAM_REQUEST_PERMISSIONS', 'basic')
auth_url = 'https://api.instagram.com/oauth/authorize/'
access_token_url = 'https://api.instagram.com/oauth/access_token/'
_user_info = None
def get_callback_url(self, **kwargs):
if self.is_https():
return 'https://%s%s' % (Site.objects.get_current().domain,
reverse('socialregistration:instagram:callback'))
return 'http://%s%s' % (Site.objects.get_current().domain,
reverse('socialregistration:instagram:callback'))
def get_access_token(self, **params):
"""
Instagram requires a `grant_type` parameter when requesting the access
token.
"""
return super(Instagram, self).get_access_token(grant_type='authorization_code', **params)
def parse_access_token(self, content):
return json.loads(content)
def get_user_info(self):
return self.access_token_dict['user']['id']
@staticmethod
def get_session_key():
return '%sinstagram' % SESSION_KEY
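# Minimal settings sketch (placeholder values, not real credentials):
#
#     INSTAGRAM_CLIENT_ID = 'your-client-id'
#     INSTAGRAM_CLIENT_SECRET = 'your-client-secret'
#     INSTAGRAM_REQUEST_PERMISSIONS = 'basic'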
|
{
"content_hash": "e29a60f2bf031de12b87dec926598558",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 97,
"avg_line_length": 34.651162790697676,
"alnum_prop": 0.6597315436241611,
"repo_name": "mark-adams/django-socialregistration",
"id": "ed7d2348523020d5f26b9860c6edaf5f52adc461",
"size": "1490",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "socialregistration/contrib/instagram/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6160"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "211294"
},
{
"name": "Shell",
"bytes": "112"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils import timezone
from django.db.models import F
from .managers import TopicQuerySet
from ..core.utils.models import AutoSlugField
class Topic(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='st_topics')
category = models.ForeignKey('spirit_category.Category', verbose_name=_("category"))
title = models.CharField(_("title"), max_length=255)
slug = AutoSlugField(populate_from="title", db_index=False, blank=True)
date = models.DateTimeField(_("date"), default=timezone.now)
last_active = models.DateTimeField(_("last active"), default=timezone.now)
is_pinned = models.BooleanField(_("pinned"), default=False)
is_globally_pinned = models.BooleanField(_("globally pinned"), default=False)
is_closed = models.BooleanField(_("closed"), default=False)
is_removed = models.BooleanField(default=False)
view_count = models.PositiveIntegerField(_("views count"), default=0)
comment_count = models.PositiveIntegerField(_("comment count"), default=0)
objects = TopicQuerySet.as_manager()
class Meta:
ordering = ['-last_active', '-pk']
verbose_name = _("topic")
verbose_name_plural = _("topics")
def get_absolute_url(self):
if self.category_id == settings.ST_TOPIC_PRIVATE_CATEGORY_PK:
return reverse('spirit:topic:private:detail', kwargs={'topic_id': str(self.id), 'slug': self.slug})
else:
return reverse('spirit:topic:detail', kwargs={'pk': str(self.id), 'slug': self.slug})
@property
def main_category(self):
return self.category.parent or self.category
@property
def bookmark(self):
# *bookmarks* is dynamically created by manager.with_bookmarks()
try:
assert len(self.bookmarks) <= 1, "Panic, too many bookmarks"
return self.bookmarks[0]
except (AttributeError, IndexError):
return
def increase_view_count(self):
Topic.objects\
.filter(pk=self.pk)\
.update(view_count=F('view_count') + 1)
def increase_comment_count(self):
Topic.objects\
.filter(pk=self.pk)\
.update(comment_count=F('comment_count') + 1, last_active=timezone.now())
def decrease_comment_count(self):
# todo: update last_active to last() comment
Topic.objects\
.filter(pk=self.pk)\
.update(comment_count=F('comment_count') - 1)
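# Usage sketch (assumes an existing topic row; the pk is a placeholder): the
# F() expressions issue a single atomic UPDATE at the database level, so
# concurrent requests cannot lose counter increments.
#
#     topic = Topic.objects.get(pk=some_pk)
#     topic.increase_view_count()  # UPDATE ... SET view_count = view_count + 1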
|
{
"content_hash": "f460f71dfaad388f6d89b75df2858fa9",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 111,
"avg_line_length": 37.208333333333336,
"alnum_prop": 0.6610675625233297,
"repo_name": "adiyengar/Spirit",
"id": "d68fd9490e096cc69b4c7919c6e463f63eef4b69",
"size": "2704",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "spirit/topic/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "146213"
},
{
"name": "CoffeeScript",
"bytes": "109334"
},
{
"name": "HTML",
"bytes": "250480"
},
{
"name": "JavaScript",
"bytes": "132341"
},
{
"name": "Python",
"bytes": "533086"
}
],
"symlink_target": ""
}
|