"""Test basic application."""
from __future__ import absolute_import, print_function
import logging
import warnings
from os.path import exists, join
import click
import pytest
from click.testing import CliRunner
from flask import Blueprint, Flask, current_app
from mock import patch
from pkg_resources import EntryPoint
from werkzeug.routing import BaseConverter
from werkzeug.wsgi import DispatcherMiddleware
from invenio_base import __version__
from invenio_base.app import _loader, app_loader, base_app, blueprint_loader, \
configure_warnings, converter_loader, create_app_factory, create_cli
from invenio_base.cli import generate_secret_key
class ListConverter(BaseConverter):
"""Simple list converter."""
def to_python(self, value):
"""Return Python object."""
return value.split('+')
    def to_url(self, values):
        """Return string."""
        return '+'.join(super(ListConverter, self).to_url(value)
                        for value in values)
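# Once registered on an app (as the tests below do via converter_loader), a
# rule like '/items/<mylist:items>' would map the URL segment 'a+b+c' to
# ['a', 'b', 'c'] and back; the rule and names here are illustrative only.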
#
# Mock helpers
#
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
"""Initialize handler."""
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
"""Emit log record by saving message to internal list."""
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
"""Reset internal list of messages."""
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
class MockEntryPoint(EntryPoint):
"""Mocking of entrypoint."""
def load(self):
"""Mock load entry point."""
if self.name == 'fail':
raise Exception("Fail")
return self.name
class NoRequireEntryPoint(EntryPoint):
"""Load without requirements check."""
def load(self):
"""Mock load entry point."""
return super(NoRequireEntryPoint, self).load(require=False)
def _mock_entry_points(name):
data = dict(
entrypoint1=[MockEntryPoint('ep1.e1', 'ep1.e1'),
MockEntryPoint('ep1.e2', 'ep1.e2'), ],
entrypoint2=[MockEntryPoint('ep2.e1', 'ep2.e1'),
MockEntryPoint('ep2.e2', 'ep2.e2'), ],
entrypoint3=[MockEntryPoint('fail', 'ep3.e1',), ],
entrypoint4=[NoRequireEntryPoint.parse(
'mylist = test_app:ListConverter'), ],
)
names = data.keys() if name is None else [name]
for key in names:
for entry_point in data[key]:
yield entry_point
#
# Tests
#
def test_version():
"""Test version."""
assert __version__
def test_configure_warnings():
"""Test warnings configuration."""
logger = logging.getLogger('py.warnings')
handler = MockLoggingHandler()
logger.addHandler(handler)
# Warnings not routed through logging
warnings.warn("Test")
assert handler.messages['warning'] == []
# Warnings through logging
configure_warnings()
warnings.warn("A warning")
assert "A warning" in handler.messages['warning'][0]
handler.reset()
warnings.warn("Pending deprecation", PendingDeprecationWarning)
warnings.warn("Deprecation", DeprecationWarning)
assert len(handler.messages['warning']) == 1
handler.reset()
warnings.simplefilter("always")
warnings.warn("Pending deprecation", PendingDeprecationWarning)
warnings.warn("Deprecation", DeprecationWarning)
assert len(handler.messages['warning']) == 2
warnings.resetwarnings()
@patch('pkg_resources.iter_entry_points', _mock_entry_points)
def test_loader():
"""Test loader."""
app = Flask(__name__)
found = []
_loader(app, lambda x: found.append(x), entry_points=None, modules=None)
assert found == []
# Modules
found = []
_loader(app, lambda x: found.append(x), modules=['a', 'b'])
assert found == ['a', 'b']
# Entry points
found = []
_loader(
app,
lambda x: found.append(x), entry_points=['entrypoint1', 'entrypoint2'])
assert found == ['ep1.e1', 'ep1.e2', 'ep2.e1', 'ep2.e2']
# Modules and entry points (entry points loaded before modules)
found = []
_loader(
app,
lambda x: found.append(x),
entry_points=['entrypoint1', 'entrypoint2'],
modules=['a', 'b']
)
assert found == ['ep1.e1', 'ep1.e2', 'ep2.e1', 'ep2.e2', 'a', 'b']
@patch('pkg_resources.iter_entry_points', _mock_entry_points)
def test_loader_exceptions():
"""Test exceptions during loading."""
app = Flask(__name__)
handler = MockLoggingHandler()
app.logger.addHandler(handler)
def _raise_func():
raise Exception()
assert len(handler.messages['error']) == 0
pytest.raises(
Exception, _loader, app, lambda x: x(),
entry_points=None, modules=[_raise_func])
assert len(handler.messages['error']) == 1
pytest.raises(
Exception, _loader, app, lambda x: x(),
entry_points=['entrypoint3'], modules=None)
assert len(handler.messages['error']) == 2
def test_app_loader():
"""Test app loader."""
class FlaskExt(object):
def __init__(self, app):
self.app = app
app.extensions['ext'] = self
app = Flask('testapp')
assert 'ext' not in app.extensions
app_loader(app, modules=[FlaskExt])
assert 'ext' in app.extensions
assert app.extensions['ext'].app is app
def test_blueprint_loader():
"""Test app loader."""
bp = Blueprint('test', 'test')
app = Flask('testapp')
assert len(app.blueprints) == 0
blueprint_loader(app, modules=[bp])
assert len(app.blueprints) == 1
def test_converter_loader():
"""Test converter loader."""
app = Flask('testapp')
assert 'mylist' not in app.url_map.converters
converter_loader(app, modules={'mylist': ListConverter})
assert 'mylist' in app.url_map.converters
@patch('pkg_resources.iter_entry_points', _mock_entry_points)
def test_converter_loader_from_entry_points():
    """Test converter loader from entry points."""
app = Flask('testapp')
assert 'mylist' not in app.url_map.converters
converter_loader(app, entry_points=['entrypoint4'])
assert 'mylist' in app.url_map.converters
@patch('pkg_resources.iter_entry_points', _mock_entry_points)
def test_converter_loader_fail():
    """Test converter loader failure."""
app = Flask('testapp')
with pytest.raises(Exception):
converter_loader(app, entry_points=['entrypoint3'])
def test_base_app(tmppath):
"""Test base app creation."""
# Test default static_url_path and CLI initialization
app = base_app('test')
assert app.name == 'test'
assert app.cli
assert app.static_url_path == '/static'
assert app.instance_path != tmppath
# Test specifying instance path
app = base_app('test', instance_path=tmppath)
assert app.instance_path == tmppath
assert exists(app.instance_path)
assert app.static_folder is None
# Test automatic instance path creation
newpath = join(tmppath, 'test')
assert not exists(newpath)
app = base_app('test', instance_path=newpath)
assert exists(newpath)
assert app.static_folder is None
# Test static folder
staticpath = join(tmppath, 'teststatic')
app = base_app('test', static_folder=staticpath)
assert app.static_folder == staticpath
assert app.instance_path is not None
# Test static + instance folder
staticpath = join(tmppath, 'teststatic')
app = base_app('test', instance_path=tmppath,
static_folder=staticpath)
assert app.static_folder == staticpath
assert app.instance_path == tmppath
# Test choice loader
searchpath = join(tmppath, "tpls")
app = base_app('test', template_folder=searchpath)
assert app.jinja_loader.searchpath == [searchpath]
app = base_app('test')
assert app.jinja_loader.searchpath == [join(app.root_path, 'templates')]
def test_base_app_class(tmppath):
"""Test using custom Flask application class."""
class CustomFlask(Flask):
pass
app = base_app('test', app_class=CustomFlask)
assert isinstance(app, CustomFlask)
def test_create_app_factory():
"""Test app factory factory."""
class FlaskExt(object):
def __init__(self, app):
self.app = app
app.extensions['ext'] = self
bp = Blueprint('test', 'test')
# Create app
create_app = create_app_factory(
'test', blueprints=[bp], extensions=[FlaskExt])
assert callable(create_app)
app = create_app()
assert app.name == 'test'
assert len(app.blueprints) == 1
assert 'ext' in app.extensions
def test_create_app_debug_flag():
"""Test debug flag propagation (needed by CLI)."""
create_app = create_app_factory('test')
assert create_app().debug is False
assert create_app(debug=True).debug is True
def test_create_app_factory_config_loader():
"""Test app factory conf loader."""
def _config_loader(app, **kwargs):
app.config['CONFIG_LOADER'] = True
app.config.update(kwargs)
create_app = create_app_factory('test', config_loader=_config_loader)
app = create_app(KWARGS_TEST=True)
assert app.config['CONFIG_LOADER']
assert app.config['KWARGS_TEST']
def test_create_app_factory_wsgi_factory():
"""Test app factory wsgi factory."""
def _wsgi_factory(app):
return DispatcherMiddleware(app.wsgi_app, {'/test': Flask('dispatch')})
create_app = create_app_factory('test', wsgi_factory=_wsgi_factory)
app = create_app()
assert isinstance(app.wsgi_app, DispatcherMiddleware)
def test_create_cli_with_app():
"""Test create cli."""
app_name = 'mycmdtest'
create_app = create_app_factory(app_name)
cli = create_cli(create_app=create_app)
@cli.command()
def test_cmd():
click.echo('{0} {1}'.format(current_app.name, current_app.debug))
runner = CliRunner()
result = runner.invoke(cli)
assert result.exit_code == 0
result = runner.invoke(cli, ['test_cmd'])
assert result.exit_code == 0
assert u'{0} False\n'.format(app_name) in result.output
    # FIXME: re-enable once --debug support is fixed in Flask.
# result = runner.invoke(cli, ['--debug', 'test_cmd'])
# assert result.exit_code == 0
# assert u'{0} True\n'.format(app_name) in result.output
def test_create_cli_without_app():
"""Test create cli."""
from invenio_base.__main__ import cli
@cli.command()
def test_cmd():
click.echo(current_app.name)
runner = CliRunner()
result = runner.invoke(cli)
assert result.exit_code == 0
result = runner.invoke(cli, ['test_cmd'])
assert result.exit_code != 0
assert 'FLASK_APP' in result.output
def test_generate_secret_key():
"""Test generation of a secret key."""
v1 = generate_secret_key()
v2 = generate_secret_key()
assert len(v1) == len(v2) == 256
assert v1 != v2
|
{
"content_hash": "af9e46f3983981a1dec00fac4d4b8556",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 79,
"avg_line_length": 28.54639175257732,
"alnum_prop": 0.635247381726255,
"repo_name": "tiborsimko/invenio-base",
"id": "6f333e9b46e7d406e1b6e18157daee86e149039b",
"size": "11311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46013"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
}
|
"""
Use the Python DTrace consumer as a Thread and run a syscall counter DTrace
script.
Created on Oct 10, 2011
@author: tmetsch
"""
import time
from dtrace_ctypes import consumer
SCRIPT = 'syscall:::entry { @num[execname] = count(); }'
def main():
"""
Run DTrace...
"""
dtrace = consumer.DTraceConsumerThread(SCRIPT)
dtrace.start()
# we will stop the thread after some time...
time.sleep(2)
# stop and wait for join...
dtrace.stop()
dtrace.join()
if __name__ == '__main__':
main()
|
{
"content_hash": "a16f5f9f1ef0ec87ce63e4bb5d1c0f7f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 16.272727272727273,
"alnum_prop": 0.6219739292364991,
"repo_name": "tmetsch/python-dtrace",
"id": "dc72f00f3cb16730f028a796de628404b81fbc33",
"size": "560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ctypes/syscall_count_continuous.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3579"
},
{
"name": "Cython",
"bytes": "22817"
},
{
"name": "Python",
"bytes": "28476"
}
],
"symlink_target": ""
}
|
"""
Unit tests for the Scality SOFS Volume Driver.
"""
import errno
import os
import shutil
import tempfile
import mox as mox_lib
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder import units
from cinder import utils
from cinder.volume.drivers import scality
class ScalityDriverTestCase(test.TestCase):
"""Test case for the Scality driver."""
TEST_MOUNT = '/tmp/fake_mount'
TEST_CONFIG = '/tmp/fake_config'
TEST_VOLDIR = 'volumes'
TEST_VOLNAME = 'volume_name'
TEST_VOLSIZE = '0'
TEST_VOLUME = {
'name': TEST_VOLNAME,
'size': TEST_VOLSIZE
}
TEST_VOLPATH = os.path.join(TEST_MOUNT,
TEST_VOLDIR,
TEST_VOLNAME)
TEST_SNAPNAME = 'snapshot_name'
TEST_SNAPSHOT = {
'name': TEST_SNAPNAME,
'volume_name': TEST_VOLNAME,
'volume_size': TEST_VOLSIZE
}
TEST_SNAPPATH = os.path.join(TEST_MOUNT,
TEST_VOLDIR,
TEST_SNAPNAME)
TEST_CLONENAME = 'clone_name'
TEST_CLONE = {
'name': TEST_CLONENAME,
'size': TEST_VOLSIZE
}
TEST_NEWSIZE = '2'
TEST_IMAGE_SERVICE = 'image_service'
TEST_IMAGE_ID = 'image_id'
TEST_IMAGE_META = 'image_meta'
def _makedirs(self, path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _create_fake_config(self):
open(self.TEST_CONFIG, "w+").close()
def _create_fake_mount(self):
self._makedirs(os.path.join(self.TEST_MOUNT, 'sys'))
self._makedirs(os.path.join(self.TEST_MOUNT, self.TEST_VOLDIR))
def _remove_fake_config(self):
try:
os.unlink(self.TEST_CONFIG)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def _configure_driver(self):
scality.CONF.scality_sofs_config = self.TEST_CONFIG
scality.CONF.scality_sofs_mount_point = self.TEST_MOUNT
scality.CONF.scality_sofs_volume_dir = self.TEST_VOLDIR
scality.CONF.volume_dd_blocksize = '1M'
def _execute_wrapper(self, cmd, *args, **kwargs):
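        # The tests shell out directly, so drop the run_as_root kwarg that
        # the driver adds for rootwrap-enabled deployments.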
try:
kwargs.pop('run_as_root')
except KeyError:
pass
utils.execute(cmd, *args, **kwargs)
def _set_access_wrapper(self, is_visible):
def _access_wrapper(path, flags):
if path == '/sbin/mount.sofs':
return is_visible
else:
return os.access(path, flags)
self.stubs.Set(os, 'access', _access_wrapper)
def setUp(self):
super(ScalityDriverTestCase, self).setUp()
self.tempdir = tempfile.mkdtemp()
self.TEST_MOUNT = self.tempdir
self.TEST_VOLPATH = os.path.join(self.TEST_MOUNT,
self.TEST_VOLDIR,
self.TEST_VOLNAME)
self.TEST_SNAPPATH = os.path.join(self.TEST_MOUNT,
self.TEST_VOLDIR,
self.TEST_SNAPNAME)
self.TEST_CLONEPATH = os.path.join(self.TEST_MOUNT,
self.TEST_VOLDIR,
self.TEST_CLONENAME)
self._driver = scality.ScalityDriver()
self._driver.set_execute(self._execute_wrapper)
self._mox = mox_lib.Mox()
self._create_fake_mount()
self._create_fake_config()
self._configure_driver()
def tearDown(self):
shutil.rmtree(self.tempdir)
self._remove_fake_config()
super(ScalityDriverTestCase, self).tearDown()
def test_setup_no_config(self):
"""Missing SOFS configuration shall raise an error."""
scality.CONF.scality_sofs_config = None
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.do_setup, None)
def test_setup_missing_config(self):
"""Non-existent SOFS configuration file shall raise an error."""
scality.CONF.scality_sofs_config = 'nonexistent.conf'
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.do_setup, None)
def test_setup_no_mount_helper(self):
"""SOFS must be installed to use the driver."""
self._set_access_wrapper(False)
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.do_setup, None)
def test_setup_make_voldir(self):
"""The directory for volumes shall be created automatically."""
self._set_access_wrapper(True)
voldir_path = os.path.join(self.TEST_MOUNT, self.TEST_VOLDIR)
os.rmdir(voldir_path)
self._driver.do_setup(None)
self.assertTrue(os.path.isdir(voldir_path))
def test_local_path(self):
"""Expected behaviour for local_path."""
self.assertEqual(self._driver.local_path(self.TEST_VOLUME),
self.TEST_VOLPATH)
def test_create_volume(self):
"""Expected behaviour for create_volume."""
ret = self._driver.create_volume(self.TEST_VOLUME)
self.assertEqual(ret['provider_location'],
os.path.join(self.TEST_VOLDIR,
self.TEST_VOLNAME))
self.assertTrue(os.path.isfile(self.TEST_VOLPATH))
self.assertEqual(os.stat(self.TEST_VOLPATH).st_size,
100 * units.MiB)
def test_delete_volume(self):
"""Expected behaviour for delete_volume."""
self._driver.create_volume(self.TEST_VOLUME)
self._driver.delete_volume(self.TEST_VOLUME)
self.assertFalse(os.path.isfile(self.TEST_VOLPATH))
def test_create_snapshot(self):
"""Expected behaviour for create_snapshot."""
mox = self._mox
vol_size = self._driver._size_bytes(self.TEST_VOLSIZE)
mox.StubOutWithMock(self._driver, '_create_file')
self._driver._create_file(self.TEST_SNAPPATH, vol_size)
mox.StubOutWithMock(self._driver, '_copy_file')
self._driver._copy_file(self.TEST_VOLPATH, self.TEST_SNAPPATH)
mox.ReplayAll()
self._driver.create_snapshot(self.TEST_SNAPSHOT)
mox.UnsetStubs()
mox.VerifyAll()
def test_delete_snapshot(self):
"""Expected behaviour for delete_snapshot."""
mox = self._mox
mox.StubOutWithMock(os, 'remove')
os.remove(self.TEST_SNAPPATH)
mox.ReplayAll()
self._driver.delete_snapshot(self.TEST_SNAPSHOT)
mox.UnsetStubs()
mox.VerifyAll()
def test_initialize_connection(self):
"""Expected behaviour for initialize_connection."""
ret = self._driver.initialize_connection(self.TEST_VOLUME, None)
self.assertEqual(ret['driver_volume_type'], 'scality')
self.assertEqual(ret['data']['sofs_path'],
os.path.join(self.TEST_VOLDIR,
self.TEST_VOLNAME))
def test_copy_image_to_volume(self):
"""Expected behaviour for copy_image_to_volume."""
self.mox.StubOutWithMock(image_utils, 'fetch_to_raw')
image_utils.fetch_to_raw(context,
self.TEST_IMAGE_SERVICE,
self.TEST_IMAGE_ID,
self.TEST_VOLPATH,
mox_lib.IgnoreArg(),
size=self.TEST_VOLSIZE)
self.mox.ReplayAll()
self._driver.copy_image_to_volume(context,
self.TEST_VOLUME,
self.TEST_IMAGE_SERVICE,
self.TEST_IMAGE_ID)
def test_copy_volume_to_image(self):
"""Expected behaviour for copy_volume_to_image."""
self.mox.StubOutWithMock(image_utils, 'upload_volume')
image_utils.upload_volume(context,
self.TEST_IMAGE_SERVICE,
self.TEST_IMAGE_META,
self.TEST_VOLPATH)
self.mox.ReplayAll()
self._driver.copy_volume_to_image(context,
self.TEST_VOLUME,
self.TEST_IMAGE_SERVICE,
self.TEST_IMAGE_META)
def test_create_cloned_volume(self):
"""Expected behaviour for create_cloned_volume."""
self.mox.StubOutWithMock(self._driver, '_create_file')
self.mox.StubOutWithMock(self._driver, '_copy_file')
vol_size = self._driver._size_bytes(self.TEST_VOLSIZE)
self._driver._create_file(self.TEST_CLONEPATH, vol_size)
self._driver._copy_file(self.TEST_VOLPATH, self.TEST_CLONEPATH)
self.mox.ReplayAll()
self._driver.create_cloned_volume(self.TEST_CLONE, self.TEST_VOLUME)
def test_extend_volume(self):
"""Expected behaviour for extend_volume."""
self.mox.StubOutWithMock(self._driver, '_create_file')
new_size = self._driver._size_bytes(self.TEST_NEWSIZE)
self._driver._create_file(self.TEST_VOLPATH, new_size)
self.mox.ReplayAll()
self._driver.extend_volume(self.TEST_VOLUME, self.TEST_NEWSIZE)
|
{
"content_hash": "70e3d404f7e3441ef698bc401974017f",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 76,
"avg_line_length": 34.380434782608695,
"alnum_prop": 0.5671830540625988,
"repo_name": "spring-week-topos/cinder-week",
"id": "e8efe3003d1f245c416d5feec6d820e9e29a9fd5",
"size": "10093",
"binary": false,
"copies": "2",
"ref": "refs/heads/spring-week",
"path": "cinder/tests/test_scality.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6134883"
},
{
"name": "Shell",
"bytes": "8998"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
from nltk.stem.api import StemmerI
from nltk.compat import python_2_unicode_compatible
@python_2_unicode_compatible
class RegexpStemmer(StemmerI):
"""
A stemmer that uses regular expressions to identify morphological
affixes. Any substrings that match the regular expressions will
be removed.
>>> from nltk.stem import RegexpStemmer
>>> st = RegexpStemmer('ing$|s$|e$|able$', min=4)
>>> st.stem('cars')
'car'
>>> st.stem('mass')
'mas'
>>> st.stem('was')
'was'
>>> st.stem('bee')
'bee'
>>> st.stem('compute')
'comput'
>>> st.stem('advisable')
'advis'
:type regexp: str or regexp
:param regexp: The regular expression that should be used to
identify morphological affixes.
:type min: int
:param min: The minimum length of string to stem
"""
def __init__(self, regexp, min=0):
if not hasattr(regexp, 'pattern'):
regexp = re.compile(regexp)
self._regexp = regexp
self._min = min
def stem(self, word):
if len(word) < self._min:
return word
else:
return self._regexp.sub('', word)
def __repr__(self):
return '<RegexpStemmer: %r>' % self._regexp.pattern
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
{
"content_hash": "3ff1ec69a57a1ae16dcb0d7581948639",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 69,
"avg_line_length": 26.232142857142858,
"alnum_prop": 0.5847515316541865,
"repo_name": "zimmermegan/MARDA",
"id": "ee51cd84862b1908212fa513d8ef7befbda8e4f6",
"size": "1748",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "nltk-3.0.3/nltk/stem/regexp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "381901"
},
{
"name": "Groff",
"bytes": "1093743"
},
{
"name": "Java",
"bytes": "392985"
},
{
"name": "Makefile",
"bytes": "769"
},
{
"name": "Perl",
"bytes": "7219"
},
{
"name": "Python",
"bytes": "3873212"
},
{
"name": "Shell",
"bytes": "556"
},
{
"name": "XSLT",
"bytes": "5485"
}
],
"symlink_target": ""
}
|
import os
import pytest
from stingray.fourier import *
curdir = os.path.abspath(os.path.dirname(__file__))
datadir = os.path.join(curdir, "data")
def compare_tables(table1, table2, rtol=0.001, discard=()):
for key in table1.meta.keys():
if key in discard:
continue
oe, oc = table1.meta[key], table2.meta[key]
if isinstance(oe, (int, str)):
assert oe == oc
elif oe is None:
assert oc is None
elif isinstance(oe, Iterable):
assert np.allclose(oe, oc, rtol=rtol)
else:
assert np.isclose(oe, oc, rtol=rtol)
for col in table1.colnames:
if col in discard:
continue
oe, oc = table1[col], table2[col]
assert np.allclose(oe, oc, rtol=rtol)
def test_norm():
mean = var = 100000
N = 1000000
dt = 0.2
meanrate = mean / dt
lc = np.random.poisson(mean, N)
pds = np.abs(fft(lc)) ** 2
freq = fftfreq(N, dt)
good = slice(1, N // 2)
pdsabs = normalize_abs(pds, dt, lc.size)
pdsfrac = normalize_frac(pds, dt, lc.size, mean)
pois_abs = poisson_level(meanrate=meanrate, norm="abs")
pois_frac = poisson_level(meanrate=meanrate, norm="frac")
assert np.isclose(pdsabs[good].mean(), pois_abs, rtol=0.01)
assert np.isclose(pdsfrac[good].mean(), pois_frac, rtol=0.01)
class TestCoherence(object):
@classmethod
def setup_class(cls):
data = np.load(os.path.join(datadir, "sample_variable_lc.npy"))[:10000] * 1000
cls.data1 = np.random.poisson(data)
cls.data2 = np.random.poisson(data)
ft1 = np.fft.fft(cls.data1)
ft2 = np.fft.fft(cls.data2)
dt = 0.01
cls.N = data.size
mean = np.mean(data)
meanrate = mean / dt
freq = np.fft.fftfreq(data.size, dt)
good = (freq > 0) & (freq < 0.1)
ft1, ft2 = ft1[good], ft2[good]
cls.cross = normalize_periodograms(
ft1.conj() * ft2, dt, cls.N, mean, norm="abs", power_type="all")
cls.pds1 = normalize_periodograms(
ft1 * ft1.conj(), dt, cls.N, mean, norm="abs", power_type="real")
cls.pds2 = normalize_periodograms(
ft2 * ft2.conj(), dt, cls.N, mean, norm="abs", power_type="real")
cls.p1noise = poisson_level(meanrate=meanrate, norm="abs")
cls.p2noise = poisson_level(meanrate=meanrate, norm="abs")
def test_intrinsic_coherence(self):
coh = estimate_intrinsic_coherence(
self.cross, self.pds1, self.pds2, self.p1noise, self.p2noise, self.N)
assert np.allclose(coh, 1, atol=0.001)
def test_raw_high_coherence(self):
coh = raw_coherence(self.cross, self.pds1, self.pds2, self.p1noise, self.p2noise, self.N)
assert np.allclose(coh, 1, atol=0.001)
def test_raw_low_coherence(self):
nbins = 2
C, P1, P2 = self.cross[:nbins], self.pds1[:nbins], self.pds2[:nbins]
bsq = bias_term(P1, P2, self.p1noise, self.p2noise, self.N)
# must be lower than bsq!
low_coh_cross = np.random.normal(bsq**0.5 / 10, bsq**0.5 / 100) + 0.j
coh = raw_coherence(low_coh_cross, P1, P2, self.p1noise, self.p2noise, self.N)
assert np.allclose(coh, 0)
# Do it with a single number
coh = raw_coherence(low_coh_cross[0], P1[0],
P2[0], self.p1noise, self.p2noise, self.N)
# Do it with a single complex object
coh = raw_coherence(complex(low_coh_cross[0]), P1[0],
P2[0], self.p1noise, self.p2noise, self.N)
def test_raw_high_bias(self):
"""Test when squared bias higher than squared norm of cross spec"""
# Values chosen to have a high bias term, larger than |C|^2
C = np.array([12986. + 8694.j])
P1 = np.array([476156.])
P2 = np.array([482751.])
P1noise = 495955
P2noise = 494967
coh = raw_coherence(C, P1, P2, P1noise, P2noise, 499, 1)
coh_sngl = raw_coherence(C[0], P1[0], P2[0], P1noise, P2noise, 499, 1)
assert np.allclose(coh, (C * np.conj(C)).real / (P1 * P2))
assert np.isclose(coh_sngl, (C * np.conj(C)).real[0] / (P1[0] * P2[0]))
class TestFourier(object):
@classmethod
def setup_class(cls):
cls.dt = 1
cls.length = 100
cls.ctrate = 10000
cls.N = np.rint(cls.length / cls.dt).astype(int)
cls.dt = cls.length / cls.N
cls.times = np.sort(np.random.uniform(0, cls.length, int(cls.length * cls.ctrate)))
cls.gti = np.asarray([[0, cls.length]])
cls.counts, bins = np.histogram(cls.times, bins=np.linspace(0, cls.length, cls.N + 1))
cls.errs = np.ones_like(cls.counts) * np.sqrt(cls.ctrate)
cls.bin_times = (bins[:-1] + bins[1:]) / 2
cls.segment_size = 5.0
cls.times2 = np.sort(np.random.uniform(0, cls.length, int(cls.length * cls.ctrate)))
cls.counts2, _ = np.histogram(cls.times2, bins=np.linspace(0, cls.length, cls.N + 1))
cls.errs2 = np.ones_like(cls.counts2) * np.sqrt(cls.ctrate)
def test_error_on_averaged_cross_spectrum_low_nave(self):
with pytest.warns(UserWarning) as record:
error_on_averaged_cross_spectrum(4 + 1.j, 2, 4, 29, 2, 2)
assert np.any(["n_ave is below 30."
in r.message.args[0] for r in record])
def test_ctrate_events(self):
assert get_average_ctrate(self.times, self.gti, self.segment_size) == self.ctrate
def test_ctrate_counts(self):
assert get_average_ctrate(self.bin_times, self.gti, self.segment_size,
self.counts) == self.ctrate
def test_fts_from_segments_invalid(self):
with pytest.raises(ValueError) as excinfo:
# N and counts are both None. This should make the function fail immediately
for _ in get_flux_iterable_from_segments(1, 2, 3, n_bin=None, fluxes=None):
pass
assert 'At least one between fluxes' in str(excinfo.value)
def test_fts_from_segments_cts_and_events_are_equal(self):
N = np.rint(self.segment_size / self.dt).astype(int)
fts_evts = [
f for f in get_flux_iterable_from_segments(self.times, self.gti, self.segment_size, n_bin=N)
]
fts_cts = [
f
for f in get_flux_iterable_from_segments(
self.bin_times, self.gti, self.segment_size, fluxes=self.counts
)
]
for fe, fc in zip(fts_evts, fts_cts):
assert np.allclose(fe, fc)
def test_avg_pds_bad_input(self):
times = np.sort(np.random.uniform(0, 1000, 1))
out_ev = avg_pds_from_events(times, self.gti, self.segment_size, self.dt)
assert out_ev is None
@pytest.mark.parametrize("return_auxil", [True, False])
def test_avg_cs_bad_input(self, return_auxil):
times1 = np.sort(np.random.uniform(0, 1000, 1))
times2 = np.sort(np.random.uniform(0, 1000, 1))
out_ev = avg_cs_from_events(times1, times2, self.gti,
self.segment_size, self.dt, return_auxil=return_auxil)
assert out_ev is None
@pytest.mark.parametrize("norm", ["frac", "abs", "none", "leahy"])
def test_avg_pds_use_common_mean_similar_stats(self, norm):
out_comm = avg_pds_from_events(
self.times,
self.gti,
self.segment_size,
self.dt,
norm=norm,
use_common_mean=True,
silent=True,
fluxes=None,
)["power"]
out = avg_pds_from_events(
self.times,
self.gti,
self.segment_size,
self.dt,
norm=norm,
use_common_mean=False,
silent=True,
fluxes=None,
)["power"]
assert np.isclose(out_comm.std(), out.std(), rtol=0.1)
@pytest.mark.parametrize("norm", ["frac", "abs", "none", "leahy"])
def test_avg_cs_use_common_mean_similar_stats(self, norm):
out_comm = avg_cs_from_events(
self.times,
self.times2,
self.gti,
self.segment_size,
self.dt,
norm=norm,
use_common_mean=True,
silent=True,
)["power"]
out = avg_cs_from_events(
self.times,
self.times2,
self.gti,
self.segment_size,
self.dt,
norm=norm,
use_common_mean=False,
silent=True,
)["power"]
assert np.isclose(out_comm.std(), out.std(), rtol=0.1)
@pytest.mark.parametrize("use_common_mean", [True, False])
@pytest.mark.parametrize("norm", ["frac", "abs", "none", "leahy"])
def test_avg_pds_cts_and_events_are_equal(self, norm, use_common_mean):
out_ev = avg_pds_from_events(
self.times,
self.gti,
self.segment_size,
self.dt,
norm=norm,
use_common_mean=use_common_mean,
silent=True,
fluxes=None,
)
out_ct = avg_pds_from_events(
self.bin_times,
self.gti,
self.segment_size,
self.dt,
norm=norm,
use_common_mean=use_common_mean,
silent=True,
fluxes=self.counts,
)
compare_tables(out_ev, out_ct)
@pytest.mark.parametrize("use_common_mean", [True, False])
@pytest.mark.parametrize("norm", ["frac", "abs", "none", "leahy"])
def test_avg_pds_cts_and_err_and_events_are_equal(self, norm, use_common_mean):
out_ev = avg_pds_from_events(
self.times,
self.gti,
self.segment_size,
self.dt,
norm=norm,
use_common_mean=use_common_mean,
silent=True,
fluxes=None,
)
out_ct = avg_pds_from_events(
self.bin_times,
self.gti,
self.segment_size,
self.dt,
norm=norm,
use_common_mean=use_common_mean,
silent=True,
fluxes=self.counts,
errors=self.errs,
)
        # The variance is not _supposed_ to be equal when we specify errors
if use_common_mean:
compare_tables(out_ev, out_ct, rtol=0.01, discard=["variance"])
else:
compare_tables(out_ev, out_ct, rtol=0.1, discard=["variance"])
@pytest.mark.parametrize("use_common_mean", [True, False])
@pytest.mark.parametrize("norm", ["frac", "abs", "none", "leahy"])
def test_avg_cs_cts_and_events_are_equal(self, norm, use_common_mean):
out_ev = avg_cs_from_events(
self.times,
self.times2,
self.gti,
self.segment_size,
self.dt,
norm=norm,
use_common_mean=use_common_mean,
silent=False,
)
out_ct = avg_cs_from_events(
self.bin_times,
self.bin_times,
self.gti,
self.segment_size,
self.dt,
norm=norm,
use_common_mean=use_common_mean,
silent=False,
fluxes1=self.counts,
fluxes2=self.counts2,
)
if use_common_mean:
compare_tables(out_ev, out_ct, rtol=0.01)
else:
compare_tables(out_ev, out_ct, rtol=0.1)
@pytest.mark.parametrize("use_common_mean", [True, False])
@pytest.mark.parametrize("norm", ["frac", "abs", "none", "leahy"])
def test_avg_cs_cts_and_err_and_events_are_equal(self, norm, use_common_mean):
out_ev = avg_cs_from_events(
self.times,
self.times2,
self.gti,
self.segment_size,
self.dt,
norm=norm,
use_common_mean=use_common_mean,
silent=False,
)
out_ct = avg_cs_from_events(
self.bin_times,
self.bin_times,
self.gti,
self.segment_size,
self.dt,
norm=norm,
use_common_mean=use_common_mean,
silent=False,
fluxes1=self.counts,
fluxes2=self.counts2,
errors1=self.errs,
errors2=self.errs2,
)
discard = [m for m in out_ev.meta.keys() if "variance" in m]
if use_common_mean:
compare_tables(out_ev, out_ct, rtol=0.01, discard=discard)
else:
compare_tables(out_ev, out_ct, rtol=0.1, discard=discard)
class TestNorms(object):
@classmethod
def setup_class(cls):
cls.mean = cls.var = 100000.
cls.N = 800000
cls.dt = 0.2
cls.df = 1 / (cls.N * cls.dt)
freq = fftfreq(cls.N, cls.dt)
good = freq > 0
cls.good = good
cls.meanrate = cls.mean / cls.dt
cls.lc = np.random.poisson(cls.mean, cls.N).astype(float)
cls.nph = np.sum(cls.lc)
cls.pds = (np.abs(np.fft.fft(cls.lc)) ** 2)[good]
cls.lc_bksub = cls.lc - cls.mean
cls.pds_bksub = (np.abs(np.fft.fft(cls.lc_bksub)) ** 2)[good]
cls.lc_renorm = cls.lc / cls.mean
cls.pds_renorm = (np.abs(np.fft.fft(cls.lc_renorm)) ** 2)[good]
cls.lc_renorm_bksub = cls.lc_renorm - 1
cls.pds_renorm_bksub = (np.abs(np.fft.fft(cls.lc_renorm_bksub)) ** 2)[good]
def test_leahy_bksub_var_vs_standard(self):
"""Test that the Leahy norm. does not change with background-subtracted lcs"""
leahyvar = normalize_leahy_from_variance(self.pds_bksub, np.var(self.lc_bksub), self.N)
leahy = 2 * self.pds / np.sum(self.lc)
ratio = np.mean(leahyvar / leahy)
assert np.isclose(ratio, 1, rtol=0.01)
def test_abs_bksub(self):
"""Test that the abs rms normalization does not change with background-subtracted lcs"""
ratio = normalize_abs(self.pds_bksub, self.dt, self.N) / normalize_abs(
self.pds, self.dt, self.N
)
assert np.isclose(ratio.mean(), 1, rtol=0.01)
def test_frac_renorm_constant(self):
"""Test that the fractional rms normalization is equivalent when renormalized"""
ratio = normalize_frac(self.pds_renorm, self.dt, self.N, 1) / normalize_frac(
self.pds, self.dt, self.N, self.mean
)
assert np.isclose(ratio.mean(), 1, rtol=0.01)
def test_frac_to_abs_ctratesq(self):
"""Test that fractional rms normalization x ctrate**2 is equivalent to abs renormalized"""
ratio = (
normalize_frac(self.pds, self.dt, self.N, self.mean)
/ normalize_abs(self.pds, self.dt, self.N)
* self.meanrate ** 2
)
assert np.isclose(ratio.mean(), 1, rtol=0.01)
def test_total_variance(self):
"""Test that the total variance of the unnormalized pds is the same as
the variance from the light curve
Attention: VdK defines the variance as sum (x - x0)**2.
The usual definition is divided by 'N'
"""
vdk_total_variance = np.sum((self.lc - self.mean) ** 2)
ratio = np.mean(self.pds) / vdk_total_variance
assert np.isclose(ratio.mean(), 1, rtol=0.01)
@pytest.mark.parametrize("norm", ["abs", "frac", "leahy"])
def test_poisson_level(self, norm):
pdsnorm = normalize_periodograms(self.pds, self.dt, self.N,
self.mean, n_ph=self.nph, norm=norm)
assert np.isclose(
pdsnorm.mean(), poisson_level(meanrate=self.meanrate, norm=norm), rtol=0.01
)
@pytest.mark.parametrize("norm", ["abs", "frac", "leahy"])
def test_poisson_level_real(self, norm):
pdsnorm = normalize_periodograms(
self.pds, self.dt, self.N, self.mean, n_ph=self.nph,
norm=norm, power_type="real"
)
assert np.isclose(
pdsnorm.mean(), poisson_level(meanrate=self.meanrate, norm=norm), rtol=0.01
)
@pytest.mark.parametrize("norm", ["abs", "frac", "leahy"])
def test_poisson_level_absolute(self, norm):
pdsnorm = normalize_periodograms(
self.pds, self.dt, self.N, self.mean, n_ph=self.nph,
norm=norm, power_type="abs"
)
assert np.isclose(
pdsnorm.mean(), poisson_level(meanrate=self.meanrate, norm=norm), rtol=0.01
)
def test_normalize_with_variance(self):
pdsnorm = normalize_periodograms(
self.pds, self.dt, self.N, self.mean, variance=self.var, norm="leahy"
)
assert np.isclose(pdsnorm.mean(), 2, rtol=0.01)
def test_normalize_with_variance_fails_if_variance_zero(self):
# If the variance is zero, it will fail:
with pytest.raises(ValueError) as excinfo:
pdsnorm = normalize_leahy_from_variance(self.pds, 0., self.N)
assert "The variance used to normalize the" in str(excinfo.value)
def test_normalize_none(self):
pdsnorm = normalize_periodograms(self.pds, self.dt, self.N,
self.mean, n_ph=self.nph, norm="none")
assert np.isclose(pdsnorm.mean(), self.pds.mean(), rtol=0.01)
def test_normalize_badnorm(self):
with pytest.raises(ValueError):
pdsnorm = normalize_periodograms(
self.pds, self.var, self.N, self.mean, n_ph=self.nph, norm="asdfjlasdjf"
)
|
{
"content_hash": "a2d79d75a7fdbe78105528f8ae693130",
"timestamp": "",
"source": "github",
"line_count": 460,
"max_line_length": 104,
"avg_line_length": 37.95217391304348,
"alnum_prop": 0.5614617940199336,
"repo_name": "StingraySoftware/stingray",
"id": "cabb124b97fc94d2759cb6f151688eeeedfd33fe",
"size": "17458",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "stingray/tests/test_fourier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1200124"
},
{
"name": "Python",
"bytes": "1465633"
},
{
"name": "TeX",
"bytes": "8716"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
urlpatterns = patterns(
'',
url(r'^$', 'simple.views.index', name='index'),
)
|
{
"content_hash": "d63e66180594c840a2469a10b3ab6985",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 51,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.6384615384615384,
"repo_name": "shenek/django-ogra",
"id": "d14b7f454176f2bbfa4d737d2d9546fdbeb5ade0",
"size": "130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/simple/simple/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "746"
},
{
"name": "JavaScript",
"bytes": "43"
},
{
"name": "Python",
"bytes": "15022"
}
],
"symlink_target": ""
}
|
class Solution:
# @param s, a string
# @return an integer
def longestValidParentheses(self, s):
        result, prev, stack = 0, 0, []
        # push indices (not characters) so valid lengths can be computed
        # by index subtraction
        for i in xrange(0, len(s)):
            if s[i] == '(':
                stack.append(i)
            else:
                if len(stack) > 0 and s[stack[-1]] == '(':
                    # matched a pair: measure back to the last unmatched index
                    stack.pop()
                    prev = -1 if len(stack) == 0 else stack[-1]
                    result = max(result, i - prev)
                else:
                    # unmatched ')': becomes the new left boundary
                    stack.append(i)
        return result
s = Solution()
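# Each line prints the computed result followed by the expected value.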
print s.longestValidParentheses("(()"), 2
print s.longestValidParentheses("(()(((()"), 2
print s.longestValidParentheses(")()())"), 4
print s.longestValidParentheses("()"), 2
print s.longestValidParentheses("()(()"), 2
print s.longestValidParentheses("(()(()))"), 8
print s.longestValidParentheses("("), 0
print s.longestValidParentheses(")"), 0
print s.longestValidParentheses(""), 0
|
{
"content_hash": "728162764e389d878317d7e8316a4e45",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 53,
"avg_line_length": 30.93103448275862,
"alnum_prop": 0.5997770345596433,
"repo_name": "yaoxuanw007/forfun",
"id": "618d0a1ee913a01b53a97f5b649789fe3f4a473c",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/python/longestValidParentheses.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "1565"
},
{
"name": "JavaScript",
"bytes": "2053"
},
{
"name": "Python",
"bytes": "169988"
}
],
"symlink_target": ""
}
|
import numpy as np
import scipy.sparse as sp
from pymg.transfer_base import TransferBase
class LinearTransferPeriodic(TransferBase):
"""Implementation of the linear prolongation and restriction operators
Attributes:
I_2htoh (scipy.sparse.csc_matrix): prolongation matrix
I_hto2h (scipy.sparse.csc_matrix): restriction matrix
"""
def __init__(self, ndofs_fine, ndofs_coarse, *args, **kwargs):
"""Initialization routine for transfer operators
Args:
ndofs_fine (int): number of DOFs on the fine grid
ndofs_coarse (int): number of DOFs on the coarse grid
*args: Variable length argument list
**kwargs: Arbitrary keyword arguments
"""
# for this particular transfer class, we need to make a few assumptions
assert isinstance(ndofs_fine, int), type(ndofs_fine)
assert isinstance(ndofs_coarse, int)
# assert (ndofs_fine) % 2 == 0
# assert ndofs_coarse == (ndofs_fine) / 2
if ndofs_fine % 2 == 1:
self.odd = True
assert ndofs_coarse == (ndofs_fine + 1) / 2 - 1
else:
self.odd = False
assert ndofs_coarse == (ndofs_fine) / 2
super(LinearTransferPeriodic, self).__init__(ndofs_fine, ndofs_coarse, *args, **kwargs)
# pre-compute prolongation and restriction matrices
self.I_2htoh = self.__get_prolongation_matrix(ndofs_coarse, ndofs_fine)
self.I_hto2h = self.__get_restriction_matrix()
def __get_prolongation_matrix(self, ndofs_coarse, ndofs_fine):
"""Helper routine for the prolongation operator
Args:
ndofs_fine (int): number of DOFs on the fine grid
ndofs_coarse (int): number of DOFs on the coarse grid
Returns:
scipy.sparse.csc_matrix: sparse prolongation matrix of size
`ndofs_fine` x `ndofs_coarse`
"""
# This is a workaround, since I am not aware of a suitable way to do
# this directly with sparse matrices.
if self.odd:
P = np.zeros((ndofs_fine, ndofs_coarse))
np.fill_diagonal(P[1::2, :], 1)
np.fill_diagonal(P[0::2, :], 1.0 / 2.0)
np.fill_diagonal(P[2::2, :], 1.0 / 2.0)
P[0, -1] = 1.0 / 2.0
P[-1, 0] = 1.0 / 2.0
else:
P = np.zeros((ndofs_fine, ndofs_coarse))
np.fill_diagonal(P[0::2, :], 1)
np.fill_diagonal(P[1::2, :], 1.0 / 2.0)
np.fill_diagonal(P[1::2, 1:], 1.0 / 2.0)
P[-1, 0] = 1.0 / 2.0
return sp.csc_matrix(P)
def __get_restriction_matrix(self):
"""Helper routine for the restriction operator
Returns:
scipy.sparse.csc_matrix: sparse restriction matrix of size
`ndofs_coarse` x `ndofs_fine`
"""
assert hasattr(self, 'I_2htoh')
return 0.5 * sp.csc_matrix(self.I_2htoh.T)
    def restrict(self, u_fine):
        """Routine to apply restriction

        Args:
            u_fine (numpy.ndarray): vector on fine grid, size `ndofs_fine`
        Returns:
            numpy.ndarray: vector on coarse grid, size `ndofs_coarse`
        """
        return self.I_hto2h.dot(u_fine)

    def prolong(self, u_coarse):
        """Routine to apply prolongation

        Args:
            u_coarse (numpy.ndarray): vector on coarse grid, size `ndofs_coarse`
        Returns:
            numpy.ndarray: vector on fine grid, size `ndofs_fine`
        """
        return self.I_2htoh.dot(u_coarse)
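
if __name__ == '__main__':
    # Minimal smoke test of the transfer operators; a sketch assuming pymg is
    # installed (it is already required by the TransferBase import above).
    trans = LinearTransferPeriodic(ndofs_fine=8, ndofs_coarse=4)
    u_fine = np.sin(2 * np.pi * np.arange(8) / 8.0)
    u_coarse = trans.restrict(u_fine)   # 8 fine DOFs -> 4 coarse DOFs
    u_back = trans.prolong(u_coarse)    # 4 coarse DOFs -> 8 fine DOFs
    print(u_coarse.shape)  # (4,)
    print(u_back.shape)    # (8,)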
|
{
"content_hash": "a9f8b06f78806e683aabd9fe6e9f6441",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 95,
"avg_line_length": 36.101010101010104,
"alnum_prop": 0.576664801343033,
"repo_name": "Parallel-in-Time/pyMG-2016",
"id": "9eed922d35a9c38c3b4c2df3bcd1f077f894ff10",
"size": "3574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/linear_transfer_periodic.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "5785"
},
{
"name": "HTML",
"bytes": "1531431"
},
{
"name": "JavaScript",
"bytes": "20876"
},
{
"name": "Jupyter Notebook",
"bytes": "1135439"
},
{
"name": "Python",
"bytes": "167539"
}
],
"symlink_target": ""
}
|
"""Solar CLI api
On create "golden" resource should be moved to special place
"""
import click
from fabric import api as fabric_api
import json
import networkx as nx
import os
import sys
import tabulate
import yaml
from solar.core import actions
from solar.core import resource as sresource
from solar.core import signals
from solar.core.tags_set_parser import Expression
from solar.core.resource import virtual_resource as vr
from solar.core.log import log
from solar import errors
from solar.interfaces import orm
from solar import utils
from solar.cli import base
from solar.cli import executors
from solar.cli.orch import orchestration
from solar.cli.system_log import changes
from solar.cli.events import events
from solar.cli.resource import resource as cli_resource
# HELPERS
def format_resource_input(resource_input):
return '{}::{}'.format(
#click.style(resource_name, fg='white', bold=True),
resource_input.resource.name,
click.style(resource_input.name, fg='yellow')
)
def show_emitter_connections(emitter):
for emitter_input in emitter.resource_inputs().values():
click.echo(
'{} -> {}'.format(
format_resource_input(emitter_input),
'[{}]'.format(
', '.join(
format_resource_input(r)
for r in emitter_input.receivers.as_set()
)
)
)
)
@click.group(cls=base.AliasedGroup)
def main():
pass
def init_actions():
@main.command()
@click.option('-t', '--tags')
@click.option('-a', '--action')
@click.option('-d', '--dry-run', default=False, is_flag=True)
@click.option('-m', '--dry-run-mapping', default='{}')
def run(dry_run_mapping, dry_run, action, tags):
if dry_run:
dry_run_executor = executors.DryRunExecutor(mapping=json.loads(dry_run_mapping))
resources = filter(
lambda r: Expression(tags, r.tags).evaluate(),
orm.DBResource.all()
)
for r in resources:
resource_obj = sresource.load(r['id'])
actions.resource_action(resource_obj, action)
if dry_run:
click.echo('EXECUTED:')
for key in dry_run_executor.executed:
click.echo('{}: {}'.format(
click.style(dry_run_executor.compute_hash(key), fg='green'),
str(key)
))
def init_cli_connect():
@main.command()
@click.argument('emitter')
@click.argument('receiver')
@click.argument('mapping', default='')
def connect(mapping, receiver, emitter):
mapping_parsed = {}
click.echo('Connect {} to {}'.format(emitter, receiver))
emitter = sresource.load(emitter)
receiver = sresource.load(receiver)
try:
mapping_parsed.update(json.loads(mapping))
except ValueError:
for m in mapping.split():
k, v = m.split('->')
mapping_parsed.update({k: v})
signals.connect(emitter, receiver, mapping=mapping_parsed)
show_emitter_connections(emitter)
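    # The mapping argument accepts either JSON or space-separated 'src->dst'
    # pairs; the resource names below are illustrative only:
    #   connect node1 mariadb '{"ip": "ip", "ssh_key": "ssh_key"}'
    #   connect node1 mariadb 'ip->ip ssh_key->ssh_key'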
@main.command()
@click.argument('emitter')
@click.argument('receiver')
def disconnect(receiver, emitter):
click.echo('Disconnect {} from {}'.format(emitter, receiver))
emitter = sresource.load(emitter)
receiver = sresource.load(receiver)
click.echo(emitter)
click.echo(receiver)
signals.disconnect(emitter, receiver)
show_emitter_connections(emitter)
def init_cli_connections():
@main.group()
def connections():
pass
@connections.command()
def show():
resources = sresource.load_all()
for r in resources:
show_emitter_connections(r)
@connections.command()
@click.option('--start-with', default=None)
@click.option('--end-with', default=None)
def graph(start_with, end_with):
g = signals.detailed_connection_graph(start_with=start_with,
end_with=end_with)
nx.write_dot(g, 'graph.dot')
fabric_api.local('dot -Tsvg graph.dot -o graph.svg')
def run():
init_actions()
init_cli_connect()
init_cli_connections()
main.add_command(cli_resource)
main.add_command(orchestration)
main.add_command(changes)
main.add_command(events)
main()
if __name__ == '__main__':
run()
|
{
"content_hash": "dc23a234fb7e7e604709000a42769829",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 92,
"avg_line_length": 28.10625,
"alnum_prop": 0.6057371581054036,
"repo_name": "torgartor21/solar",
"id": "e7231ecb7f488ad83e79dcfba67e6521c5f46018",
"size": "5107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solar/solar/cli/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Puppet",
"bytes": "82954"
},
{
"name": "Python",
"bytes": "289854"
},
{
"name": "Shell",
"bytes": "1785"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.conf import settings
from django.db import connection
from django.utils.text import slugify
from django.db import IntegrityError
from contactnetwork.cube import compute_interactions
from contactnetwork.models import *
import contactnetwork.interaction as ci
from residue.models import ResidueGenericNumber, ResidueNumberingScheme, Residue, ResidueGenericNumberEquivalent
from structure.models import (Structure, StructureType, StructureSegment, StructureStabilizingAgent,PdbData,
Rotamer, StructureSegmentModeling, StructureCoordinates, StructureCoordinatesDescription, StructureEngineering,
StructureEngineeringDescription, Fragment)
import os, time
import yaml
from interaction.views import runcalculation,parsecalculation
from multiprocessing import Queue, Process, Value, Lock
class Command(BaseCommand):
help = "Output all uniprot mappings"
update = True
purge = True
processes = 8
def prepare_input(self, proc, items, iteration=1):
q = Queue()
procs = list()
num_items = len(items)
num = Value('i', 0)
lock = Lock()
if not num_items:
return False
        # make sure not to use more jobs than items (chunk size would be 0, which is not good)
if proc > num_items:
proc = num_items
chunk_size = int(num_items / proc)
connection.close()
for i in range(0, proc):
first = chunk_size * i
if i == proc - 1:
last = False
else:
last = chunk_size * (i + 1)
p = Process(target=self.main_func, args=([(first, last), iteration,num,lock]))
procs.append(p)
p.start()
for p in procs:
p.join()
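        # Note: main_func consumes work through the shared counter and lock
        # rather than the (first, last) slice it receives; the slice bounds
        # remain from the legacy chunked mode shown commented out there.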
def purge_contact_network(self):
InteractingResiduePair.truncate()
Distance.truncate()
Interaction.truncate()
def build_contact_network(self,s,pdb_code):
interacting_pairs, distances = compute_interactions(pdb_code, save_to_db=True)
def handle(self, *args, **options):
self.ss = Structure.objects.all()
self.structure_data_dir = os.sep.join([settings.DATA_DIR, 'structure_data', 'structures'])
if self.purge:
self.purge_contact_network()
print(len(self.ss),'structures')
self.prepare_input(self.processes, self.ss)
# for s in Structure.objects.all():
# self.purge_contact_network(s)
# self.build_contact_network(s,s.pdb_code.index)
def main_func(self, positions, iteration,count,lock):
# filenames
# if not positions[1]:
# filenames = self.filenames[positions[0]:]
# else:
# filenames = self.filenames[positions[0]:positions[1]]
ss = self.ss
while count.value<len(ss):
with lock:
if count.value<len(ss):
s = ss[count.value]
count.value +=1
# print(s, count.value)
else:
break
source_file_path = os.sep.join([self.structure_data_dir, s.pdb_code.index.upper() + ".yaml"])
if os.path.isfile(source_file_path):
with open(source_file_path, 'r') as f:
sd = yaml.load(f, Loader=yaml.FullLoader)
peptide_chain = ""
if 'ligand' in sd and sd['ligand'] and sd['ligand']!='None':
if isinstance(sd['ligand'], list):
ligands = sd['ligand']
else:
ligands = [sd['ligand']]
for ligand in ligands:
peptide_chain = ""
if 'chain' in ligand:
peptide_chain = ligand['chain']
# self.purge_contact_network(s)
current = time.time()
if self.update:
if Distance.objects.filter(structure=s).count():
print(s,'already done - skipping')
continue
try:
self.build_contact_network(s,s.pdb_code.index)
print(s,"Contact Network",time.time()-current)
                except Exception:
print(s,'Failed contact network')
# current = time.time()
#runcalculation(s.pdb_code.index,peptide_chain)
#parsecalculation(s.pdb_code.index,False)
#print(s,"Ligand Interactions",time.time()-current)
|
{
"content_hash": "7fec217dd571a2307b95f6dd0701ea6c",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 115,
"avg_line_length": 34.97727272727273,
"alnum_prop": 0.5806800952999783,
"repo_name": "protwis/protwis",
"id": "04ccd6c5895f462cfd1e4fa79c8eb35daf429e71",
"size": "4617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/management/commands/rebuild_all_contact_networks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167612"
},
{
"name": "HTML",
"bytes": "2477269"
},
{
"name": "JavaScript",
"bytes": "3119217"
},
{
"name": "Promela",
"bytes": "467"
},
{
"name": "Python",
"bytes": "4289933"
}
],
"symlink_target": ""
}
|
from boto.mashups.interactive import interactive_shell
import boto
import os
import time
import shutil
import StringIO
import paramiko
import socket
import subprocess
class SSHClient(object):
def __init__(self, server,
host_key_file='~/.ssh/known_hosts',
uname='root', ssh_pwd=None):
self.server = server
self.host_key_file = host_key_file
self.uname = uname
self._pkey = paramiko.RSAKey.from_private_key_file(server.ssh_key_file,
password=ssh_pwd)
self._ssh_client = paramiko.SSHClient()
self._ssh_client.load_system_host_keys()
self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.connect()
def connect(self):
retry = 0
while retry < 5:
try:
self._ssh_client.connect(self.server.hostname,
username=self.uname,
pkey=self._pkey)
return
except socket.error, (value,message):
if value == 61 or value == 111:
print 'SSH Connection refused, will retry in 5 seconds'
time.sleep(5)
retry += 1
else:
raise
except paramiko.BadHostKeyException:
print "%s has an entry in ~/.ssh/known_hosts and it doesn't match" % self.server.hostname
print 'Edit that file to remove the entry and then hit return to try again'
raw_input('Hit Enter when ready')
retry += 1
except EOFError:
print 'Unexpected Error from SSH Connection, retry in 5 seconds'
time.sleep(5)
retry += 1
print 'Could not establish SSH connection'
def open_sftp(self):
return self._ssh_client.open_sftp()
def get_file(self, src, dst):
sftp_client = self.open_sftp()
sftp_client.get(src, dst)
def put_file(self, src, dst):
sftp_client = self.open_sftp()
sftp_client.put(src, dst)
def open(self, filename, mode='r', bufsize=-1):
"""
Open a file on the remote system and return a file-like object.
"""
sftp_client = self.open_sftp()
return sftp_client.open(filename, mode, bufsize)
def listdir(self, path):
sftp_client = self.open_sftp()
return sftp_client.listdir(path)
def isdir(self, path):
status = self.run('[ -d %s ] || echo "FALSE"' % path)
if status[1].startswith('FALSE'):
return 0
return 1
def exists(self, path):
status = self.run('[ -a %s ] || echo "FALSE"' % path)
if status[1].startswith('FALSE'):
return 0
return 1
def shell(self):
"""
Start an interactive shell session on the remote host.
"""
channel = self._ssh_client.invoke_shell()
interactive_shell(channel)
def run(self, command):
"""
Execute a command on the remote host. Return a tuple containing
an integer status and a two strings, the first containing stdout
and the second containing stderr from the command.
"""
boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
status = 0
try:
t = self._ssh_client.exec_command(command)
        except paramiko.SSHException:
            # exec_command raised, so 't' was never bound; bail out early
            status = 1
            return (status, '', '')
std_out = t[1].read()
std_err = t[2].read()
t[0].close()
t[1].close()
t[2].close()
boto.log.debug('stdout: %s' % std_out)
boto.log.debug('stderr: %s' % std_err)
return (status, std_out, std_err)
def run_pty(self, command):
"""
Execute a command on the remote host with a pseudo-terminal.
Returns a string containing the output of the command.
"""
boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
channel = self._ssh_client.get_transport().open_session()
channel.get_pty()
channel.exec_command(command)
return channel
def close(self):
transport = self._ssh_client.get_transport()
transport.close()
self.server.reset_cmdshell()
class LocalClient(object):
def __init__(self, server, host_key_file=None, uname='root'):
self.server = server
self.host_key_file = host_key_file
self.uname = uname
def get_file(self, src, dst):
shutil.copyfile(src, dst)
def put_file(self, src, dst):
shutil.copyfile(src, dst)
def listdir(self, path):
return os.listdir(path)
def isdir(self, path):
return os.path.isdir(path)
def exists(self, path):
return os.path.exists(path)
def shell(self):
raise NotImplementedError, 'shell not supported with LocalClient'
    def run(self, command):
        boto.log.info('running:%s' % command)
        log_fp = StringIO.StringIO()
        process = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        while process.poll() is None:
time.sleep(1)
t = process.communicate()
log_fp.write(t[0])
log_fp.write(t[1])
boto.log.info(log_fp.getvalue())
boto.log.info('output: %s' % log_fp.getvalue())
return (process.returncode, log_fp.getvalue())
def close(self):
pass
class FakeServer(object):
"""
A little class to fake out SSHClient (which is expecting a
:class`boto.manage.server.Server` instance. This allows us
to
"""
def __init__(self, instance, ssh_key_file):
self.instance = instance
self.ssh_key_file = ssh_key_file
self.hostname = instance.dns_name
self.instance_id = self.instance.id
def start(server):
instance_id = boto.config.get('Instance', 'instance-id', None)
if instance_id == server.instance_id:
return LocalClient(server)
else:
return SSHClient(server)
def sshclient_from_instance(instance, ssh_key_file,
host_key_file='~/.ssh/known_hosts',
user_name='root', ssh_pwd=None):
"""
Create and return an SSHClient object given an
instance object.
:type instance: :class`boto.ec2.instance.Instance` object
:param instance: The instance object.
:type ssh_key_file: str
:param ssh_key_file: A path to the private key file used
to log into instance.
:type host_key_file: str
:param host_key_file: A path to the known_hosts file used
by the SSH client.
Defaults to ~/.ssh/known_hosts
:type user_name: str
:param user_name: The username to use when logging into
the instance. Defaults to root.
:type ssh_pwd: str
:param ssh_pwd: The passphrase, if any, associated with
private key.
"""
s = FakeServer(instance, ssh_key_file)
return SSHClient(s, host_key_file, user_name, ssh_pwd)
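
# A minimal usage sketch (hypothetical region, key path and login user; it
# assumes a running EC2 instance that is reachable over SSH):
#
#   import boto.ec2
#   conn = boto.ec2.connect_to_region('us-east-1')
#   instance = conn.get_only_instances()[0]
#   ssh = sshclient_from_instance(instance, '/path/to/key.pem',
#                                 user_name='ec2-user')
#   status, stdout, stderr = ssh.run('uptime')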
|
{
"content_hash": "9293776fb670fee3d055d84229842e22",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 105,
"avg_line_length": 33.45909090909091,
"alnum_prop": 0.5670425213965494,
"repo_name": "kumar303/rockit",
"id": "b21898c02d0c6c1d4a1dadd6a3c2aebeab2605fd",
"size": "8466",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vendor-local/boto/manage/cmdshell.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "4587"
},
{
"name": "Puppet",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "4139254"
},
{
"name": "Ruby",
"bytes": "1462"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
}
|
from telapi_helper.rest import TelapiRestClient
def test_client_init():
telapi = TelapiRestClient("AC123", "SECRET")
|
{
"content_hash": "a4c5ddfd726233f32cef2594be1fd0bb",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 48,
"avg_line_length": 30.5,
"alnum_prop": 0.7540983606557377,
"repo_name": "kevinburke/telapi-python",
"id": "787881c3e09d61c244e1a0b05ad9f1cfa71e1a49",
"size": "122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_client.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
Patched version of nose doctest plugin.
See https://github.com/nose-devs/nose/issues/7
"""
from nose.plugins.doctests import *
class _DoctestFix(Doctest):
def options(self, parser, env):
super(_DoctestFix, self).options(parser, env)
parser.add_option('--doctest-options', action="append",
dest="doctestOptions",
metavar="OPTIONS",
help="Specify options to pass to doctest. " +
"Eg. '+ELLIPSIS,+NORMALIZE_WHITESPACE'")
def configure(self, options, config):
super(_DoctestFix, self).configure(options, config)
self.optionflags = 0
if options.doctestOptions:
flags = ",".join(options.doctestOptions).split(',')
for flag in flags:
try:
if flag.startswith('+'):
self.optionflags |= getattr(doctest, flag[1:])
elif flag.startswith('-'):
self.optionflags &= ~getattr(doctest, flag[1:])
else:
raise ValueError(
"Must specify doctest options with starting " +
"'+' or '-'. Got %s" % (flag,))
except AttributeError:
raise ValueError("Unknown doctest option %s" %
(flag[1:],))
def loadTestsFromModule(self, module):
"""Load doctests from the module.
"""
log.debug("loading from %s", module)
if not self.matches(module.__name__):
log.debug("Doctest doesn't want module %s", module)
return
try:
tests = self.finder.find(module)
except AttributeError:
log.exception("Attribute error loading from %s", module)
# nose allows module.__test__ = False; doctest does not and throws
# AttributeError
return
if not tests:
log.debug("No tests found in %s", module)
return
tests.sort()
module_file = src(module.__file__)
# FIXME this breaks the id plugin somehow (tests probably don't
# get wrapped in result proxy or something)
cases = []
for test in tests:
if not test.examples:
continue
if not test.filename:
test.filename = module_file
cases.append(DocTestCase(test,
optionflags=self.optionflags,
result_var=self.doctest_result_var))
if cases:
yield self.suiteClass(cases, context=module, can_split=False)
def loadTestsFromFile(self, filename):
"""Load doctests from the file.
Tests are loaded only if filename's extension matches
configured doctest extension.
"""
if self.extension and anyp(filename.endswith, self.extension):
name = os.path.basename(filename)
dh = open(filename)
try:
doc = dh.read()
finally:
dh.close()
fixture_context = None
globs = {'__file__': filename}
if self.fixtures:
base, ext = os.path.splitext(name)
dirname = os.path.dirname(filename)
sys.path.append(dirname)
fixt_mod = base + self.fixtures
try:
fixture_context = __import__(
fixt_mod, globals(), locals(), ["nop"])
except ImportError, e:
log.debug(
"Could not import %s: %s (%s)", fixt_mod, e, sys.path)
log.debug("Fixture module %s resolved to %s",
fixt_mod, fixture_context)
if hasattr(fixture_context, 'globs'):
globs = fixture_context.globs(globs)
parser = doctest.DocTestParser()
test = parser.get_doctest(
doc, globs=globs, name=name,
filename=filename, lineno=0)
if test.examples:
case = DocFileCase(
test,
optionflags=self.optionflags,
setUp=getattr(fixture_context, 'setup_test', None),
tearDown=getattr(fixture_context, 'teardown_test', None),
result_var=self.doctest_result_var)
if fixture_context:
yield ContextList((case,), context=fixture_context)
else:
yield case
else:
yield False # no tests to load
def makeTest(self, obj, parent):
"""Look for doctests in the given object, which will be a
function, method or class.
"""
        name = getattr(obj, '__name__', 'Unnamed %s' % type(obj))
doctests = self.finder.find(obj, module=getmodule(parent), name=name)
if doctests:
for test in doctests:
if len(test.examples) == 0:
continue
yield DocTestCase(test, obj=obj, optionflags=self.optionflags,
result_var=self.doctest_result_var)
def _plugin_supports_doctest_options(plugin_cls):
import optparse
plugin = plugin_cls()
parser = optparse.OptionParser()
plugin.options(parser, {})
return parser.has_option('--doctest-options')
if _plugin_supports_doctest_options(Doctest):
class DoctestFix(Doctest):
pass
else:
class DoctestFix(_DoctestFix):
pass
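# A hedged usage sketch: with this plugin registered, doctest option flags
# can be passed on the nose command line, e.g.
#
#     nosetests --with-doctest --doctest-options=+ELLIPSIS,+NORMALIZE_WHITESPACE
#
# Flags prefixed with '+' are OR-ed into optionflags; '-' clears them.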
|
{
"content_hash": "7fa68fc95f007e8de2a256f15bf35165",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 78,
"avg_line_length": 39.31944444444444,
"alnum_prop": 0.5148357470858353,
"repo_name": "cortext/crawtextV2",
"id": "f3e318733a481b38fe70db376b6b5a0fd752f5d9",
"size": "5686",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "~/venvs/crawler/lib/python2.7/site-packages/nltk/test/doctest_nose_plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "40124"
},
{
"name": "Python",
"bytes": "8533233"
},
{
"name": "Shell",
"bytes": "3811"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
import numpy as np
import pytest
from pandas import DataFrame, SparseArray, SparseDataFrame, bdate_range
data = {'A': [np.nan, np.nan, np.nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, np.nan, np.nan, np.nan, 3, 4, 5, 6],
'C': np.arange(10, dtype=np.float64),
'D': [0, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan]}
dates = bdate_range('1/1/2011', periods=10)
# fixture names must be compatible with the tests in
# tests/frame/test_api.SharedWithSparse
@pytest.fixture
def float_frame_dense():
"""
Fixture for dense DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; some entries are missing
"""
return DataFrame(data, index=dates)
@pytest.fixture
def float_frame():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; some entries are missing
"""
# default_kind='block' is the default
return SparseDataFrame(data, index=dates, default_kind='block')
@pytest.fixture
def float_frame_int_kind():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D'] and default_kind='integer'.
Some entries are missing.
"""
return SparseDataFrame(data, index=dates, default_kind='integer')
@pytest.fixture
def float_string_frame():
"""
Fixture for sparse DataFrame of floats and strings with DatetimeIndex
Columns are ['A', 'B', 'C', 'D', 'foo']; some entries are missing
"""
sdf = SparseDataFrame(data, index=dates)
sdf['foo'] = SparseArray(['bar'] * len(dates))
return sdf
@pytest.fixture
def float_frame_fill0_dense():
"""
Fixture for dense DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 0
"""
values = SparseDataFrame(data).values
values[np.isnan(values)] = 0
return DataFrame(values, columns=['A', 'B', 'C', 'D'], index=dates)
@pytest.fixture
def float_frame_fill0():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 0
"""
values = SparseDataFrame(data).values
values[np.isnan(values)] = 0
return SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=0, index=dates)
@pytest.fixture
def float_frame_fill2_dense():
"""
Fixture for dense DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 2
"""
values = SparseDataFrame(data).values
values[np.isnan(values)] = 2
return DataFrame(values, columns=['A', 'B', 'C', 'D'], index=dates)
@pytest.fixture
def float_frame_fill2():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 2
"""
values = SparseDataFrame(data).values
values[np.isnan(values)] = 2
return SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=2, index=dates)
@pytest.fixture
def empty_frame():
"""
Fixture for empty SparseDataFrame
"""
return SparseDataFrame()
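# A hedged sketch of how a test might consume these fixtures (hypothetical
# test, not part of this conftest):
#
#     import pandas.util.testing as tm
#
#     def test_fill0_roundtrip(float_frame_fill0, float_frame_fill0_dense):
#         tm.assert_frame_equal(float_frame_fill0.to_dense(),
#                               float_frame_fill0_dense)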
|
{
"content_hash": "06f7359e6d0fcaeb4c44bae8ca836b41",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 77,
"avg_line_length": 28.026086956521738,
"alnum_prop": 0.6313993174061433,
"repo_name": "cbertinato/pandas",
"id": "3423260c1720adb1baab1e1267497226c022e0c2",
"size": "3223",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pandas/tests/sparse/frame/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "541"
},
{
"name": "C",
"bytes": "394466"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "15010333"
},
{
"name": "Shell",
"bytes": "27209"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Phrases(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Phrases Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(Phrases, self).__init__(temboo_session, '/Library/SunlightLabs/CapitolWords/Phrases')
def new_input_set(self):
return PhrasesInputSet()
def _make_result_set(self, result, path):
return PhrasesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return PhrasesChoreographyExecution(session, exec_id, path)
class PhrasesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Phrases
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Sunlight Labs.)
"""
super(PhrasesInputSet, self)._set_input('APIKey', value)
def set_Chamber(self, value):
"""
Set the value of the Chamber input for this Choreo. ((optional, string) Limit results to a particular chamber. Valid values: house, senate, extensions.)
"""
super(PhrasesInputSet, self)._set_input('Chamber', value)
def set_Date(self, value):
"""
Set the value of the Date input for this Choreo. ((optional, string) Show results for only the given date. Format: YYYY-MM-DD)
"""
super(PhrasesInputSet, self)._set_input('Date', value)
def set_EndDate(self, value):
"""
Set the value of the EndDate input for this Choreo. ((optional, string) Limit results to those on or before the given date. Format: YYYY-MM-DD.)
"""
super(PhrasesInputSet, self)._set_input('EndDate', value)
def set_EntityType(self, value):
"""
Set the value of the EntityType input for this Choreo. ((required, string) The entity type to get top phrases for. Acceptable values: date, month, state, legislator.)
"""
super(PhrasesInputSet, self)._set_input('EntityType', value)
def set_EntityValue(self, value):
"""
Set the value of the EntityValue input for this Choreo. ((required, string) The value of the entity to get top phrases for. Acceptable formats as follows for each EntityType: (date) 2011-11-09, (month) 201111, (state) NY. For the legislator EntityType, enter Bioguide ID here.)
"""
super(PhrasesInputSet, self)._set_input('EntityValue', value)
def set_Length(self, value):
"""
Set the value of the Length input for this Choreo. ((optional, integer) The length of the phrase, in words, to search for (up to 5).)
"""
super(PhrasesInputSet, self)._set_input('Length', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page of results to show. 100 results are shown at a time. To see more results use the page parameter.)
"""
super(PhrasesInputSet, self)._set_input('Page', value)
def set_Party(self, value):
"""
Set the value of the Party input for this Choreo. ((optional, string) Limit results to members of congress from a given party. Valid values: R, D, I.)
"""
super(PhrasesInputSet, self)._set_input('Party', value)
def set_ResponseFormat(self, value):
"""
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) Output formats include json and xml. Defaults to json.)
"""
super(PhrasesInputSet, self)._set_input('ResponseFormat', value)
def set_Sort(self, value):
"""
Set the value of the Sort input for this Choreo. ((optional, string) The metric and direction to sort by. Acceptable values: tfidf asc (default), tfidf desc, count asc, count desc.)
"""
super(PhrasesInputSet, self)._set_input('Sort', value)
def set_StartDate(self, value):
"""
Set the value of the StartDate input for this Choreo. ((optional, string) Limit results to those on or after the given date. Format: YYYY-MM-DD)
"""
super(PhrasesInputSet, self)._set_input('StartDate', value)
def set_State(self, value):
"""
Set the value of the State input for this Choreo. ((optional, string) Limit results to members from a particular state. Format: 2-letter state abbreviation (e.g. MD, RI, NY))
"""
super(PhrasesInputSet, self)._set_input('State', value)
class PhrasesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Phrases Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from CapitolWords.)
"""
return self._output.get('Response', None)
class PhrasesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return PhrasesResultSet(response, path)
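# A hedged usage sketch following the usual Temboo SDK pattern (the
# credentials and API key below are placeholders):
#
#     from temboo.core.session import TembooSession
#
#     session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#     choreo = Phrases(session)
#     inputs = choreo.new_input_set()
#     inputs.set_APIKey('SUNLIGHT_API_KEY')
#     inputs.set_EntityType('state')
#     inputs.set_EntityValue('NY')
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Response())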
|
{
"content_hash": "956674192fd9fcc3ce29942f640039f1",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 285,
"avg_line_length": 47.706896551724135,
"alnum_prop": 0.6653415251174557,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "dcb8e1d20a930c24190e02b5b0ef53d867903155",
"size": "6467",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/temboo/Library/SunlightLabs/CapitolWords/Phrases.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
}
|
"""
Launch containers for quality checks in Seadata
"""
import os
import json
import time
import requests
from seadata.apis.commons.cluster import ClusterContainerEndpoint
from b2stage.apis.commons.endpoint import MISSING_BATCH, NOT_FILLED_BATCH
from b2stage.apis.commons.endpoint import BATCH_MISCONFIGURATION
from seadata.apis.commons.cluster import INGESTION_DIR, MOUNTPOINT
from b2stage.apis.commons.b2handle import B2HandleEndpoint
from restapi import decorators as decorate
from restapi.protocols.bearer import authentication
from restapi.flask_ext.flask_irods.client import IrodsException
from restapi.utilities.htmlcodes import hcodes
from b2stage.apis.commons import path
from restapi.utilities.logs import log
class Resources(B2HandleEndpoint, ClusterContainerEndpoint):
# schema_expose = True
labels = ['seadatacloud', 'ingestion']
depends_on = ['RESOURCES_PROJECT', 'SEADATA_PROJECT']
GET = {
'/ingestion/<string:batch_id>/qc/<string:qc_name>': {
'custom': {},
'summary': 'Resources management',
'responses': {'200': {'description': 'unknown'}},
}
}
PUT = {
'/ingestion/<string:batch_id>/qc/<string:qc_name>': {
'custom': {},
'summary': 'Launch a quality check as a docker container',
'responses': {'200': {'description': 'unknown'}},
}
}
DELETE = {
'/ingestion/<string:batch_id>/qc/<string:qc_name>': {
'custom': {},
'summary': 'Remove a quality check if existing',
'responses': {'200': {'description': 'unknown'}},
}
}
@decorate.catch_error(exception=IrodsException, exception_label='B2SAFE')
@authentication.required()
def get(self, batch_id, qc_name):
""" Check my quality check container """
# log.info("Request for resources")
rancher = self.get_or_create_handle()
container_name = self.get_container_name(batch_id, qc_name, rancher._qclabel)
# resources = rancher.list()
container = rancher.get_container_object(container_name)
if container is None:
return self.send_errors(
'Quality check does not exist', code=hcodes.HTTP_BAD_NOTFOUND
)
logs = rancher.recover_logs(container_name)
# print("TEST", container_name, tmp)
errors_keys = ['failure', 'failed', 'error']
errors = []
for line in logs.lower().split('\n'):
if line.strip() == '':
continue
for key in errors_keys:
if key in line:
errors.append(line)
break
response = {
'batch_id': batch_id,
'qc_name': qc_name,
'state': container.get('state'),
'errors': errors,
}
if container.get('transitioning') == 'error':
response['errors'].append(container.get('transitioningMessage'))
"""
"state": "stopped", / error
"firstRunningTS": 1517431685000,
"transitioning": "no",
"transitioning": "error",
"transitioningMessage": "Image
"""
return response
@decorate.catch_error(exception=IrodsException, exception_label='B2SAFE')
@authentication.required()
def put(self, batch_id, qc_name):
""" Launch a quality check inside a container """
###########################
# get name from batch
# imain = self.get_service_instance(service_name='irods')
try:
imain = self.get_main_irods_connection()
batch_path = self.get_irods_batch_path(imain, batch_id)
local_path = path.join(MOUNTPOINT, INGESTION_DIR, batch_id)
log.info("Batch irods path: {}", batch_path)
log.info("Batch local path: {}", local_path)
batch_status, batch_files = self.get_batch_status(imain, batch_path, local_path)
if batch_status == MISSING_BATCH:
return self.send_errors(
"Batch '{}' not found (or no permissions)".format(batch_id),
code=hcodes.HTTP_BAD_NOTFOUND,
)
if batch_status == NOT_FILLED_BATCH:
return self.send_errors(
"Batch '{}' not yet filled".format(batch_id), code=hcodes.HTTP_BAD_RESOURCE
)
if batch_status == BATCH_MISCONFIGURATION:
log.error(
'Misconfiguration: {} files in {} (expected 1)',
len(batch_files),
batch_path,
)
return self.send_errors(
"Misconfiguration for batch_id {}".format(batch_id),
code=hcodes.HTTP_BAD_RESOURCE,
)
except requests.exceptions.ReadTimeout:
return self.send_errors(
"B2SAFE is temporarily unavailable",
code=hcodes.HTTP_SERVICE_UNAVAILABLE
)
###################
# Parameters (and checks)
envs = {}
input_json = self.get_input()
# TODO: backdoor check - remove me
bd = input_json.pop('eudat_backdoor', False)
if bd:
im_prefix = 'eudat'
else:
im_prefix = 'maris'
log.debug("Image prefix: {}", im_prefix)
# input parameters to be passed to container
pkey = "parameters"
param_keys = [
"request_id",
"edmo_code",
"datetime",
"api_function",
"version",
"test_mode",
pkey,
]
for key in param_keys:
if key == pkey:
continue
value = input_json.get(key, None)
if value is None:
return self.send_errors(
'Missing JSON key: {}'.format(key), code=hcodes.HTTP_BAD_REQUEST
)
response = {
'batch_id': batch_id,
'qc_name': qc_name,
'status': 'executed',
'input': input_json,
}
###################
try:
rancher = self.get_or_create_handle()
except BaseException as e:
log.critical(str(e))
return self.send_errors(
'Cannot establish a connection with Rancher',
code=hcodes.HTTP_SERVER_ERROR,
)
container_name = self.get_container_name(batch_id, qc_name, rancher._qclabel)
# Duplicated quality checks on the same batch are not allowed
container_obj = rancher.get_container_object(container_name)
if container_obj is not None:
log.error("Docker container {} already exists!", container_name)
response['status'] = 'existing'
code = hcodes.HTTP_BAD_CONFLICT
return self.force_response(response, errors=[response['status']], code=code)
docker_image_name = self.get_container_image(qc_name, prefix=im_prefix)
###########################
# ## ENVS
host_ingestion_path = self.get_ingestion_path_on_host(batch_id)
container_ingestion_path = self.get_ingestion_path_in_container()
envs['BATCH_DIR_PATH'] = container_ingestion_path
from seadata.apis.commons.queue import QUEUE_VARS
from seadata.apis.commons.cluster import CONTAINERS_VARS
for key, value in QUEUE_VARS.items():
if key in ['enable']:
continue
elif key == 'user':
value = CONTAINERS_VARS.get('rabbituser')
elif key == 'password':
value = CONTAINERS_VARS.get('rabbitpass')
envs['LOGS_' + key.upper()] = value
# envs['DB_USERNAME'] = CONTAINERS_VARS.get('dbuser')
# envs['DB_PASSWORD'] = CONTAINERS_VARS.get('dbpass')
# envs['DB_USERNAME_EDIT'] = CONTAINERS_VARS.get('dbextrauser')
# envs['DB_PASSWORD_EDIT'] = CONTAINERS_VARS.get('dbextrapass')
# FOLDER inside /batches to store temporary json inputs
# TODO: to be put into the configuration
JSON_DIR = 'json_inputs'
# Mount point of the json dir into the QC container
QC_MOUNTPOINT = '/json'
json_path_backend = os.path.join(MOUNTPOINT, INGESTION_DIR, JSON_DIR)
if not os.path.exists(json_path_backend):
log.info("Creating folder {}", json_path_backend)
os.mkdir(json_path_backend)
json_path_backend = os.path.join(json_path_backend, batch_id)
if not os.path.exists(json_path_backend):
log.info("Creating folder {}", json_path_backend)
os.mkdir(json_path_backend)
json_input_file = "input.{}.json".format(int(time.time()))
json_input_path = os.path.join(json_path_backend, json_input_file)
with open(json_input_path, "w+") as f:
f.write(json.dumps(input_json))
json_path_qc = self.get_ingestion_path_on_host(JSON_DIR)
json_path_qc = os.path.join(json_path_qc, batch_id)
envs['JSON_FILE'] = os.path.join(QC_MOUNTPOINT, json_input_file)
extra_params = {
'dataVolumes': [
"{}:{}".format(host_ingestion_path, container_ingestion_path),
"{}:{}".format(json_path_qc, QC_MOUNTPOINT),
],
'environment': envs,
}
if bd:
extra_params['command'] = ['/bin/sleep', '999999']
# log.info(extra_params)
###########################
errors = rancher.run(
container_name=container_name,
image_name=docker_image_name,
private=True,
extras=extra_params,
)
if errors is not None:
if isinstance(errors, dict):
edict = errors.get('error', {})
                # This case should never happen, since it was already verified above
if edict.get('code') == 'NotUnique':
response['status'] = 'existing'
code = hcodes.HTTP_BAD_CONFLICT
else:
response['status'] = 'could NOT be started'
response['description'] = edict
code = hcodes.HTTP_SERVER_ERROR
else:
response['status'] = 'failure'
code = hcodes.HTTP_SERVER_ERROR
return self.force_response(response, errors=[response['status']], code=code)
return response
@decorate.catch_error(exception=IrodsException, exception_label='B2SAFE')
@authentication.required()
def delete(self, batch_id, qc_name):
"""
        Remove an executed quality check container, if present
"""
rancher = self.get_or_create_handle()
container_name = self.get_container_name(batch_id, qc_name, rancher._qclabel)
rancher.remove_container_by_name(container_name)
# wait up to 10 seconds to verify the deletion
log.info("Removing: {}...", container_name)
removed = False
for _ in range(0, 20):
time.sleep(0.5)
container_obj = rancher.get_container_object(container_name)
if container_obj is None:
log.info("{} removed", container_name)
removed = True
break
else:
log.verbose("{} still exists", container_name)
if not removed:
log.warning("{} still in removal status", container_name)
response = {
'batch_id': batch_id,
'qc_name': qc_name,
'status': 'not_yet_removed',
}
else:
response = {'batch_id': batch_id, 'qc_name': qc_name, 'status': 'removed'}
return response
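# A hedged example of the JSON body expected by the PUT endpoint above (all
# values are placeholders; "parameters" is forwarded to the quality-check
# container):
#
#     {
#         "request_id": "abc-123",
#         "edmo_code": "1234",
#         "datetime": "2018-01-01T00:00:00Z",
#         "api_function": "qc_check",
#         "version": "1.0",
#         "test_mode": "no",
#         "parameters": {}
#     }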
|
{
"content_hash": "736c70423400adbecf6cd7fd051456ad",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 95,
"avg_line_length": 36.81308411214953,
"alnum_prop": 0.5504781247355505,
"repo_name": "EUDAT-B2STAGE/http-api",
"id": "607b171558715c82d7cc1515860dd35d78db142f",
"size": "11843",
"binary": false,
"copies": "1",
"ref": "refs/heads/1.1.1",
"path": "projects/seadata/backend/apis/resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2409"
},
{
"name": "HTML",
"bytes": "984"
},
{
"name": "Python",
"bytes": "304207"
},
{
"name": "Shell",
"bytes": "7226"
}
],
"symlink_target": ""
}
|
import numpy as np
# helper methods for printing scores
def get_score_summary(name, scores, weighted_average):
summary = '%.3f-%.3f (avg=%.5f mean=%.5f std=%.5f)' % (min(scores), max(scores), weighted_average,
np.mean(scores), np.std(scores))
score_list = ['%.3f' % score for score in scores]
return '%s %s [%s]' % (name, summary, ','.join(score_list))
def print_results(summaries):
    summaries.sort(key=lambda x: x[1])
if len(summaries) > 1:
print 'summaries'
for s, mean in summaries:
print s
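# A hedged usage sketch with made-up scores:
#
#     scores = [0.71, 0.74, 0.69]
#     print get_score_summary('svm', scores, weighted_average=0.72)
#     # svm 0.690-0.740 (avg=0.72000 mean=0.71333 std=0.02055) [0.710,0.740,0.690]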
|
{
"content_hash": "214cf3d9cdd59628b12a94be3c930363",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 102,
"avg_line_length": 36.294117647058826,
"alnum_prop": 0.5591572123176661,
"repo_name": "udibr/seizure-prediction",
"id": "8264c605f9f0c97cd669d7259761ca3f4396ae42",
"size": "617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seizure/scores.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "175238"
},
{
"name": "Shell",
"bytes": "2081"
}
],
"symlink_target": ""
}
|
'''
Created on 23.09.2011
@author: michi
'''
from ems.auth import AuthenticatedUser, AuthGroup, Permission
from ems.auth import UserProvider as BaseProvider
from ems.auth import UserNotFoundError
class SAOrmAuthGroup(AuthGroup):
def __init__(self, sourceObject, adapter):
self.__sourceObject = sourceObject
self._adapter = adapter
def _getSourceObject(self):
return self.__sourceObject
def _getId(self):
propName = self._adapter.getPropertyName(UserProvider.GROUP_ID)
return self.__sourceObject.__getattribute__(propName)
def _getName(self):
propName = self._adapter.getPropertyName(UserProvider.GROUP_NAME)
return self.__sourceObject.__getattribute__(propName)
def _getPermissions(self):
propName = self._adapter.getPropertyName(UserProvider.GROUP_PERMISSIONS)
codeProp = self._adapter.getPropertyName(UserProvider.PERMISSION_CODE)
accessProp = self._adapter.getPropertyName(UserProvider.PERMISSION_ACCESS)
titleProp = self._adapter.getPropertyName(UserProvider.PERMISSION_TITLE)
perms = set()
for ormPerm in self.__sourceObject.__getattribute__(propName):
code = ormPerm.__getattribute__(codeProp)
access = ormPerm.__getattribute__(accessProp)
title = ''
perms.add(Permission(code, title, access))
return perms
class SAOrmAuthenticatedUser(AuthenticatedUser):
def __init__(self, sourceObject, adapter):
self.__sourceObject = sourceObject
self._adapter = adapter
self._mainGroup = None
def _getSourceObject(self):
return self.__sourceObject
def _getId(self):
propName = self._adapter.getPropertyName(UserProvider.USER_ID)
return self.__sourceObject.__getattribute__(propName)
def _getName(self):
propName = self._adapter.getPropertyName(UserProvider.USER_NAME)
return self.__sourceObject.__getattribute__(propName)
def _getMainGroup(self):
if self._mainGroup is None:
propName = self._adapter.getPropertyName(UserProvider.USER_GROUP)
group = self.__sourceObject.__getattribute__(propName)
self._mainGroup = SAOrmAuthGroup(group, self._adapter)
return self._mainGroup
def _getGroups(self):
return [self._getMainGroup()]
class UserProvider(BaseProvider):
USER_ID = 'user_id'
USER_NAME = 'user_name'
USER_PASSWORD = 'user_password'
USER_GROUP = 'user_group'
GROUP_ID = 'group_id'
GROUP_NAME = 'group_name'
GROUP_PERMISSIONS = 'group_permissions'
PERMISSION_CODE = 'permission'
PERMISSION_ACCESS = 'access'
PERMISSION_TITLE = 'title'
def __init__(self, sessionGetter, userClass=None, propertyMap=None,
groupClass=None):
self._sessionGetter = sessionGetter
self._session = None
self.userClass = userClass
self.groupClass = groupClass
self._propertyMap = {}
if propertyMap is not None:
self.propertyMap = propertyMap
@property
def session(self):
if self._session is None:
if callable(self._sessionGetter):
self._session = self._sessionGetter()
else:
self._session = self._sessionGetter
return self._session
def getPropertyMap(self):
return self._propertyMap
def setPropertyMap(self, map):
self._propertyMap = map
propertyMap = property(getPropertyMap, setPropertyMap)
def getPropertyName(self, name):
if name not in self._propertyMap:
raise KeyError("PropertyMap does not contain key {0}".format(name))
return self._propertyMap[name]
def findByCredentials(self, **kwargs):
if 'username' not in kwargs:
raise UserNotFoundError()
filterByArgs = {self.getPropertyName(self.USER_NAME):kwargs['username']}
user = self._getUserFromDB(filterByArgs)
if not isinstance(user, self.userClass):
raise UserNotFoundError()
return SAOrmAuthenticatedUser(user, self)
def _getUserFromDB(self, filterArgs):
return self.session.query(self.userClass).filter_by(**filterArgs).first()
def getUserName(self):
return self.currentAuthenticatedObject.__getattribute__(self.accountPropertyName)
def getGroupName(self):
pass
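# A hedged configuration sketch (User/Group are hypothetical SQLAlchemy
# models; the attribute names on the right-hand side are placeholders):
#
#     provider = UserProvider(sessionGetter=get_session,
#                             userClass=User, groupClass=Group,
#                             propertyMap={
#                                 UserProvider.USER_ID: 'id',
#                                 UserProvider.USER_NAME: 'login',
#                                 UserProvider.USER_PASSWORD: 'password',
#                                 UserProvider.USER_GROUP: 'group',
#                                 UserProvider.GROUP_ID: 'id',
#                                 UserProvider.GROUP_NAME: 'name',
#                                 UserProvider.GROUP_PERMISSIONS: 'permissions',
#                                 UserProvider.PERMISSION_CODE: 'code',
#                                 UserProvider.PERMISSION_ACCESS: 'access',
#                                 UserProvider.PERMISSION_TITLE: 'title',
#                             })
#     user = provider.findByCredentials(username='michi')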
|
{
"content_hash": "0799fce4680425c46886074113a7f8f0",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 89,
"avg_line_length": 31.928571428571427,
"alnum_prop": 0.6496644295302013,
"repo_name": "mtils/ems",
"id": "3067c52073ea3c6448779abc2560940f7e70962b",
"size": "4470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ems/auth/sqlalchemy/userprovider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3631"
},
{
"name": "Python",
"bytes": "1924893"
},
{
"name": "QML",
"bytes": "16755"
}
],
"symlink_target": ""
}
|
import actualSDN
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller import ofp_event
from ryu.controller.handler import set_ev_cls
import os
import socket
import json
from ryu.lib import hub
SOCKFILE = '/tmp/hello_sock'
class actualSDN_Django_Switch(actualSDN.actualSDN_switch):
def __init__(self, *args, **kwargs):
super(actualSDN_Django_Switch, self).__init__(*args, **kwargs)
self.sock = None
self.config = {}
self.start_sock_server()
def set_vtable(self, host, vlan):
        if self.vtable.get(host) != vlan:
            self.vtable[host] = vlan
print("Change")
print(self.vtable)
self.ShortestPathDeleteFlow(self.default_datapath, host)
def recv_loop(self):
while True:
print('wait rcv')
data = self.sock.recv(1024)
msg = json.loads(data)
print("print msg")
print(msg)
if self.host_enter >= self.host_num:
self.default_path_install(self.default_ev)
for host, vlan in msg.items():
self.set_vtable(str(host).rstrip(' '), str(vlan))
print(self.vtable)
def start_sock_server(self):
if os.path.exists(SOCKFILE):
os.unlink(SOCKFILE)
self.sock = hub.socket.socket(hub.socket.AF_UNIX, hub.socket.SOCK_DGRAM)
self.sock.bind(SOCKFILE)
hub.spawn(self.recv_loop)
print('success start sock')
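# A hedged sketch of a client pushing a host-to-vlan update over the unix
# datagram socket (host MAC and vlan are placeholders; on Python 3 the
# payload would need to be encoded to bytes):
#
#     import json
#     import socket
#
#     sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
#     sock.sendto(json.dumps({'00:00:00:00:00:01': '100'}), SOCKFILE)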
|
{
"content_hash": "0825f620b12cf6c277f5313db57caa5c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 74,
"avg_line_length": 25.49019607843137,
"alnum_prop": 0.7023076923076923,
"repo_name": "ray6/sdn",
"id": "23a33671a7d4c9fdadc84d6b9727675af650586d",
"size": "1300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actualSDN_django_switch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "7565"
},
{
"name": "JavaScript",
"bytes": "232"
},
{
"name": "Python",
"bytes": "94815"
}
],
"symlink_target": ""
}
|
"""Perform GATK based filtering, perferring variant quality score recalibration.
Performs hard filtering when VQSR fails on smaller sets of variant calls.
"""
import os
import toolz as tz
from bcbio import broad, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import gatkjoint, vcfutils, vfilter
def run(call_file, ref_file, vrn_files, data):
"""Run filtering on the input call file, handling SNPs and indels separately.
For VQSR, need to split the file to apply. For hard filters can run on the original
filter, filtering by bcftools type.
"""
algs = [data["config"]["algorithm"]] * len(data.get("vrn_files", [1]))
if config_utils.use_vqsr(algs):
assert "gvcf" not in dd.get_tools_on(data), \
("Cannot force gVCF output and use VQSR. Try using hard filtering with tools_off: [vqsr]")
snp_file, indel_file = vcfutils.split_snps_indels(call_file, ref_file, data["config"])
snp_filter_file = _variant_filtration(snp_file, ref_file, vrn_files, data, "SNP",
vfilter.gatk_snp_hard)
indel_filter_file = _variant_filtration(indel_file, ref_file, vrn_files, data, "INDEL",
vfilter.gatk_indel_hard)
orig_files = [snp_filter_file, indel_filter_file]
out_file = "%scombined.vcf.gz" % os.path.commonprefix(orig_files)
combined_file = vcfutils.combine_variant_files(orig_files, out_file, ref_file, data["config"])
return _filter_nonref(combined_file, data)
else:
snp_filter = vfilter.gatk_snp_hard(call_file, data)
indel_filter = vfilter.gatk_indel_hard(snp_filter, data)
if "gvcf" not in dd.get_tools_on(data):
return _filter_nonref(indel_filter, data)
else:
return indel_filter
_MISSING_HEADERS = """##FORMAT=<ID=PGT,Number=1,Type=String,Description="Physical phasing haplotype information, describing how the alternate alleles are phased in relation to one another">
##FORMAT=<ID=PID,Number=1,Type=String,Description="Physical phasing ID information, where each unique ID within a given sample (but not across samples) connects records within a phasing group">
"""
def _filter_nonref(in_file, data):
"""Fixes potential issues from GATK processing and merging
- Remove NON_REF gVCF items from GATK VCF output; these occasionally sneak
through in joint calling.
- Add headers for physical phasing. These are not always present and the
header definitions can be lost during merging.
"""
out_file = "%s-gatkclean%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
header_file = "%s-updateheaders.txt" % utils.splitext_plus(tx_out_file)[0]
with open(header_file, "w") as out_handle:
out_handle.write(_MISSING_HEADERS)
cmd = ("bcftools annotate -h {header_file} -o - {in_file} | "
"grep -v NON_REF | bgzip -c > {tx_out_file}")
do.run(cmd.format(**locals()), "Remove stray NON_REF gVCF information from VCF output", data)
vcfutils.bgzip_and_index(out_file, data["config"])
return out_file
def _apply_vqsr(in_file, ref_file, recal_file, tranch_file,
sensitivity_cutoff, filter_type, data):
"""Apply VQSR based on the specified tranche, returning a filtered VCF file.
"""
broad_runner = broad.runner_from_config(data["config"])
base, ext = utils.splitext_plus(in_file)
out_file = "{base}-{filter}filter{ext}".format(base=base, ext=ext,
filter=filter_type)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
params = ["-T", "ApplyRecalibration",
"-R", ref_file,
"--input", in_file,
"--out", tx_out_file,
"--tranches_file", tranch_file,
"--recal_file", recal_file,
"--mode", filter_type]
resources = config_utils.get_resources("gatk_apply_recalibration", data["config"])
opts = resources.get("options", [])
if not opts:
opts += ["--ts_filter_level", sensitivity_cutoff]
params += opts
broad_runner.run_gatk(params)
return out_file
def _get_training_data(vrn_files):
"""Retrieve training data, returning an empty set of information if not available.
"""
out = {"SNP": [], "INDEL": []}
# SNPs
for name, train_info in [("train_hapmap", "known=false,training=true,truth=true,prior=15.0"),
("train_omni", "known=false,training=true,truth=true,prior=12.0"),
("train_1000g", "known=false,training=true,truth=false,prior=10.0"),
("dbsnp", "known=true,training=false,truth=false,prior=2.0")]:
if name not in vrn_files:
return {}
else:
out["SNP"].append((name.replace("train_", ""), train_info, vrn_files[name]))
# Indels
if "train_indels" in vrn_files:
out["INDEL"].append(("mills", "known=true,training=true,truth=true,prior=12.0",
vrn_files["train_indels"]))
else:
return {}
return out
def _have_training_data(vrn_files):
return len(_get_training_data(vrn_files)) > 0
def _get_vqsr_training(filter_type, vrn_files):
"""Return parameters for VQSR training, handling SNPs and Indels.
"""
params = []
for name, train_info, fname in _get_training_data(vrn_files)[filter_type]:
params.extend(["-resource:%s,VCF,%s" % (name, train_info), fname])
if filter_type == "INDEL":
params.extend(["--maxGaussians", "4"])
return params
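# For example, with hapmap/omni/1000g/dbsnp training files available,
# _get_vqsr_training("SNP", vrn_files) produces parameters shaped like
# (paths are placeholders):
#
#   ["-resource:hapmap,VCF,known=false,training=true,truth=true,prior=15.0",
#    "/ref/hapmap.vcf",
#    "-resource:omni,VCF,known=false,training=true,truth=true,prior=12.0",
#    "/ref/omni.vcf", ...]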
def _get_vqsr_annotations(filter_type):
"""Retrieve appropriate annotations to use for VQSR based on filter type.
    Issues reported with MQ and bwa-mem quality distributions result in
    intermittent failures when using VQSR:
http://gatkforums.broadinstitute.org/discussion/4425/variant-recalibration-failing
http://gatkforums.broadinstitute.org/discussion/4248/variantrecalibrator-removing-all-snps-from-the-training-set
"""
if filter_type == "SNP":
# MQ, MQRankSum
return ["DP", "QD", "FS", "ReadPosRankSum"]
else:
assert filter_type == "INDEL"
# MQRankSum
return ["DP", "QD", "FS", "ReadPosRankSum"]
def _run_vqsr(in_file, ref_file, vrn_files, sensitivity_cutoff, filter_type, data):
"""Run variant quality score recalibration.
"""
cutoffs = ["100.0", "99.99", "99.98", "99.97", "99.96", "99.95", "99.94", "99.93", "99.92", "99.91",
"99.9", "99.8", "99.7", "99.6", "99.5", "99.0", "98.0", "90.0"]
if sensitivity_cutoff not in cutoffs:
cutoffs.append(sensitivity_cutoff)
cutoffs.sort()
broad_runner = broad.runner_from_config(data["config"])
base = utils.splitext_plus(in_file)[0]
recal_file = "%s.recal" % base
tranches_file = "%s.tranches" % base
if not utils.file_exists(recal_file):
with file_transaction(data, recal_file, tranches_file) as (tx_recal, tx_tranches):
params = ["-T", "VariantRecalibrator",
"-R", ref_file,
"--input", in_file,
"--mode", filter_type,
"--recal_file", tx_recal,
"--tranches_file", tx_tranches]
params += _get_vqsr_training(filter_type, vrn_files)
resources = config_utils.get_resources("gatk_variant_recalibrator", data["config"])
opts = resources.get("options", [])
if not opts:
for cutoff in cutoffs:
opts += ["-tranche", str(cutoff)]
for a in _get_vqsr_annotations(filter_type):
opts += ["-an", a]
params += opts
cores = dd.get_cores(data)
memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None
try:
broad_runner.new_resources("gatk-vqsr")
broad_runner.run_gatk(params, log_error=False, memscale=memscale)
            except Exception:  # Can fail to run if not enough values are present to train.
return None, None
return recal_file, tranches_file
# ## SNP and indel specific variant filtration
def _already_hard_filtered(in_file, filter_type):
"""Check if we have a pre-existing hard filter file from previous VQSR failure.
"""
filter_file = "%s-filter%s.vcf.gz" % (utils.splitext_plus(in_file)[0], filter_type)
return utils.file_exists(filter_file)
def _variant_filtration(in_file, ref_file, vrn_files, data, filter_type,
hard_filter_fn):
"""Filter SNP and indel variant calls using GATK best practice recommendations.
Hard filter if configuration indicates too little data or already finished a
hard filtering, otherwise try VQSR.
"""
# Algorithms multiplied by number of input files to check for large enough sample sizes
algs = [data["config"]["algorithm"]] * len(data.get("vrn_files", [1]))
if (not config_utils.use_vqsr(algs) or
_already_hard_filtered(in_file, filter_type)):
logger.info("Skipping VQSR, using hard filers: we don't have whole genome input data")
return hard_filter_fn(in_file, data)
elif not _have_training_data(vrn_files):
logger.info("Skipping VQSR, using hard filers: genome build does not have sufficient training data")
return hard_filter_fn(in_file, data)
else:
sensitivities = {"INDEL": "98.0", "SNP": "99.97"}
recal_file, tranches_file = _run_vqsr(in_file, ref_file, vrn_files,
sensitivities[filter_type], filter_type, data)
if recal_file is None: # VQSR failed
logger.info("VQSR failed due to lack of training data. Using hard filtering.")
return hard_filter_fn(in_file, data)
else:
return _apply_vqsr(in_file, ref_file, recal_file, tranches_file,
sensitivities[filter_type], filter_type, data)
|
{
"content_hash": "b935b9077af5826836b985d1c4574034",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 193,
"avg_line_length": 49.08878504672897,
"alnum_prop": 0.6104712041884817,
"repo_name": "mjafin/bcbio-nextgen",
"id": "f1e3649d99cdc3f5e0016e7b79d1000938251dbd",
"size": "10505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bcbio/variation/gatkfilter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1767655"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "14377"
}
],
"symlink_target": ""
}
|
from deepmonster.machinery.architecture import Architecture
from deepmonster.nnet.activations import Rectifier, Softmax
from deepmonster.nnet.blocks import ConvBlock, FullyConnectedBlock
from deepmonster.nnet.initializations import Initialization, Gaussian
from deepmonster.nnet.extras import Reshape
from deepmonster.nnet.normalizations import SpatialBatchNorm
class MnistArch(Architecture):
def configure(self):
self.assert_for_keys(['image_size', 'channels'])
self.image_size = self.config['image_size']
self.channels = self.config['channels']
super(MnistArch, self).configure()
def build_arch(self):
self.restrict_architecture()
config = {
'activation': Rectifier(),
'activation_norm': SpatialBatchNorm(),
'initialization': Initialization({'W': Gaussian(std=0.03)}),
'padding': 'half',
}
layers = [
ConvBlock(3, 16, image_size=self.image_size, num_channels=self.channels),
ConvBlock(3, 32, strides=(2,2)), # 14x14
ConvBlock(3, 64, strides=(2,2)), # 7x7
ConvBlock(4, 128, padding='valid'), # 4x4
ConvBlock(4, 256, padding='valid'), # 1x1
Reshape(([0], -1)),
FullyConnectedBlock(input_dims=256, output_dims=10,
activation=Softmax(), activation_norm=False)
]
self.add_layers_to_arch(layers, 'classifier', config)
|
{
"content_hash": "8620d3cb010b4f1208c261cba471f3a9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 85,
"avg_line_length": 39.78378378378378,
"alnum_prop": 0.6331521739130435,
"repo_name": "olimastro/DeepMonster",
"id": "614ed452feca3ca0ee0da91ccbab6dd23803a65e",
"size": "1472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepmonster/testing/mnist-test/mnist-arch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "347634"
}
],
"symlink_target": ""
}
|
from wsme import types as wtypes
from solum.api.controllers import common_types
from solum.api.controllers.v1.datamodel import types as api_types
class Infrastructure(api_types.Base):
"""Description of an Infrastructure."""
    stacks_uri = common_types.Uri
    "URI to the infrastructure stacks."
@classmethod
def sample(cls):
return cls(uri='http://example.com/v1/infrastructure',
name='infrastructure',
type='infrastructure',
stacks_uri='http://example.com/v1/infrastructure/stacks',
tags=['small'],
project_id='1dae5a09ef2b4d8cbf3594b0eb4f6b94',
user_id='55f41cf46df74320b9486a35f5d28a11',
description='Solum Infrastructure endpoint')
class InfrastructureStack(api_types.Base):
"""Representation of an InfrastructureStack.
An InfrastructureStack is a set of servers used by Solum for
infrastructure purpose. It can be a build farm, a set of git servers, bug
trackers, or more.
"""
image_id = wtypes.text
"Unique Identifier of the image"
heat_stack_id = wtypes.text
"Unique Identifier of the heat stack associated with this infra stack"
@classmethod
def sample(cls):
return cls(uri='http://example.com/v1/infrastructure/stacks/1234',
name='jenkins',
type='infrastructure_stack',
image_id='1234',
heat_stack_id='1234',
tags=['small'],
project_id='1dae5a09ef2b4d8cbf3594b0eb4f6b94',
user_id='55f41cf46df74320b9486a35f5d28a11',
description='A jenkins build farm of servers')
|
{
"content_hash": "b536a9c6e1f259e2604d7661cd429358",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 77,
"avg_line_length": 35.224489795918366,
"alnum_prop": 0.6176129779837776,
"repo_name": "stackforge/solum",
"id": "ffa750656ab565025aa6aa023a2b09030cfd7fcf",
"size": "2301",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "solum/api/controllers/v1/datamodel/infrastructure.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "958"
},
{
"name": "Python",
"bytes": "1243294"
},
{
"name": "Shell",
"bytes": "80784"
}
],
"symlink_target": ""
}
|
from gwt.ui.InlineHTML import (
DOM,
Factory,
HTML,
InlineHTML,
)
|
{
"content_hash": "a7e8df48cd09931636b024e7caa57aab",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 31,
"avg_line_length": 13.666666666666666,
"alnum_prop": 0.5975609756097561,
"repo_name": "anandology/pyjamas",
"id": "348ae80682ecdba51bf232e0036b12183a9af696",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/pyjamas/ui/InlineHTML.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "325172"
},
{
"name": "PHP",
"bytes": "121841"
},
{
"name": "Python",
"bytes": "6383764"
},
{
"name": "Shell",
"bytes": "19448"
}
],
"symlink_target": ""
}
|
from flask import session
from flask_socketio import emit, join_room, leave_room
from application.extensions import socketio
@socketio.on('joined', namespace='/chat')
def joined(message):
"""Sent by clients when they enter a room.
A status message is broadcast to all people in the room."""
room = session.get('room')
join_room(room)
emit('status', {'msg': session.get('name') + ' has entered the room.'}, room=room)
@socketio.on('text', namespace='/chat')
def text(message):
"""Sent by a client when the user entered a new message.
The message is sent to all people in the room."""
room = session.get('room')
emit('message', {'msg': session.get('name') + ':' + message['msg']}, room=room)
@socketio.on('left', namespace='/chat')
def left(message):
"""Sent by clients when they leave a room.
A status message is broadcast to all people in the room."""
room = session.get('room')
leave_room(room)
emit('status', {'msg': session.get('name') + ' has left the room.'}, room=room)
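# A hedged sketch of exercising these handlers with Flask-SocketIO's test
# client (create_app is a hypothetical application factory; the session
# values would need to be set up for the status messages to be meaningful):
#
#     app = create_app()
#     client = socketio.test_client(app, namespace='/chat')
#     client.emit('text', {'msg': 'hello'}, namespace='/chat')
#     received = client.get_received('/chat')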
|
{
"content_hash": "231e22aed0896d12f517c45d8606d9e1",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 86,
"avg_line_length": 35.793103448275865,
"alnum_prop": 0.6628131021194605,
"repo_name": "chenvista/flask-demos",
"id": "508ec2a10bac1b405418e74a4669e3d74bb02429",
"size": "1038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/controllers/events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "110412"
},
{
"name": "Python",
"bytes": "1984435"
},
{
"name": "Shell",
"bytes": "1862"
}
],
"symlink_target": ""
}
|
from .dummy_data import (
matplotlib_multiple_axes_figures,
matplotlib_with_image,
matplotlib_without_image,
)
from .mock_server import mock_server, default_ctx, create_app, ParseCTX
from .mock_requests import InjectRequests
from .records import RecordsUtil
from .notebook_client import WandbNotebookClient
from .utils import (
subdict,
free_port,
first_filestream,
fixture_open,
fixture_copy,
assets_path,
notebook_path,
mock_sagemaker,
mock_k8s,
assert_deep_lists_equal,
)
__all__ = [
"ParseCTX",
"RecordsUtil",
"WandbNotebookClient",
"default_ctx",
"mock_server",
"fixture_open",
"fixture_copy",
"create_app",
"free_port",
"first_filestream",
"notebook_path",
"assets_path",
"mock_sagemaker",
"mock_k8s",
"assert_deep_lists_equal",
"subdict",
"matplotlib_multiple_axes_figures",
"matplotlib_with_image",
"matplotlib_without_image",
"InjectRequests",
]
|
{
"content_hash": "7e13751667d720ec17c4d2395fef6f4d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 71,
"avg_line_length": 21.977777777777778,
"alnum_prop": 0.6572295247724975,
"repo_name": "wandb/client",
"id": "da8b8a309276f8bc38d59496665c4cba1bd37715",
"size": "989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "Dockerfile",
"bytes": "3491"
},
{
"name": "Jupyter Notebook",
"bytes": "7751"
},
{
"name": "Makefile",
"bytes": "1863"
},
{
"name": "Objective-C",
"bytes": "80764"
},
{
"name": "Python",
"bytes": "3634228"
},
{
"name": "Shell",
"bytes": "4662"
}
],
"symlink_target": ""
}
|
from random import randint
import uuid
from django.conf import settings
from django.contrib.auth import get_user_model
from rest_framework import serializers as rest_serializers
from rest_framework.authtoken.models import Token
from rest_framework.exceptions import ValidationError
from fallballapp.error_codes import ACTIVATION_ERROR, VALIDATION_ERROR
from fallballapp.models import Application, Client, ClientUser, Reseller
from fallballapp.utils import get_app_username, get_jwt_token
class AuthorizationSerializer(rest_serializers.HyperlinkedModelSerializer):
token = rest_serializers.SerializerMethodField()
def get_token(self, obj):
"""
As token exists inside User object, we need to get it to show it with particular reseller
"""
token = Token.objects.filter(user=obj.owner).first()
return token.key if token else None
class ApplicationSerializer(AuthorizationSerializer):
entrypoint = rest_serializers.SerializerMethodField()
class Meta:
model = Application
fields = ('id', 'entrypoint', 'token', 'async')
def get_entrypoint(self, obj):
return 'https://{}/v1/'.format(settings.SERVICE_HOST)
def create(self, validated_data):
if get_user_model().objects.filter(username=validated_data['id']).exists():
raise ValidationError('Application with such id is already created')
user = get_user_model().objects.create(username=validated_data['id'])
return Application.objects.create(owner=user, **validated_data)
class ApplicationPutSerializer(AuthorizationSerializer):
class Meta:
model = Application
fields = ('async',)
class StorageResellerSerializer(rest_serializers.HyperlinkedModelSerializer):
"""
Auxiliary serializer in order to make nested json: "storage": {"usage","limit"}
"""
usage = rest_serializers.SerializerMethodField()
class Meta:
model = Reseller
fields = ('usage', 'limit')
def get_usage(self, obj):
return obj.get_usage()
class ResellerSerializer(AuthorizationSerializer):
storage = StorageResellerSerializer(source='*')
clients_amount = rest_serializers.SerializerMethodField()
class Meta:
model = Reseller
fields = ('name', 'rid', 'token', 'clients_amount', 'storage')
def create(self, validated_data):
"""
This method is overwritten in order to create User object and associate it with reseller.
This operation is needed to create token for reseller
"""
application_id = self.initial_data['application'].id
reseller_name = validated_data['name']
username = '{application_id}.{reseller_name}'.format(application_id=application_id,
reseller_name=reseller_name)
if get_user_model().objects.filter(username=username).exists():
raise ValidationError('Reseller with such name is already created')
user = get_user_model().objects.create(username=username)
return Reseller.objects.create(owner=user, application=self.initial_data['application'],
**validated_data)
def get_clients_amount(self, obj):
return obj.get_clients_amount()
class ResellerNameSerializer(rest_serializers.HyperlinkedModelSerializer):
class Meta:
model = Reseller
fields = ('name', )
class StorageClientSerializer(rest_serializers.HyperlinkedModelSerializer):
usage = rest_serializers.SerializerMethodField()
class Meta:
model = Client
fields = ('usage', 'limit')
def get_usage(self, obj):
return obj.get_usage()
class ClientSerializer(rest_serializers.HyperlinkedModelSerializer):
storage = StorageClientSerializer(source='*')
users_amount = rest_serializers.SerializerMethodField()
users_by_type = rest_serializers.SerializerMethodField()
email = rest_serializers.EmailField(required=False)
postal_code = rest_serializers.CharField(required=False)
class Meta:
model = Client
fields = (
'name', 'email', 'postal_code', 'creation_date', 'users_amount', 'users_by_type',
'storage', 'is_integrated', 'status', 'country', 'environment',)
read_only_fields = ('status',)
def validate_postal_code(self, value):
if not value.isdigit() or len(value) != 5:
# ValidationError is triggered with error code and message
raise rest_serializers.ValidationError({
'code': VALIDATION_ERROR,
'message': "Postal code must be a 5-digit number",
})
if value.startswith('999'):
raise rest_serializers.ValidationError({
'code': ACTIVATION_ERROR,
'message': "Postal code can't start with 999",
})
return value
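    # For example: '10001' validates; '1234' raises VALIDATION_ERROR
    # (not a 5-digit number); '99901' raises ACTIVATION_ERROR.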
def create(self, validated_data):
"""
Method is overwritten as we need to associate user with reseller
"""
return Client.objects.create(reseller=self.initial_data['reseller'], **validated_data)
def update(self, instance, validated_data):
if 'country' in validated_data:
del validated_data['country']
if 'environment' in validated_data:
del validated_data['environment']
        return super(ClientSerializer, self).update(instance, validated_data)
def get_users_amount(self, obj):
return obj.get_users_amount()
def get_users_by_type(self, obj):
return obj.get_users_by_type()
class StorageClientUserSerializer(rest_serializers.HyperlinkedModelSerializer):
class Meta:
model = ClientUser
fields = ('usage', 'limit')
class ClientUserSerializer(rest_serializers.ModelSerializer):
storage = StorageClientUserSerializer(source='*')
admin = rest_serializers.BooleanField()
superadmin = rest_serializers.BooleanField()
password = rest_serializers.CharField(required=False)
profile_type = rest_serializers.CharField(required=False)
email = rest_serializers.EmailField(required=True)
class Meta:
model = ClientUser
fields = ('user_id', 'email', 'password', 'storage', 'admin', 'superadmin', 'profile_type')
def create(self, validated_data):
# Usage is random but not more than limit
if 'usage' not in validated_data:
validated_data['usage'] = randint(0, validated_data['limit'])
if 'user_id' not in validated_data:
validated_data['user_id'] = uuid.uuid4()
username = get_app_username(self.initial_data['application_id'], validated_data['user_id'])
user = get_user_model().objects.create_user(username=username)
return ClientUser.objects.create(owner=user,
client=self.initial_data['client'], **validated_data)
class UserAuthorizationSerializer(rest_serializers.ModelSerializer):
storage = StorageClientUserSerializer(source='*')
admin = rest_serializers.BooleanField()
company = rest_serializers.SerializerMethodField()
token = rest_serializers.SerializerMethodField()
class Meta:
model = ClientUser
fields = ('user_id', 'email', 'password', 'storage', 'admin', 'company', 'token')
def get_company(self, obj):
return obj.client.name
def get_token(self, obj):
return get_jwt_token(obj.owner)
|
{
"content_hash": "a89d0cd7f4a58d7dee55f3db8731ddbf",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 99,
"avg_line_length": 35.86538461538461,
"alnum_prop": 0.6631367292225201,
"repo_name": "ingrammicro/fallball-service",
"id": "aea3811c41f5ff818bd8bf32a7e422b97a4321b1",
"size": "7460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fallball/fallballapp/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "5175"
},
{
"name": "Dockerfile",
"bytes": "430"
},
{
"name": "Python",
"bytes": "104622"
},
{
"name": "Shell",
"bytes": "160"
}
],
"symlink_target": ""
}
|
'''-------------------------------------------------------------------------
Copyright IBM Corp. 2015, 2015 All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
Limitations under the License.
-------------------------------------------------------------------------'''
'''
Created on Feb 18, 2014
@author: Gil Vernik
'''
from storlet_common import StorletTimeout, StorletException
from swift.common.utils import get_logger, register_swift_info, is_success, config_true_value
from swift.common.swob import Request, Response, wsgify, \
HTTPBadRequest, HTTPUnauthorized, \
HTTPInternalServerError
from swift.proxy.controllers.base import get_account_info
from swift.common.exceptions import ConnectionTimeout
from eventlet import Timeout
import ConfigParser
import os
import sys
class StorletHandlerMiddleware(object):
def __init__(self, app, conf, storlet_conf):
self.app = app
self.logger = get_logger(conf, log_route='storlet_handler')
self.stimeout = int(storlet_conf.get('storlet_timeout'))
self.storlet_containers = [ storlet_conf.get('storlet_container'),
storlet_conf.get('storlet_dependency')]
self.execution_server = storlet_conf.get('execution_server')
self.gateway_module = storlet_conf['gateway_module']
self.proxy_only_storlet_execution = storlet_conf['storlet_execute_on_proxy_only']
self.gateway_conf = storlet_conf
@wsgify
def __call__(self, req):
try:
if self.execution_server == 'proxy':
version, account, container, obj = req.split_path(
2, 4, rest_with_last=True)
else:
device, partition, account, container, obj = \
req.split_path(5, 5, rest_with_last=True)
version = '0'
        except Exception:
            # Request path does not match; pass the request through untouched
            return req.get_response(self.app)
self.logger.debug('storlet_handler call in %s: with %s/%s/%s' %
(self.execution_server,
account,
container,
obj))
        storlet_execution = 'X-Run-Storlet' in req.headers
        if (storlet_execution and account and container and obj) or \
                (container in self.storlet_containers and obj):
gateway = self.gateway_module(self.gateway_conf,
self.logger, self.app, version, account,
container, obj)
else:
return req.get_response(self.app)
try:
if self.execution_server == 'object' and storlet_execution:
if req.method == 'GET':
self.logger.info('GET. Run storlet')
orig_resp = req.get_response(self.app)
if not is_success(orig_resp.status_int):
return orig_resp
                    if self._is_range_request(req) or \
                        self._is_slo_get_request(req, orig_resp, account,
                                                 container, obj) or \
                        self.proxy_only_storlet_execution:
                        # For range requests, SLOs and proxy-only mode,
                        # Storlets are executed on the proxy, so we
                        # return the object as-is, without Storlet
                        # invocation:
self.logger.info(
'storlet_handler: invocation over %s/%s/%s %s' %
(account, container, obj,
'to be executed on proxy'))
return orig_resp
else:
# We apply here the Storlet:
self.logger.info(
'storlet_handler: invocation over %s/%s/%s %s' %
(account, container, obj,
'to be executed locally'))
old_env = req.environ.copy()
orig_req = Request.blank(old_env['PATH_INFO'], old_env)
(out_md, app_iter) = gateway.gatewayObjectGetFlow(req,
container,
obj,
orig_resp)
if 'Content-Length' in orig_resp.headers:
orig_resp.headers.pop('Content-Length')
if 'Transfer-Encoding' in orig_resp.headers:
orig_resp.headers.pop('Transfer-Encoding')
return Response(
app_iter=app_iter,
                            headers=orig_resp.headers,
request=orig_req,
conditional_response=True)
            elif self.execution_server == 'proxy':
                if storlet_execution or container in self.storlet_containers:
account_meta = get_account_info(req.environ,
self.app)['meta']
storlets_enabled = account_meta.get('storlet-enabled',
'False')
if storlets_enabled == 'False':
self.logger.info('Account disabled for storlets')
return HTTPBadRequest('Account disabled for storlets')
if req.method == 'GET' and storlet_execution:
if not gateway.authorizeStorletExecution(req):
return HTTPUnauthorized('Storlet: no permission')
                    # The GET request may target an SLO object.
                    # The simplest solution would be to issue a HEAD
                    # for every GET request to test for the SLO case.
                    # To save the HEAD overhead we implement
                    # a slightly more involved flow:
                    # At the proxy side, we augment the request with
                    # Storlet metadata and let the request flow.
                    # At the object side, we serve the plain (non-Storlet)
                    # request, test whether this is the SLO case,
                    # and invoke the Storlet only if it is not.
                    # Back at the proxy side, we test whether we received
                    # the full object to detect the SLO case,
                    # and invoke the Storlet here only if it is an SLO.
gateway.augmentStorletRequest(req)
original_resp = req.get_response(self.app)
                    if self._is_range_request(req) or \
                        self._is_slo_get_request(req, original_resp, account,
                                                 container, obj) or \
                        self.proxy_only_storlet_execution:
# SLO / proxy only case:
# storlet to be invoked now at proxy side:
(out_md, app_iter) = gateway.gatewayProxyGETFlow(req,
container,
obj,
original_resp)
# adapted from non SLO GET flow
if is_success(original_resp.status_int):
old_env = req.environ.copy()
orig_req = Request.blank(old_env['PATH_INFO'], old_env)
resp_headers = original_resp.headers
resp_headers['Content-Length'] = None
return Response(
app_iter=app_iter,
headers=resp_headers,
request=orig_req,
conditional_response=True)
return original_resp
else:
# Non proxy GET case: Storlet was already invoked at object side
if 'Transfer-Encoding' in original_resp.headers:
original_resp.headers.pop('Transfer-Encoding')
if is_success(original_resp.status_int):
old_env = req.environ.copy()
orig_req = Request.blank(old_env['PATH_INFO'], old_env)
resp_headers = original_resp.headers
resp_headers['Content-Length'] = None
return Response(
app_iter=original_resp.app_iter,
headers=resp_headers,
request=orig_req,
conditional_response=True)
return original_resp
elif req.method == 'PUT':
if (container in self.storlet_containers):
ret = gateway.validateStorletUpload(req)
if ret:
                            return HTTPBadRequest(body=ret)
else:
if not gateway.authorizeStorletExecution(req):
return HTTPUnauthorized('Storlet: no permissions')
if storlet_execution:
gateway.augmentStorletRequest(req)
(out_md, app_iter) = gateway.gatewayProxyPutFlow(req,
container,
obj)
req.environ['wsgi.input'] = app_iter
if 'CONTENT_LENGTH' in req.environ:
req.environ.pop('CONTENT_LENGTH')
req.headers['Transfer-Encoding'] = 'chunked'
return req.get_response(self.app)
except (StorletTimeout, ConnectionTimeout, Timeout) as e:
StorletException.handle(self.logger, e)
return HTTPInternalServerError(body='Storlet execution timed out')
except Exception as e:
StorletException.handle(self.logger, e)
return HTTPInternalServerError(body='Storlet execution failed')
return req.get_response(self.app)
    def _is_range_request(self, req):
        '''
        Determines whether the request is a byte-range request
        args:
            req: the request
        '''
        return 'Range' in req.headers
    def _is_slo_get_request(self, req, resp, account, container, obj):
        '''
        Determines from a GET request and its associated response
        whether the object is an SLO
        args:
            req: the request
            resp: the response
            account: the account as extracted from req
            container: the container as extracted from req
            obj: the object as extracted from req
        '''
if req.method != 'GET':
return False
if req.params.get('multipart-manifest') == 'get':
return False
        self.logger.info('Verify if {0}/{1}/{2} is an SLO assembly '
                         'object'.format(account, container, obj))
        if 200 <= resp.status_int < 300:
            for key in resp.headers:
                if (key.lower() == 'x-static-large-object' and
                        config_true_value(resp.headers[key])):
                    self.logger.info('{0}/{1}/{2} is indeed an SLO assembly '
                                     'object'.format(account, container, obj))
                    return True
            self.logger.info('{0}/{1}/{2} is NOT an SLO assembly '
                             'object'.format(account, container, obj))
            return False
        self.logger.error('Failed to check if {0}/{1}/{2} is an SLO assembly '
                          'object. Got status {3}'.format(
                              account, container, obj, resp.status))
        raise Exception('Failed to check if {0}/{1}/{2} is an SLO assembly '
                        'object. Got status {3}'.format(
                            account, container, obj, resp.status))
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
storlet_conf = dict()
    storlet_conf['storlet_timeout'] = conf.get('storlet_timeout', 40)
    storlet_conf['storlet_container'] = conf.get('storlet_container',
                                                 'storlet')
storlet_conf['storlet_dependency'] = conf.get('storlet_dependency',
'dependency')
storlet_conf['execution_server'] = conf.get('execution_server', '')
storlet_conf['storlet_execute_on_proxy_only'] = config_true_value(conf.get('storlet_execute_on_proxy_only', 'false'))
storlet_conf['gateway_conf'] = {}
    module_name = conf.get('storlet_gateway_module', '')
mo = module_name[:module_name.rfind(':')]
cl = module_name[module_name.rfind(':') + 1:]
module = __import__(mo, fromlist=[cl])
the_class = getattr(module, cl)
configParser = ConfigParser.RawConfigParser()
configParser.read(conf.get('storlet_gateway_conf',
'/etc/swift/storlet_stub_gateway.conf'))
additional_items = configParser.items("DEFAULT")
for key, val in additional_items:
        storlet_conf[key] = val
swift_info = {}
storlet_conf["gateway_module"] = the_class
register_swift_info('storlet_handler', False, **swift_info)
def storlet_handler_filter(app):
return StorletHandlerMiddleware(app, conf, storlet_conf)
return storlet_handler_filter
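# A hypothetical paste-deploy filter section consuming the options read above
# (a sketch: the option names match the conf.get() calls in filter_factory,
# while the egg entry point and the values are illustrative only):
#
#   [filter:storlet_handler]
#   use = egg:storlets#storlet_handler
#   storlet_timeout = 40
#   storlet_container = storlet
#   storlet_dependency = dependency
#   execution_server = proxy
#   storlet_execute_on_proxy_only = false
#   storlet_gateway_module = my_gateway_module:MyGatewayClass
#   storlet_gateway_conf = /etc/swift/storlet_stub_gateway.conf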
|
{
"content_hash": "674845354eac8870d5fc923695e3955d",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 145,
"avg_line_length": 48.373333333333335,
"alnum_prop": 0.4985529217199559,
"repo_name": "Open-I-Beam/swift-storlets",
"id": "f6866eb922063bd176eafd5654b9fed17c475f8e",
"size": "14512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Engine/swift/storlet_middleware/storlet_handler.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "31436"
},
{
"name": "Java",
"bytes": "104889"
},
{
"name": "Python",
"bytes": "195633"
},
{
"name": "Shell",
"bytes": "5451"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from msrest import Deserializer, Serializer
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from ._configuration import ApplicationInsightsManagementClientConfiguration
from .operations import ComponentCurrentPricingPlanOperations, EASubscriptionListMigrationDateOperations, EASubscriptionMigrateToNewPricingModelOperations, EASubscriptionRollbackToLegacyPricingModelOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ApplicationInsightsManagementClient:
"""Composite Swagger for Application Insights Management Client.
:ivar ea_subscription_migrate_to_new_pricing_model:
EASubscriptionMigrateToNewPricingModelOperations operations
:vartype ea_subscription_migrate_to_new_pricing_model:
azure.mgmt.applicationinsights.v2017_10_01.aio.operations.EASubscriptionMigrateToNewPricingModelOperations
:ivar ea_subscription_rollback_to_legacy_pricing_model:
EASubscriptionRollbackToLegacyPricingModelOperations operations
:vartype ea_subscription_rollback_to_legacy_pricing_model:
azure.mgmt.applicationinsights.v2017_10_01.aio.operations.EASubscriptionRollbackToLegacyPricingModelOperations
:ivar ea_subscription_list_migration_date: EASubscriptionListMigrationDateOperations operations
:vartype ea_subscription_list_migration_date:
azure.mgmt.applicationinsights.v2017_10_01.aio.operations.EASubscriptionListMigrationDateOperations
:ivar component_current_pricing_plan: ComponentCurrentPricingPlanOperations operations
:vartype component_current_pricing_plan:
azure.mgmt.applicationinsights.v2017_10_01.aio.operations.ComponentCurrentPricingPlanOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2017-10-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ApplicationInsightsManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.ea_subscription_migrate_to_new_pricing_model = EASubscriptionMigrateToNewPricingModelOperations(self._client, self._config, self._serialize, self._deserialize)
self.ea_subscription_rollback_to_legacy_pricing_model = EASubscriptionRollbackToLegacyPricingModelOperations(self._client, self._config, self._serialize, self._deserialize)
self.ea_subscription_list_migration_date = EASubscriptionListMigrationDateOperations(self._client, self._config, self._serialize, self._deserialize)
self.component_current_pricing_plan = ComponentCurrentPricingPlanOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ApplicationInsightsManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
|
{
"content_hash": "9a6af87f6550138b2847d9915991f634",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 208,
"avg_line_length": 52.77777777777778,
"alnum_prop": 0.7458373205741626,
"repo_name": "Azure/azure-sdk-for-python",
"id": "37a5b0bb8b58f0547a13c5b34428ad4a9cb04c1a",
"size": "5693",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2017_10_01/aio/_application_insights_management_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import logging
import os
import shutil
import sys
import tempfile
import threading
import time
from collections import namedtuple
from pipes import quote
# pylint: disable=redefined-builtin
from devlib.exception import WorkerThreadError, TargetNotRespondingError, TimeoutError
from devlib.utils.csvutil import csvwriter
logger = logging.getLogger('rendering')
SurfaceFlingerFrame = namedtuple('SurfaceFlingerFrame',
'desired_present_time actual_present_time frame_ready_time')
VSYNC_INTERVAL = 16666667
class FrameCollector(threading.Thread):
def __init__(self, target, period):
super(FrameCollector, self).__init__()
self.target = target
self.period = period
self.stop_signal = threading.Event()
self.frames = []
self.temp_file = None
self.refresh_period = None
self.drop_threshold = None
self.unresponsive_count = 0
self.last_ready_time = 0
self.exc = None
self.header = None
def run(self):
logger.debug('Frame data collection started.')
try:
self.stop_signal.clear()
fd, self.temp_file = tempfile.mkstemp()
logger.debug('temp file: {}'.format(self.temp_file))
wfh = os.fdopen(fd, 'wb')
try:
while not self.stop_signal.is_set():
self.collect_frames(wfh)
time.sleep(self.period)
finally:
wfh.close()
except (TargetNotRespondingError, TimeoutError): # pylint: disable=W0703
raise
except Exception as e: # pylint: disable=W0703
logger.warning('Exception on collector thread: {}({})'.format(e.__class__.__name__, e))
self.exc = WorkerThreadError(self.name, sys.exc_info())
logger.debug('Frame data collection stopped.')
def stop(self):
self.stop_signal.set()
self.join()
if self.unresponsive_count:
            message = 'FrameCollector was unresponsive {} times.'.format(self.unresponsive_count)
if self.unresponsive_count > 10:
logger.warning(message)
else:
logger.debug(message)
if self.exc:
raise self.exc # pylint: disable=E0702
def process_frames(self, outfile=None):
if not self.temp_file:
raise RuntimeError('Attempting to process frames before running the collector')
with open(self.temp_file) as fh:
self._process_raw_file(fh)
if outfile:
shutil.copy(self.temp_file, outfile)
os.unlink(self.temp_file)
self.temp_file = None
def write_frames(self, outfile, columns=None):
if columns is None:
header = self.header
frames = self.frames
else:
indexes = []
for c in columns:
if c not in self.header:
msg = 'Invalid column "{}"; must be in {}'
raise ValueError(msg.format(c, self.header))
indexes.append(self.header.index(c))
frames = [[f[i] for i in indexes] for f in self.frames]
header = columns
with csvwriter(outfile) as writer:
if header:
writer.writerow(header)
writer.writerows(frames)
def collect_frames(self, wfh):
raise NotImplementedError()
def clear(self):
raise NotImplementedError()
def _process_raw_file(self, fh):
raise NotImplementedError()
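# A hypothetical usage sketch for a concrete collector such as the
# SurfaceFlingerFrameCollector defined below (the target and view values are
# illustrative):
#   collector = SurfaceFlingerFrameCollector(target, period=2,
#                                            view='com.example/.MainActivity')
#   collector.start()
#   ...                                      # run the workload being profiled
#   collector.stop()                         # joins the thread, re-raises errors
#   collector.process_frames('frames.raw')
#   collector.write_frames('frames.csv')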
class SurfaceFlingerFrameCollector(FrameCollector):
def __init__(self, target, period, view, header=None):
super(SurfaceFlingerFrameCollector, self).__init__(target, period)
self.view = view
self.header = header or SurfaceFlingerFrame._fields
def collect_frames(self, wfh):
for activity in self.list():
if activity == self.view:
wfh.write(self.get_latencies(activity).encode('utf-8'))
def clear(self):
self.target.execute('dumpsys SurfaceFlinger --latency-clear ')
def get_latencies(self, activity):
cmd = 'dumpsys SurfaceFlinger --latency {}'
return self.target.execute(cmd.format(quote(activity)))
def list(self):
text = self.target.execute('dumpsys SurfaceFlinger --list')
return text.replace('\r\n', '\n').replace('\r', '\n').split('\n')
def _process_raw_file(self, fh):
found = False
text = fh.read().replace('\r\n', '\n').replace('\r', '\n')
for line in text.split('\n'):
line = line.strip()
if not line:
continue
if 'SurfaceFlinger appears to be unresponsive, dumping anyways' in line:
self.unresponsive_count += 1
continue
parts = line.split()
# We only want numerical data, ignore textual data.
try:
parts = list(map(int, parts))
except ValueError:
continue
found = True
self._process_trace_parts(parts)
if not found:
logger.warning('Could not find expected SurfaceFlinger output.')
def _process_trace_parts(self, parts):
if len(parts) == 3:
frame = SurfaceFlingerFrame(*parts)
if not frame.frame_ready_time:
return # "null" frame
if frame.frame_ready_time <= self.last_ready_time:
return # duplicate frame
if (frame.frame_ready_time - frame.desired_present_time) > self.drop_threshold:
logger.debug('Dropping bogus frame {}.'.format(' '.join(map(str, parts))))
return # bogus data
self.last_ready_time = frame.frame_ready_time
self.frames.append(frame)
elif len(parts) == 1:
self.refresh_period = parts[0]
self.drop_threshold = self.refresh_period * 1000
else:
msg = 'Unexpected SurfaceFlinger dump output: {}'.format(' '.join(map(str, parts)))
logger.warning(msg)
def read_gfxinfo_columns(target):
output = target.execute('dumpsys gfxinfo --list framestats')
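    # Expected output shape, as assumed by the parsing below (column names
    # vary by Android version; this example is illustrative):
    #   ---PROFILEDATA---
    #   Flags,IntendedVsync,Vsync,...,FrameCompleted,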
lines = iter(output.split('\n'))
for line in lines:
if line.startswith('---PROFILEDATA---'):
break
columns_line = next(lines)
return columns_line.split(',')[:-1] # has a trailing ','
class GfxinfoFrameCollector(FrameCollector):
def __init__(self, target, period, package, header=None):
super(GfxinfoFrameCollector, self).__init__(target, period)
self.package = package
self.header = None
self._init_header(header)
def collect_frames(self, wfh):
cmd = 'dumpsys gfxinfo {} framestats'
result = self.target.execute(cmd.format(self.package))
if sys.version_info[0] == 3:
wfh.write(result.encode('utf-8'))
else:
wfh.write(result)
def clear(self):
pass
def _init_header(self, header):
if header is not None:
self.header = header
else:
self.header = read_gfxinfo_columns(self.target)
def _process_raw_file(self, fh):
found = False
try:
last_vsync = 0
while True:
for line in fh:
if line.startswith('---PROFILEDATA---'):
found = True
break
next(fh) # headers
for line in fh:
if line.startswith('---PROFILEDATA---'):
break
entries = list(map(int, line.strip().split(',')[:-1])) # has a trailing ','
if entries[1] <= last_vsync:
continue # repeat frame
last_vsync = entries[1]
self.frames.append(entries)
except StopIteration:
pass
if not found:
logger.warning('Could not find frames data in gfxinfo output')
return
def _file_reverse_iter(fh, buf_size=1024):
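    # Yield the file in buf_size chunks, starting from the end of the file
    # and moving toward the beginning; bytes within each chunk stay in
    # forward order, only the chunks themselves are reversed.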
fh.seek(0, os.SEEK_END)
offset = 0
file_size = remaining_size = fh.tell()
while remaining_size > 0:
offset = min(file_size, offset + buf_size)
fh.seek(file_size - offset)
buf = fh.read(min(remaining_size, buf_size))
remaining_size -= buf_size
yield buf
def gfxinfo_get_last_dump(filepath):
"""
Return the last gfxinfo dump from the frame collector's raw output.
"""
record = ''
with open(filepath, 'r') as fh:
fh_iter = _file_reverse_iter(fh)
try:
while True:
buf = next(fh_iter)
ix = buf.find('** Graphics')
if ix >= 0:
return buf[ix:] + record
ix = buf.find(' **\n')
if ix >= 0:
buf = next(fh_iter) + buf
ix = buf.find('** Graphics')
if ix < 0:
msg = '"{}" appears to be corrupted'
raise RuntimeError(msg.format(filepath))
return buf[ix:] + record
record = buf + record
except StopIteration:
pass
|
{
"content_hash": "82c67bec9eeb4fa3420c7249185072a5",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 99,
"avg_line_length": 34.091575091575095,
"alnum_prop": 0.5532394971526807,
"repo_name": "ARM-software/lisa",
"id": "1e98115422baf69fe30ed5ed508624d450c6c8f0",
"size": "9888",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "external/devlib/devlib/utils/rendering.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "68635"
},
{
"name": "Jupyter Notebook",
"bytes": "60193313"
},
{
"name": "Makefile",
"bytes": "6176"
},
{
"name": "Perl",
"bytes": "6106"
},
{
"name": "Python",
"bytes": "2337042"
},
{
"name": "Shell",
"bytes": "108802"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server_django.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "529534f38af6acb9615e2b839a3f9e07",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 77,
"avg_line_length": 26,
"alnum_prop": 0.7136752136752137,
"repo_name": "kblauer/cs-outreach",
"id": "bf43c1cb011bb2b193bcd4ed794e0d4013815a39",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server_django/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "90922"
},
{
"name": "Python",
"bytes": "13329"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import sys
import typing
from collections.abc import Sequence
from copy import deepcopy
from itertools import chain
from pathlib import Path
from types import SimpleNamespace as NS
from typing import Any, Iterable
from warnings import warn
import pandas as pd
import matplotlib as mpl
import matplotlib.figure
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
from matplotlib.offsetbox import AnchoredOffsetbox
from matplotlib.backends.backend_pdf import PdfPages
from patsy.eval import EvalEnvironment
from .mapping.aes import aes, make_labels
from .layer import Layers
from .facets import facet_null
from .facets.layout import Layout
from .options import get_option, SUBPLOTS_ADJUST
from .themes.theme import theme, theme_get
from .exceptions import PlotnineError, PlotnineWarning
from .scales.scales import Scales
from .coords import coord_cartesian
from .guides.guides import guides
# mypy believes there is a duplicate definition
# of geom_blank even though it only appears once
from .geoms import geom_blank # type: ignore[no-redef] # mypy bug
from .utils import (
defaults,
from_inches,
is_data_like,
order_as_data_mapping,
to_inches,
ungroup
)
if typing.TYPE_CHECKING:
import plotnine as p9
from .typing import DataLike, PlotAddable
# Show plots if in interactive mode
if sys.flags.interactive:
plt.ion()
class ggplot:
"""
Create a new ggplot object
Parameters
----------
data : dataframe
Default data for plot. Every layer that does not
have data of its own will use this one.
mapping : aes
Default aesthetics mapping for the plot. These will be used
by all layers unless specifically overridden.
    environment : dict, ~patsy.eval.EvalEnvironment
        If a variable defined in the aesthetic mapping is not
        found in the data, ggplot will look for it in this
        namespace. It defaults to using the environment/namespace
        in which `ggplot()` is called.
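    A minimal usage sketch (assumes ``geom_point`` is importable from
    plotnine; ``mtcars`` ships with ``plotnine.data``):
    >>> from plotnine.data import mtcars
    >>> p = ggplot(mtcars, aes(x='wt', y='mpg')) + geom_point()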
"""
def __init__(
self,
data: DataLike | None = None,
mapping: aes | None = None,
environment: dict[str, Any] | None = None
) -> None:
# Allow some sloppiness
data, mapping = order_as_data_mapping(data, mapping)
if mapping is None:
mapping = aes()
# Recognize plydata groups
if hasattr(data, 'group_indices') and 'group' not in mapping:
mapping = mapping.copy()
mapping['group'] = data.group_indices() # type: ignore
self.data = data
self.mapping = mapping
self.facet = facet_null()
self.labels = make_labels(mapping)
self.layers = Layers()
self.guides = guides()
self.scales = Scales()
self.theme = theme_get()
self.coordinates = coord_cartesian()
self.environment = environment or EvalEnvironment.capture(1)
self.layout = Layout()
self.figure: mpl.figure.Figure | None = None
self.watermarks: list[p9.watermark] = []
self.axs = None
# build artefacts
self._build_objs = NS()
def __str__(self) -> str:
"""
Print/show the plot
"""
self.draw(show=True)
        # Return an empty string so that print(p) is "pretty"
return ''
def __repr__(self) -> str:
"""
Print/show the plot
"""
self.__str__()
return '<ggplot: (%d)>' % self.__hash__()
def __deepcopy__(self, memo: dict[Any, Any]) -> ggplot:
"""
Deep copy without copying the dataframe and environment
"""
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
old = self.__dict__
new = result.__dict__
# don't make a deepcopy of data, or environment
shallow = {'data', 'environment', 'figure', '_build_objs'}
for key, item in old.items():
if key in shallow:
new[key] = old[key]
memo[id(new[key])] = new[key]
else:
new[key] = deepcopy(old[key], memo)
return result
def __iadd__(
self,
other: PlotAddable | list[PlotAddable] | None
) -> ggplot:
"""
Add other to ggplot object
Parameters
----------
other : object or Sequence
Either an object that knows how to "radd"
itself to a ggplot, or a list of such objects.
"""
if isinstance(other, Sequence):
for item in other:
item.__radd__(self)
return self
elif other is None:
return self
else:
return other.__radd__(self)
def __add__(self, other: PlotAddable | list[PlotAddable] | None) -> ggplot:
"""
Add to ggplot from a list
Parameters
----------
other : object or Sequence
Either an object that knows how to "radd"
itself to a ggplot, or a list of such objects.
"""
self = deepcopy(self)
return self.__iadd__(other)
def __rrshift__(self, other: DataLike) -> ggplot:
"""
Overload the >> operator to receive a dataframe
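        A hypothetical sketch (``mtcars`` from ``plotnine.data``):
        >>> from plotnine.data import mtcars
        >>> p = mtcars >> ggplot(aes(x='wt', y='mpg'))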
"""
other = ungroup(other)
if is_data_like(other):
if self.data is None:
self.data = other
else:
raise PlotnineError(
"`>>` failed, ggplot object has data."
)
else:
msg = "Unknown type of data -- {!r}"
raise TypeError(msg.format(type(other)))
return self
def draw(self, show: bool = False) -> mpl.figure.Figure:
"""
Render the complete plot
Parameters
----------
show : bool (default: False)
Whether to show the plot.
Returns
-------
fig : ~matplotlib.figure.Figure
Matplotlib figure
"""
# Do not draw if drawn already.
# This prevents a needless error when reusing
# figure & axes in the jupyter notebook.
if self.figure:
return self.figure
# Prevent against any modifications to the users
# ggplot object. Do the copy here as we may/may not
# assign a default theme
self = deepcopy(self)
with plot_context(self, show=show):
self._build()
# setup
figure, axs = self._create_figure()
self._setup_parameters()
self.facet.strips.generate() # type: ignore[attr-defined]
self._resize_panels()
# Drawing
self._draw_layers()
self._draw_labels()
self._draw_breaks_and_labels()
self._draw_legend()
self._draw_title()
self._draw_caption()
self._draw_watermarks()
# Artist object theming
self.theme.apply(figure, axs)
return self.figure
def _draw_using_figure(self, figure, axs):
"""
Draw onto already created figure and axes
        This can be used to draw animation frames
        or inset plots. It is intended to be used
after the key plot has been drawn.
Parameters
----------
figure : ~matplotlib.figure.Figure
Matplotlib figure
axs : array_like
Array of Axes onto which to draw the plots
"""
self = deepcopy(self)
self.figure = figure
self.axs = axs
with plot_context(self):
self._build()
self._setup_parameters()
self.facet.strips.generate()
self._draw_layers()
self._draw_breaks_and_labels()
self._draw_legend()
self.theme.apply(figure, axs)
return self
def _build(self):
"""
Build ggplot for rendering.
Notes
-----
This method modifies the ggplot object. The caller is
responsible for making a copy and using that to make
the method call.
"""
if not self.layers:
self += geom_blank()
layers = self._build_objs.layers = self.layers
scales = self._build_objs.scales = self.scales
layout = self._build_objs.layout = self.layout
# Update the label information for the plot
layers.update_labels(self)
# Give each layer a copy of the data, the mappings and
# the execution environment
layers.setup(self)
# Initialise panels, add extra data for margins & missing
# facetting variables, and add on a PANEL variable to data
layout.setup(layers, self)
# Compute aesthetics to produce data with generalised
# variable names
layers.compute_aesthetics(self)
# Transform data using all scales
layers.transform(scales)
# Make sure missing (but required) aesthetics are added
scales.add_missing(('x', 'y'))
# Map and train positions so that statistics have access
# to ranges and all positions are numeric
layout.train_position(layers, scales)
layout.map_position(layers)
# Apply and map statistics
layers.compute_statistic(layout)
layers.map_statistic(self)
# Prepare data in geoms
# e.g. from y and width to ymin and ymax
layers.setup_data()
# Apply position adjustments
layers.compute_position(layout)
# Reset position scales, then re-train and map. This
# ensures that facets have control over the range of
# a plot.
layout.reset_position_scales()
layout.train_position(layers, scales)
layout.map_position(layers)
# Train and map non-position scales
npscales = scales.non_position_scales()
if len(npscales):
layers.train(npscales)
layers.map(npscales)
# Train coordinate system
layout.setup_panel_params(self.coordinates)
# fill in the defaults
layers.use_defaults()
# Allow stats to modify the layer data
layers.finish_statistics()
# Allow layout to modify data before rendering
layout.finish_data(layers)
def _setup_parameters(self):
"""
Set facet properties
"""
# facet
self.facet.set(
layout=self.layout,
theme=self.theme,
coordinates=self.coordinates,
figure=self.figure,
axs=self.axs
)
self.facet.initialise_strips()
# layout
self.layout.axs = self.axs
# theme
self.theme.figure = self.figure
def _create_figure(self):
"""
Create Matplotlib figure and axes
"""
# Good for development
if get_option('close_all_figures'):
plt.close('all')
figure = plt.figure()
axs = self.facet.make_axes(
figure,
self.layout.layout,
self.coordinates)
# Dictionary to collect matplotlib objects that will
# be targeted for theming by the themeables
figure._themeable = {}
self.figure = figure
self.axs = axs
return figure, axs
def _resize_panels(self):
"""
Resize panels
"""
self.theme.setup_figure(self.figure)
self.facet.spaceout_and_resize_panels()
def _draw_layers(self):
"""
Draw the main plot(s) onto the axes.
"""
# Draw the geoms
self.layers.draw(self.layout, self.coordinates)
def _draw_breaks_and_labels(self):
"""
Draw breaks and labels
"""
# 1. Draw facet labels a.k.a strip text
# 2. Decorate the axes
# - xaxis & yaxis breaks, labels, limits, ...
#
# pidx is the panel index (location left to right, top to bottom)
self.facet.strips.draw()
for pidx, layout_info in self.layout.layout.iterrows():
ax = self.axs[pidx]
panel_params = self.layout.panel_params[pidx]
self.facet.set_limits_breaks_and_labels(panel_params, ax)
# Remove unnecessary ticks and labels
if not layout_info['AXIS_X']:
ax.xaxis.set_tick_params(
which='both', bottom=False, labelbottom=False)
if not layout_info['AXIS_Y']:
ax.yaxis.set_tick_params(
which='both', left=False, labelleft=False)
if layout_info['AXIS_X']:
ax.xaxis.set_tick_params(which='both', bottom=True)
if layout_info['AXIS_Y']:
ax.yaxis.set_tick_params(which='both', left=True)
def _draw_legend(self):
"""
Draw legend onto the figure
"""
legend_box = self.guides.build(self)
if not legend_box:
return
figure = self.figure
left = figure.subplotpars.left
right = figure.subplotpars.right
top = figure.subplotpars.top
bottom = figure.subplotpars.bottom
W, H = figure.get_size_inches()
position = self.guides.position
_property = self.theme.themeables.property
spacing = _property('legend_box_spacing')
strip_margin_x = _property('strip_margin_x')
strip_margin_y = _property('strip_margin_y')
right_strip_width = self.facet.strips.breadth('right')
top_strip_height = self.facet.strips.breadth('top')
        # Except when the legend is on the right, the rest of the
        # computed x, y locations are not guaranteed not to
        # overlap with the axes or the labels. The user must then
# use the legend_margin theme parameter to adjust the
# location. This should get fixed when MPL has a better
# layout manager.
if position == 'right':
loc = 'center left'
pad = right_strip_width*(1+strip_margin_x) + spacing
x = right + pad/W
y = 0.5
elif position == 'left':
loc = 'center right'
x = left - spacing/W
y = 0.5
elif position == 'top':
loc = 'lower center'
x = 0.5
pad = top_strip_height*(1+strip_margin_y) + spacing
y = top + pad/H
elif position == 'bottom':
loc = 'upper center'
x = 0.5
y = bottom - spacing/H
else:
loc = 'center'
x, y = position
anchored_box = AnchoredOffsetbox(
loc=loc,
child=legend_box,
pad=0.,
frameon=False,
bbox_to_anchor=(x, y),
bbox_transform=figure.transFigure,
borderpad=0.
)
anchored_box.set_zorder(90.1)
self.figure._themeable['legend_background'] = anchored_box
ax = self.axs[0]
ax.add_artist(anchored_box)
def _draw_labels(self):
"""
Draw x and y labels onto the figure
"""
# This is very laboured. Should be changed when MPL
# finally has a constraint based layout manager.
figure = self.figure
_property = self.theme.themeables.property
pad_x = _property('axis_title_x', 'margin').get_as('t', 'pt')
pad_y = _property('axis_title_y', 'margin').get_as('r', 'pt')
# Get the axis labels (default or specified by user)
# and let the coordinate modify them e.g. flip
labels = self.coordinates.labels(NS(
x=self.layout.xlabel(self.labels),
y=self.layout.ylabel(self.labels)
))
# The first axes object is on left, and the last axes object
# is at the bottom. We change the transform so that the relevant
# coordinate is in figure coordinates. This way we take
# advantage of how MPL adjusts the label position so that they
# do not overlap with the tick text. This works well for
# facetting with scales='fixed' and also when not facetting.
# first_ax = self.axs[0]
# last_ax = self.axs[-1]
xlabel = self.facet.last_ax.set_xlabel(
labels.x, labelpad=pad_x)
ylabel = self.facet.first_ax.set_ylabel(
labels.y, labelpad=pad_y)
xlabel.set_transform(mtransforms.blended_transform_factory(
figure.transFigure, mtransforms.IdentityTransform()))
ylabel.set_transform(mtransforms.blended_transform_factory(
mtransforms.IdentityTransform(), figure.transFigure))
figure._themeable['axis_title_x'] = xlabel
figure._themeable['axis_title_y'] = ylabel
def _draw_title(self):
"""
Draw title onto the figure
"""
# This is very laboured. Should be changed when MPL
# finally has a constraint based layout manager.
figure = self.figure
title = self.labels.get('title', '')
_property = self.theme.themeables.property
# Pick suitable values in inches and convert them to
# transFigure dimension. This gives fixed spacing
# margins which work for oblong plots.
top = figure.subplotpars.top
W, H = figure.get_size_inches()
# Adjust the title to avoid overlap with the facet
# labels on the top row
# pad/H is inches in transFigure coordinates. A fixed
# margin value in inches prevents oblong plots from
# getting unpredictably large spaces.
linespacing = _property('plot_title', 'linespacing')
fontsize = _property('plot_title', 'size')
pad = _property('plot_title', 'margin').get_as('b', 'in')
ha = _property('plot_title', 'ha')
strip_margin_y = _property('strip_margin_y')
        dpi = 72.27  # (TeX) points per inch
line_size = fontsize / dpi
num_lines = len(title.split('\n'))
title_size = line_size * linespacing * num_lines
strip_height = self.facet.strips.breadth('top')
strip_height *= (1 + strip_margin_y)
if ha == 'left':
x = SUBPLOTS_ADJUST['left']
elif ha == 'right':
x = SUBPLOTS_ADJUST['right']
else:
# ha='center' is default
x = 0.5
y = top + (strip_height+title_size/2+pad)/H
text = figure.text(x, y, title, ha=ha, va='center')
figure._themeable['plot_title'] = text
def _draw_caption(self):
"""
Draw caption onto the figure
"""
# This is very laboured. Should be changed when MPL
# finally has a constraint based layout manager.
figure = self.figure
caption = self.labels.get('caption', '')
_property = self.theme.themeables.property
# Pick suitable values in inches and convert them to
# transFigure dimension. This gives fixed spacing
# margins which work for oblong plots.
right = figure.subplotpars.right
W, H = figure.get_size_inches()
margin = _property('plot_caption', 'margin')
right_pad = margin.get_as('r', 'in')
top_pad = margin.get_as('t', 'in')
x = right - right_pad/W
y = 0 - top_pad/H
text = figure.text(x, y, caption, ha='right', va='top')
figure._themeable['plot_caption'] = text
def _draw_watermarks(self):
"""
Draw watermark onto figure
"""
for wm in self.watermarks:
wm.draw(self.figure)
def _apply_theme(self):
"""
Apply theme attributes to Matplotlib objects
"""
self.theme.apply_axs(self.axs)
self.theme.apply_figure(self.figure)
def _save_filename(self, ext: str) -> Path:
"""
Make a filename for use by the save method
Parameters
----------
ext : str
Extension e.g. png, pdf, ...
"""
hash_token = abs(self.__hash__())
return Path(f'plotnine-save-{hash_token}.{ext}')
def _update_labels(self, layer):
"""
Update label data for the ggplot
Parameters
----------
layer : layer
New layer that has just been added to the ggplot
object.
"""
mapping = make_labels(layer.mapping)
default = make_labels(layer.stat.DEFAULT_AES)
new_labels = defaults(mapping, default)
self.labels = defaults(self.labels, new_labels)
def save(
self,
        filename: str | Path | None = None,
format: str | None = None,
path: str | None = None,
width: float | None = None,
height: float | None = None,
units: str = 'in',
dpi: float | None = None,
limitsize: bool = True,
verbose: bool = True,
**kwargs: Any
) -> None:
"""
Save a ggplot object as an image file
Parameters
----------
filename : str | pathlib.Path, optional
File name to write the plot to. If not specified, a name
like “plotnine-save-<hash>.<format>” is used.
format : str
Image format to use, automatically extract from
file name extension.
path : str
Path to save plot to (if you just want to set path and
not filename).
width : number, optional
Width (defaults to value set by the theme). If specified
the `height` must also be given.
height : number, optional
Height (defaults to value set by the theme). If specified
the `width` must also be given.
units : str
Units for width and height when either one is explicitly
specified (in, cm, or mm).
dpi : float
            DPI to use for raster graphics. If None, defaults to the
            theme's `dpi`; if that is not set, a `dpi` of 100 is used.
limitsize : bool
If ``True`` (the default), ggsave will not save images
            larger than 25x25 inches, to prevent the common error
of specifying dimensions in pixels.
verbose : bool
If ``True``, print the saving information.
kwargs : dict
Additional arguments to pass to matplotlib `savefig()`.
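        A minimal usage sketch (``p`` is an existing ggplot object; the
        filename and sizes are illustrative):
        >>> p.save('my-plot.png', width=8, height=6, units='in', dpi=300)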
"""
fig_kwargs = {'bbox_inches': 'tight', # 'tight' is a good default
'format': format}
fig_kwargs.update(kwargs)
# filename, depends on the object
if filename is None:
ext = format if format else 'pdf'
filename = self._save_filename(ext)
if path:
filename = Path(path) / filename
# Preserve the users object
self = deepcopy(self)
# The figure size should be known by the theme
if width is not None and height is not None:
width = to_inches(width, units)
height = to_inches(height, units)
self += theme(figure_size=(width, height))
        elif ((width is None and height is not None) or
              (width is not None and height is None)):
raise PlotnineError(
"You must specify both width and height")
width, height = self.theme.themeables.property('figure_size')
if limitsize and (width > 25 or height > 25):
raise PlotnineError(
f"Dimensions ({width=}, {height=}) exceed 25 inches "
"(height and width are specified in inches/cm/mm, "
"not pixels). If you are sure you want these "
"dimensions, use 'limitsize=False'."
)
if verbose:
_w = from_inches(width, units)
_h = from_inches(height, units)
warn(f"Saving {_w} x {_h} {units} image.", PlotnineWarning)
warn(f'Filename: {filename}', PlotnineWarning)
if dpi is not None:
self.theme = self.theme + theme(dpi=dpi)
fig = self.draw(show=False)
fig.savefig(filename, **fig_kwargs)
ggsave = ggplot.save
def save_as_pdf_pages(
plots: Iterable[ggplot],
filename: str | None = None,
path: str | None = None,
verbose: bool = True,
**kwargs: Any
) -> None:
"""
Save multiple :class:`ggplot` objects to a PDF file, one per page.
Parameters
----------
plots : collection or generator of :class:`ggplot`
Plot objects to write to file. `plots` may be either a
collection such as a :py:class:`list` or :py:class:`set`:
>>> base_plot = ggplot(…)
    >>> plots = [base_plot + ggtitle('%d of 3' % i) for i in range(1, 4)]
>>> save_as_pdf_pages(plots)
or, a generator that yields :class:`ggplot` objects:
>>> def myplots():
    >>>     for i in range(1, 4):
>>> yield ggplot(…) + ggtitle('%d of 3' % i)
>>> save_as_pdf_pages(myplots())
filename : :py:class:`str`, optional
File name to write the plot to. If not specified, a name
like “plotnine-save-<hash>.pdf” is used.
path : :py:class:`str`, optional
Path to save plot to (if you just want to set path and
not filename).
verbose : :py:class:`bool`
If ``True``, print the saving information.
kwargs : :py:class:`dict`
Additional arguments to pass to
:py:meth:`matplotlib.figure.Figure.savefig`.
Notes
-----
Using pandas' :meth:`~pandas.DataFrame.groupby` methods, tidy data
can be “faceted” across pages:
>>> from plotnine.data import mtcars
    >>> def facet_pages(column):
>>> base_plot = [
>>> aes(x='wt', y='mpg', label='name'),
>>> geom_text(),
>>> ]
>>> for label, group_data in mtcars.groupby(column):
>>> yield ggplot(group_data) + base_plot + ggtitle(label)
>>> save_as_pdf_pages(facet_pages('cyl'))
Unlike :meth:`ggplot.save`, :meth:`save_as_pdf_pages` does not
process arguments for `height` or `width`. To set the figure size,
add :class:`~plotnine.themes.themeable.figure_size` to the theme
for some or all of the objects in `plots`:
>>> plot = ggplot(…)
>>> # The following are equivalent
>>> plot.save('filename.pdf', height=6, width=8)
>>> save_as_pdf_pages([plot + theme(figure_size=(8, 6))])
"""
# as in ggplot.save()
fig_kwargs = {'bbox_inches': 'tight'}
fig_kwargs.update(kwargs)
# If plots is already an iterator, this is a no-op; otherwise
# convert a list, etc. to an iterator
plots = iter(plots)
# filename, depends on the object
filename: str | Path | None = filename # broaden allowed type for var
if filename is None:
# Take the first element from the iterator, store it, and
# use it to generate a file name
        peek = [next(plots)]
        # Re-attach the stored first element so the loop below still sees it
        plots = chain(peek, plots)
filename = peek[0]._save_filename('pdf')
if path:
filename = Path(path) / filename
if verbose:
warn(f'Filename: {filename}', PlotnineWarning)
with PdfPages(filename) as pdf:
for plot in plots:
fig = plot.draw()
# Save as a page in the PDF file
pdf.savefig(fig, **fig_kwargs)
class plot_context:
"""
    Context to set up the environment within which the plot is built
Parameters
----------
plot : ggplot
ggplot object to be built within the context.
show : bool (default: False)
Whether to show (``plt.show()``) the plot before the context
exits.
"""
def __init__(self, plot: ggplot, show: bool = False) -> None:
self.plot = plot
self.show = show
def __enter__(self) -> plot_context:
"""
        Enclose in matplotlib & pandas environments
"""
self.rc_context = mpl.rc_context(self.plot.theme.rcParams)
# Pandas deprecated is_copy, and when we create new dataframes
        # from slices we do not want complaints. We always use the
# new frames knowing that they are separate from the original.
self.pd_option_context = pd.option_context(
'mode.chained_assignment', None
)
self.rc_context.__enter__()
self.pd_option_context.__enter__()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
"""
Exit matplotlib & pandas environments
"""
if exc_type is None:
if self.show:
plt.show()
else:
plt.close(self.plot.figure)
else:
# There is an exception, close any figure
if self.plot.figure is not None:
plt.close(self.plot.figure)
self.rc_context.__exit__(exc_type, exc_value, exc_traceback)
self.pd_option_context.__exit__(exc_type, exc_value, exc_traceback)
|
{
"content_hash": "64f434f1ce7f401da3680f90c88289bb",
"timestamp": "",
"source": "github",
"line_count": 898,
"max_line_length": 79,
"avg_line_length": 32.34632516703786,
"alnum_prop": 0.569077701655937,
"repo_name": "has2k1/plotnine",
"id": "e78c48e2ca0f9e3a55f2a856509a45d3dcfd06d2",
"size": "29065",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "plotnine/ggplot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1729"
},
{
"name": "Python",
"bytes": "991031"
},
{
"name": "Shell",
"bytes": "2997"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_extender_controller_extender
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_extender_controller_extender.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
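# Note: the module under test converts the Ansible-style underscore keys in
# 'extender_controller_extender' into the hyphenated FortiOS API field names
# asserted in expected_data below (e.g. 'aaa_shared_secret' becomes
# 'aaa-shared-secret').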
def test_extender_controller_extender_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'extender_controller_extender': {
'aaa_shared_secret': 'test_value_3',
'access_point_name': 'test_value_4',
'admin': 'disable',
'at_dial_script': 'test_value_6',
'billing_start_day': '7',
'cdma_aaa_spi': 'test_value_8',
'cdma_ha_spi': 'test_value_9',
'cdma_nai': 'test_value_10',
'conn_status': '11',
'description': 'test_value_12',
'dial_mode': 'dial-on-demand',
'dial_status': '14',
'ext_name': 'test_value_15',
'ha_shared_secret': 'test_value_16',
'id': '17',
'ifname': 'test_value_18',
'initiated_update': 'enable',
'mode': 'standalone',
'modem_passwd': 'test_value_21',
'modem_type': 'cdma',
'multi_mode': 'auto',
'ppp_auth_protocol': 'auto',
'ppp_echo_request': 'enable',
'ppp_password': 'test_value_26',
'ppp_username': 'test_value_27',
'primary_ha': 'test_value_28',
'quota_limit_mb': '29',
'redial': 'none',
'redundant_intf': 'test_value_31',
'roaming': 'enable',
'role': 'none',
'secondary_ha': 'test_value_34',
'sim_pin': 'test_value_35',
'vdom': '36',
'wimax_auth_protocol': 'tls',
'wimax_carrier': 'test_value_38',
'wimax_realm': 'test_value_39'
},
'vdom': 'root'}
is_error, changed, response = fortios_extender_controller_extender.fortios_extender_controller(input_data, fos_instance)
expected_data = {
'aaa-shared-secret': 'test_value_3',
'access-point-name': 'test_value_4',
'admin': 'disable',
'at-dial-script': 'test_value_6',
'billing-start-day': '7',
'cdma-aaa-spi': 'test_value_8',
'cdma-ha-spi': 'test_value_9',
'cdma-nai': 'test_value_10',
'conn-status': '11',
'description': 'test_value_12',
'dial-mode': 'dial-on-demand',
'dial-status': '14',
'ext-name': 'test_value_15',
'ha-shared-secret': 'test_value_16',
'id': '17',
'ifname': 'test_value_18',
'initiated-update': 'enable',
'mode': 'standalone',
'modem-passwd': 'test_value_21',
'modem-type': 'cdma',
'multi-mode': 'auto',
'ppp-auth-protocol': 'auto',
'ppp-echo-request': 'enable',
'ppp-password': 'test_value_26',
'ppp-username': 'test_value_27',
'primary-ha': 'test_value_28',
'quota-limit-mb': '29',
'redial': 'none',
'redundant-intf': 'test_value_31',
'roaming': 'enable',
'role': 'none',
'secondary-ha': 'test_value_34',
'sim-pin': 'test_value_35',
'vdom': '36',
'wimax-auth-protocol': 'tls',
'wimax-carrier': 'test_value_38',
'wimax-realm': 'test_value_39'
}
set_method_mock.assert_called_with('extender-controller', 'extender', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_extender_controller_extender_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'extender_controller_extender': {
'aaa_shared_secret': 'test_value_3',
'access_point_name': 'test_value_4',
'admin': 'disable',
'at_dial_script': 'test_value_6',
'billing_start_day': '7',
'cdma_aaa_spi': 'test_value_8',
'cdma_ha_spi': 'test_value_9',
'cdma_nai': 'test_value_10',
'conn_status': '11',
'description': 'test_value_12',
'dial_mode': 'dial-on-demand',
'dial_status': '14',
'ext_name': 'test_value_15',
'ha_shared_secret': 'test_value_16',
'id': '17',
'ifname': 'test_value_18',
'initiated_update': 'enable',
'mode': 'standalone',
'modem_passwd': 'test_value_21',
'modem_type': 'cdma',
'multi_mode': 'auto',
'ppp_auth_protocol': 'auto',
'ppp_echo_request': 'enable',
'ppp_password': 'test_value_26',
'ppp_username': 'test_value_27',
'primary_ha': 'test_value_28',
'quota_limit_mb': '29',
'redial': 'none',
'redundant_intf': 'test_value_31',
'roaming': 'enable',
'role': 'none',
'secondary_ha': 'test_value_34',
'sim_pin': 'test_value_35',
'vdom': '36',
'wimax_auth_protocol': 'tls',
'wimax_carrier': 'test_value_38',
'wimax_realm': 'test_value_39'
},
'vdom': 'root'}
is_error, changed, response = fortios_extender_controller_extender.fortios_extender_controller(input_data, fos_instance)
expected_data = {
'aaa-shared-secret': 'test_value_3',
'access-point-name': 'test_value_4',
'admin': 'disable',
'at-dial-script': 'test_value_6',
'billing-start-day': '7',
'cdma-aaa-spi': 'test_value_8',
'cdma-ha-spi': 'test_value_9',
'cdma-nai': 'test_value_10',
'conn-status': '11',
'description': 'test_value_12',
'dial-mode': 'dial-on-demand',
'dial-status': '14',
'ext-name': 'test_value_15',
'ha-shared-secret': 'test_value_16',
'id': '17',
'ifname': 'test_value_18',
'initiated-update': 'enable',
'mode': 'standalone',
'modem-passwd': 'test_value_21',
'modem-type': 'cdma',
'multi-mode': 'auto',
'ppp-auth-protocol': 'auto',
'ppp-echo-request': 'enable',
'ppp-password': 'test_value_26',
'ppp-username': 'test_value_27',
'primary-ha': 'test_value_28',
'quota-limit-mb': '29',
'redial': 'none',
'redundant-intf': 'test_value_31',
'roaming': 'enable',
'role': 'none',
'secondary-ha': 'test_value_34',
'sim-pin': 'test_value_35',
'vdom': '36',
'wimax-auth-protocol': 'tls',
'wimax-carrier': 'test_value_38',
'wimax-realm': 'test_value_39'
}
set_method_mock.assert_called_with('extender-controller', 'extender', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_extender_controller_extender_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'extender_controller_extender': {
'aaa_shared_secret': 'test_value_3',
'access_point_name': 'test_value_4',
'admin': 'disable',
'at_dial_script': 'test_value_6',
'billing_start_day': '7',
'cdma_aaa_spi': 'test_value_8',
'cdma_ha_spi': 'test_value_9',
'cdma_nai': 'test_value_10',
'conn_status': '11',
'description': 'test_value_12',
'dial_mode': 'dial-on-demand',
'dial_status': '14',
'ext_name': 'test_value_15',
'ha_shared_secret': 'test_value_16',
'id': '17',
'ifname': 'test_value_18',
'initiated_update': 'enable',
'mode': 'standalone',
'modem_passwd': 'test_value_21',
'modem_type': 'cdma',
'multi_mode': 'auto',
'ppp_auth_protocol': 'auto',
'ppp_echo_request': 'enable',
'ppp_password': 'test_value_26',
'ppp_username': 'test_value_27',
'primary_ha': 'test_value_28',
'quota_limit_mb': '29',
'redial': 'none',
'redundant_intf': 'test_value_31',
'roaming': 'enable',
'role': 'none',
'secondary_ha': 'test_value_34',
'sim_pin': 'test_value_35',
'vdom': '36',
'wimax_auth_protocol': 'tls',
'wimax_carrier': 'test_value_38',
'wimax_realm': 'test_value_39'
},
'vdom': 'root'}
is_error, changed, response = fortios_extender_controller_extender.fortios_extender_controller(input_data, fos_instance)
delete_method_mock.assert_called_with('extender-controller', 'extender', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_extender_controller_extender_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'extender_controller_extender': {
'aaa_shared_secret': 'test_value_3',
'access_point_name': 'test_value_4',
'admin': 'disable',
'at_dial_script': 'test_value_6',
'billing_start_day': '7',
'cdma_aaa_spi': 'test_value_8',
'cdma_ha_spi': 'test_value_9',
'cdma_nai': 'test_value_10',
'conn_status': '11',
'description': 'test_value_12',
'dial_mode': 'dial-on-demand',
'dial_status': '14',
'ext_name': 'test_value_15',
'ha_shared_secret': 'test_value_16',
'id': '17',
'ifname': 'test_value_18',
'initiated_update': 'enable',
'mode': 'standalone',
'modem_passwd': 'test_value_21',
'modem_type': 'cdma',
'multi_mode': 'auto',
'ppp_auth_protocol': 'auto',
'ppp_echo_request': 'enable',
'ppp_password': 'test_value_26',
'ppp_username': 'test_value_27',
'primary_ha': 'test_value_28',
'quota_limit_mb': '29',
'redial': 'none',
'redundant_intf': 'test_value_31',
'roaming': 'enable',
'role': 'none',
'secondary_ha': 'test_value_34',
'sim_pin': 'test_value_35',
'vdom': '36',
'wimax_auth_protocol': 'tls',
'wimax_carrier': 'test_value_38',
'wimax_realm': 'test_value_39'
},
'vdom': 'root'}
is_error, changed, response = fortios_extender_controller_extender.fortios_extender_controller(input_data, fos_instance)
delete_method_mock.assert_called_with('extender-controller', 'extender', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_extender_controller_extender_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'extender_controller_extender': {
'aaa_shared_secret': 'test_value_3',
'access_point_name': 'test_value_4',
'admin': 'disable',
'at_dial_script': 'test_value_6',
'billing_start_day': '7',
'cdma_aaa_spi': 'test_value_8',
'cdma_ha_spi': 'test_value_9',
'cdma_nai': 'test_value_10',
'conn_status': '11',
'description': 'test_value_12',
'dial_mode': 'dial-on-demand',
'dial_status': '14',
'ext_name': 'test_value_15',
'ha_shared_secret': 'test_value_16',
'id': '17',
'ifname': 'test_value_18',
'initiated_update': 'enable',
'mode': 'standalone',
'modem_passwd': 'test_value_21',
'modem_type': 'cdma',
'multi_mode': 'auto',
'ppp_auth_protocol': 'auto',
'ppp_echo_request': 'enable',
'ppp_password': 'test_value_26',
'ppp_username': 'test_value_27',
'primary_ha': 'test_value_28',
'quota_limit_mb': '29',
'redial': 'none',
'redundant_intf': 'test_value_31',
'roaming': 'enable',
'role': 'none',
'secondary_ha': 'test_value_34',
'sim_pin': 'test_value_35',
'vdom': '36',
'wimax_auth_protocol': 'tls',
'wimax_carrier': 'test_value_38',
'wimax_realm': 'test_value_39'
},
'vdom': 'root'}
is_error, changed, response = fortios_extender_controller_extender.fortios_extender_controller(input_data, fos_instance)
expected_data = {
'aaa-shared-secret': 'test_value_3',
'access-point-name': 'test_value_4',
'admin': 'disable',
'at-dial-script': 'test_value_6',
'billing-start-day': '7',
'cdma-aaa-spi': 'test_value_8',
'cdma-ha-spi': 'test_value_9',
'cdma-nai': 'test_value_10',
'conn-status': '11',
'description': 'test_value_12',
'dial-mode': 'dial-on-demand',
'dial-status': '14',
'ext-name': 'test_value_15',
'ha-shared-secret': 'test_value_16',
'id': '17',
'ifname': 'test_value_18',
'initiated-update': 'enable',
'mode': 'standalone',
'modem-passwd': 'test_value_21',
'modem-type': 'cdma',
'multi-mode': 'auto',
'ppp-auth-protocol': 'auto',
'ppp-echo-request': 'enable',
'ppp-password': 'test_value_26',
'ppp-username': 'test_value_27',
'primary-ha': 'test_value_28',
'quota-limit-mb': '29',
'redial': 'none',
'redundant-intf': 'test_value_31',
'roaming': 'enable',
'role': 'none',
'secondary-ha': 'test_value_34',
'sim-pin': 'test_value_35',
'vdom': '36',
'wimax-auth-protocol': 'tls',
'wimax-carrier': 'test_value_38',
'wimax-realm': 'test_value_39'
}
set_method_mock.assert_called_with('extender-controller', 'extender', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_extender_controller_extender_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'extender_controller_extender': {
'random_attribute_not_valid': 'tag',
'aaa_shared_secret': 'test_value_3',
'access_point_name': 'test_value_4',
'admin': 'disable',
'at_dial_script': 'test_value_6',
'billing_start_day': '7',
'cdma_aaa_spi': 'test_value_8',
'cdma_ha_spi': 'test_value_9',
'cdma_nai': 'test_value_10',
'conn_status': '11',
'description': 'test_value_12',
'dial_mode': 'dial-on-demand',
'dial_status': '14',
'ext_name': 'test_value_15',
'ha_shared_secret': 'test_value_16',
'id': '17',
'ifname': 'test_value_18',
'initiated_update': 'enable',
'mode': 'standalone',
'modem_passwd': 'test_value_21',
'modem_type': 'cdma',
'multi_mode': 'auto',
'ppp_auth_protocol': 'auto',
'ppp_echo_request': 'enable',
'ppp_password': 'test_value_26',
'ppp_username': 'test_value_27',
'primary_ha': 'test_value_28',
'quota_limit_mb': '29',
'redial': 'none',
'redundant_intf': 'test_value_31',
'roaming': 'enable',
'role': 'none',
'secondary_ha': 'test_value_34',
'sim_pin': 'test_value_35',
'vdom': '36',
'wimax_auth_protocol': 'tls',
'wimax_carrier': 'test_value_38',
'wimax_realm': 'test_value_39'
},
'vdom': 'root'}
is_error, changed, response = fortios_extender_controller_extender.fortios_extender_controller(input_data, fos_instance)
expected_data = {
'aaa-shared-secret': 'test_value_3',
'access-point-name': 'test_value_4',
'admin': 'disable',
'at-dial-script': 'test_value_6',
'billing-start-day': '7',
'cdma-aaa-spi': 'test_value_8',
'cdma-ha-spi': 'test_value_9',
'cdma-nai': 'test_value_10',
'conn-status': '11',
'description': 'test_value_12',
'dial-mode': 'dial-on-demand',
'dial-status': '14',
'ext-name': 'test_value_15',
'ha-shared-secret': 'test_value_16',
'id': '17',
'ifname': 'test_value_18',
'initiated-update': 'enable',
'mode': 'standalone',
'modem-passwd': 'test_value_21',
'modem-type': 'cdma',
'multi-mode': 'auto',
'ppp-auth-protocol': 'auto',
'ppp-echo-request': 'enable',
'ppp-password': 'test_value_26',
'ppp-username': 'test_value_27',
'primary-ha': 'test_value_28',
'quota-limit-mb': '29',
'redial': 'none',
'redundant-intf': 'test_value_31',
'roaming': 'enable',
'role': 'none',
'secondary-ha': 'test_value_34',
'sim-pin': 'test_value_35',
'vdom': '36',
'wimax-auth-protocol': 'tls',
'wimax-carrier': 'test_value_38',
'wimax-realm': 'test_value_39'
}
set_method_mock.assert_called_with('extender-controller', 'extender', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
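Taken together, these tests pin down the module's contract: 'state: present' routes to FortiOSHandler.set, 'state: absent' routes to FortiOSHandler.delete, underscored playbook keys are mapped to the hyphenated FortiOS API names, foreign attributes are filtered out, and a 404 "error" on an idempotent set is reported as neither an error nor a change. A minimal sketch of dispatch logic consistent with that contract follows; the real module's internals are not shown in this file, and the names and the truncated 'known' set below are illustrative assumptions only.

def fortios_extender_controller_sketch(data, fos):
    # Map underscored option keys to the hyphenated names the API expects,
    # dropping anything that is not a known option (foreign attributes).
    known = {'aaa_shared_secret', 'access_point_name', 'admin', 'id'}  # illustrative subset
    payload = {k.replace('_', '-'): v
               for k, v in data['extender_controller_extender'].items()
               if k in known}
    vdom = data['vdom']
    if data['state'] == 'present':
        resp = fos.set('extender-controller', 'extender', data=payload, vdom=vdom)
    else:
        resp = fos.delete('extender-controller', 'extender',
                          mkey=payload.get('id'), vdom=vdom)
    # A 404 on set means "already in the desired state": not an error, not a change.
    is_error = resp['status'] == 'error' and resp.get('http_status') != 404
    changed = resp['status'] == 'success'
    return is_error, changed, resp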
|
{
"content_hash": "b5f019dffbf9499fe3b05b7841f98f09",
"timestamp": "",
"source": "github",
"line_count": 543,
"max_line_length": 142,
"avg_line_length": 39.1915285451197,
"alnum_prop": 0.5209811569005216,
"repo_name": "thaim/ansible",
"id": "7d3bd9e108c8f030994c37f09a7028ae6c6c07d9",
"size": "21977",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/fortios/test_fortios_extender_controller_extender.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from twisted.cred import credentials # pylint: disable=import-error
from twisted.internet import defer, protocol, reactor # pylint: disable=import-error
from twisted.spread import pb # pylint: disable=import-error
from platformio.app import get_host_id
from platformio.clients.account import AccountClient
class RemoteClientFactory(pb.PBClientFactory, protocol.ReconnectingClientFactory):
def clientConnectionMade(self, broker):
if self.sslContextFactory and not self.sslContextFactory.certificate_verified:
self.remote_client.log.error(
"A remote cloud could not prove that its security certificate is "
"from {host}. This may cause a misconfiguration or an attacker "
"intercepting your connection.",
host=self.sslContextFactory.host,
)
return self.remote_client.disconnect()
pb.PBClientFactory.clientConnectionMade(self, broker)
protocol.ReconnectingClientFactory.resetDelay(self)
self.remote_client.log.info("Successfully connected")
self.remote_client.log.info("Authenticating")
auth_token = None
try:
auth_token = AccountClient().fetch_authentication_token()
except Exception as e: # pylint:disable=broad-except
d = defer.Deferred()
d.addErrback(self.clientAuthorizationFailed)
d.errback(pb.Error(e))
return d
d = self.login(
credentials.UsernamePassword(
auth_token.encode(),
get_host_id().encode(),
),
client=self.remote_client,
)
d.addCallback(self.remote_client.cb_client_authorization_made)
d.addErrback(self.clientAuthorizationFailed)
return d
def clientAuthorizationFailed(self, err):
AccountClient.delete_local_session()
self.remote_client.cb_client_authorization_failed(err)
def clientConnectionFailed(self, connector, reason):
self.remote_client.log.warn(
"Could not connect to PIO Remote Cloud. Reconnecting..."
)
self.remote_client.cb_disconnected(reason)
protocol.ReconnectingClientFactory.clientConnectionFailed(
self, connector, reason
)
def clientConnectionLost( # pylint: disable=arguments-differ
self, connector, unused_reason
):
if not reactor.running:
self.remote_client.log.info("Successfully disconnected")
return
self.remote_client.log.warn(
"Connection is lost to PIO Remote Cloud. Reconnecting"
)
pb.PBClientFactory.clientConnectionLost(
self, connector, unused_reason, reconnecting=1
)
self.remote_client.cb_disconnected(unused_reason)
protocol.ReconnectingClientFactory.clientConnectionLost(
self, connector, unused_reason
)
|
{
"content_hash": "0f315a4c46493e9b3f754c428e1cf384",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 86,
"avg_line_length": 40.763888888888886,
"alnum_prop": 0.6572402044293015,
"repo_name": "platformio/platformio",
"id": "712449d52781115bb6fae0cef1baba4402d71570",
"size": "3546",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "platformio/commands/remote/factory/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "1826"
},
{
"name": "Makefile",
"bytes": "356"
},
{
"name": "Processing",
"bytes": "101"
},
{
"name": "Python",
"bytes": "333618"
},
{
"name": "Smarty",
"bytes": "45408"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import pytest
from xarray.backends.common import robust_getitem
class DummyFailure(Exception):
pass
class DummyArray:
def __init__(self, failures):
self.failures = failures
def __getitem__(self, key):
if self.failures:
self.failures -= 1
raise DummyFailure
return "success"
def test_robust_getitem() -> None:
array = DummyArray(failures=2)
with pytest.raises(DummyFailure):
array[...]
result = robust_getitem(array, ..., catch=DummyFailure, initial_delay=1)
assert result == "success"
array = DummyArray(failures=3)
with pytest.raises(DummyFailure):
robust_getitem(array, ..., catch=DummyFailure, initial_delay=1, max_retries=2)
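These tests fix the retry contract of robust_getitem: exceptions of type `catch` are retried up to `max_retries` times with a delay seeded by `initial_delay`, and the final failure is re-raised. A minimal sketch consistent with that contract follows; the default `max_retries` and the exact backoff schedule are assumptions, not read from the xarray source.

import time

def robust_getitem_sketch(array, key, catch=Exception, max_retries=6, initial_delay=500):
    # initial_delay is treated as milliseconds and doubled after each failure.
    for attempt in range(max_retries + 1):
        try:
            return array[key]
        except catch:
            if attempt == max_retries:
                raise
            time.sleep(initial_delay * (2 ** attempt) / 1000.0)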
|
{
"content_hash": "bcd4a780e8b564389514e4d57bd09671",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 86,
"avg_line_length": 24.1875,
"alnum_prop": 0.6524547803617571,
"repo_name": "markelg/xray",
"id": "c7dba36ea5807e90c322f54622bea124925f6ab0",
"size": "774",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "xarray/tests/test_backends_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5873"
},
{
"name": "HTML",
"bytes": "1343"
},
{
"name": "Python",
"bytes": "4490453"
},
{
"name": "Shell",
"bytes": "1262"
}
],
"symlink_target": ""
}
|
from azure.identity import DefaultAzureCredential
from azure.mgmt.testbase import TestBase
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-testbase
# USAGE
python package_run_test.py
Before running the sample, set the values of the client ID, tenant ID, and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get these values, see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = TestBase(
credential=DefaultAzureCredential(),
subscription_id="subscription-id",
)
response = client.packages.run_test(
resource_group_name="contoso-rg1",
test_base_account_name="contoso-testBaseAccount1",
package_name="contoso-package2",
)
print(response)
# x-ms-original-file: specification/testbase/resource-manager/Microsoft.TestBase/preview/2022-04-01-preview/examples/PackageRunTest.json
if __name__ == "__main__":
main()
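DefaultAzureCredential resolves these variables through the EnvironmentCredential link in its credential chain, so a local run can also seed them programmatically before constructing the client. The placeholder values below are illustrative only.

import os

os.environ.setdefault("AZURE_CLIENT_ID", "<client-id>")
os.environ.setdefault("AZURE_TENANT_ID", "<tenant-id>")
os.environ.setdefault("AZURE_CLIENT_SECRET", "<client-secret>")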
|
{
"content_hash": "8dee9eeeac7bd90395c194b8eb545609",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 136,
"avg_line_length": 32.44117647058823,
"alnum_prop": 0.7216681776971895,
"repo_name": "Azure/azure-sdk-for-python",
"id": "7f12597aa979b8af435f8dfdcf79adb603ff47fd",
"size": "1571",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/testbase/azure-mgmt-testbase/generated_samples/package_run_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from heat.engine import properties
from heat.engine import constraints
from heat.engine import attributes
from heat.common.i18n import _
from avi.heat.avi_resource import AviResource
from avi.heat.avi_resource import AviNestedResource
from options import *
from common import *
from controller_properties import *
class CC_AgentProperties(object):
# all schemas
poll_duration_target_schema = properties.Schema(
properties.Schema.NUMBER,
_("Discovery poll target duration; a scale factor of 1+ is computed with the actual discovery (actual/target) and used to tweak slow and fast poll intervals"),
required=False,
update_allowed=True,
)
poll_slow_target_schema = properties.Schema(
properties.Schema.NUMBER,
_("Slow poll interval"),
required=False,
update_allowed=True,
)
poll_fast_target_schema = properties.Schema(
properties.Schema.NUMBER,
_("Fast poll interval"),
required=False,
update_allowed=True,
)
async_retries_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum polls to check for async jobs to finish"),
required=False,
update_allowed=True,
)
async_retries_delay_schema = properties.Schema(
properties.Schema.NUMBER,
_("Delay between each async job status poll check"),
required=False,
update_allowed=True,
)
vnic_retries_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum polls to check for vnics to be attached to VM"),
required=False,
update_allowed=True,
)
vnic_retries_delay_schema = properties.Schema(
properties.Schema.NUMBER,
_("Delay between each vnic status poll check"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'poll_duration_target',
'poll_slow_target',
'poll_fast_target',
'async_retries',
'async_retries_delay',
'vnic_retries',
'vnic_retries_delay',
)
# mapping of properties to their schemas
properties_schema = {
'poll_duration_target': poll_duration_target_schema,
'poll_slow_target': poll_slow_target_schema,
'poll_fast_target': poll_fast_target_schema,
'async_retries': async_retries_schema,
'async_retries_delay': async_retries_delay_schema,
'vnic_retries': vnic_retries_schema,
'vnic_retries_delay': vnic_retries_delay_schema,
}
class Hypervisor_Properties(object):
# all schemas
htype_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['DEFAULT', 'VMWARE_VSAN', 'VMWARE_ESX', 'KVM']),
],
)
max_nics_schema = properties.Schema(
properties.Schema.NUMBER,
_(""),
required=False,
update_allowed=True,
)
max_ips_per_nic_schema = properties.Schema(
properties.Schema.NUMBER,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'htype',
'max_nics',
'max_ips_per_nic',
)
# mapping of properties to their schemas
properties_schema = {
'htype': htype_schema,
'max_nics': max_nics_schema,
'max_ips_per_nic': max_ips_per_nic_schema,
}
class CC_Properties(object):
# all schemas
rpc_poll_interval_schema = properties.Schema(
properties.Schema.NUMBER,
_(""),
required=False,
update_allowed=True,
)
rpc_queue_size_schema = properties.Schema(
properties.Schema.NUMBER,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'rpc_poll_interval',
'rpc_queue_size',
)
# mapping of properties to their schemas
properties_schema = {
'rpc_poll_interval': rpc_poll_interval_schema,
'rpc_queue_size': rpc_queue_size_schema,
}
class CloudMeta(object):
# all schemas
key_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
value_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
# properties list
PROPERTIES = (
'key',
'value',
)
# mapping of properties to their schemas
properties_schema = {
'key': key_schema,
'value': value_schema,
}
class CloudFlavor(object):
# all schemas
id_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
ram_mb_schema = properties.Schema(
properties.Schema.NUMBER,
_(""),
required=False,
update_allowed=True,
)
disk_gb_schema = properties.Schema(
properties.Schema.NUMBER,
_(""),
required=False,
update_allowed=True,
)
vcpus_schema = properties.Schema(
properties.Schema.NUMBER,
_(""),
required=False,
update_allowed=True,
)
public_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(""),
required=False,
update_allowed=True,
)
max_nics_schema = properties.Schema(
properties.Schema.NUMBER,
_(""),
required=False,
update_allowed=True,
)
max_ips_per_nic_schema = properties.Schema(
properties.Schema.NUMBER,
_(""),
required=False,
update_allowed=True,
)
enhanced_nw_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(""),
required=False,
update_allowed=True,
)
meta_item_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=CloudMeta.properties_schema,
required=True,
update_allowed=False,
)
meta_schema = properties.Schema(
properties.Schema.LIST,
_(""),
schema=meta_item_schema,
required=False,
update_allowed=True,
)
cost_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'id',
'name',
'ram_mb',
'disk_gb',
'vcpus',
'public',
'max_nics',
'max_ips_per_nic',
'enhanced_nw',
'meta',
'cost',
)
# mapping of properties to their schemas
properties_schema = {
'id': id_schema,
'name': name_schema,
'ram_mb': ram_mb_schema,
'disk_gb': disk_gb_schema,
'vcpus': vcpus_schema,
'public': public_schema,
'max_nics': max_nics_schema,
'max_ips_per_nic': max_ips_per_nic_schema,
'enhanced_nw': enhanced_nw_schema,
'meta': meta_schema,
'cost': cost_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'meta': getattr(CloudMeta, 'field_references', {}),
}
class CloudInfo(object):
# all schemas
vtype_schema = properties.Schema(
properties.Schema.STRING,
_("Cloud type"),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['CLOUD_VCENTER', 'CLOUD_DOCKER_UCP', 'CLOUD_APIC', 'CLOUD_OPENSTACK', 'CLOUD_MESOS', 'CLOUD_RANCHER', 'CLOUD_VCA', 'CLOUD_LINUXSERVER', 'CLOUD_OSHIFT_K8S', 'CLOUD_AWS', 'CLOUD_NONE']),
],
)
htypes_item_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=False,
constraints=[
constraints.AllowedValues(['DEFAULT', 'VMWARE_VSAN', 'VMWARE_ESX', 'KVM']),
],
)
htypes_schema = properties.Schema(
properties.Schema.LIST,
_("Supported hypervisors"),
schema=htypes_item_schema,
required=False,
update_allowed=True,
)
flavor_regex_filter_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
flavor_props_item_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=CloudFlavor.properties_schema,
required=True,
update_allowed=False,
)
flavor_props_schema = properties.Schema(
properties.Schema.LIST,
_("Flavor properties specific to this cloud type"),
schema=flavor_props_item_schema,
required=False,
update_allowed=True,
)
cca_props_schema = properties.Schema(
properties.Schema.MAP,
_("CloudConnectorAgent properties specific to this cloud type"),
schema=CC_AgentProperties.properties_schema,
required=False,
update_allowed=True,
)
controller_props_schema = properties.Schema(
properties.Schema.MAP,
_("Controller properties specific to this cloud type"),
schema=ControllerProperties.properties_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'vtype',
'htypes',
'flavor_regex_filter',
'flavor_props',
'cca_props',
'controller_props',
)
# mapping of properties to their schemas
properties_schema = {
'vtype': vtype_schema,
'htypes': htypes_schema,
'flavor_regex_filter': flavor_regex_filter_schema,
'flavor_props': flavor_props_schema,
'cca_props': cca_props_schema,
'controller_props': controller_props_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'flavor_props': getattr(CloudFlavor, 'field_references', {}),
'cca_props': getattr(CC_AgentProperties, 'field_references', {}),
'controller_props': getattr(ControllerProperties, 'field_references', {}),
}
class CloudProperties(AviResource):
resource_name = "cloudproperties"
# all schemas
cc_vtypes_item_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=False,
constraints=[
constraints.AllowedValues(['CLOUD_VCENTER', 'CLOUD_DOCKER_UCP', 'CLOUD_APIC', 'CLOUD_OPENSTACK', 'CLOUD_MESOS', 'CLOUD_RANCHER', 'CLOUD_VCA', 'CLOUD_LINUXSERVER', 'CLOUD_OSHIFT_K8S', 'CLOUD_AWS', 'CLOUD_NONE']),
],
)
cc_vtypes_schema = properties.Schema(
properties.Schema.LIST,
_("Cloud types supported by CloudConnector"),
schema=cc_vtypes_item_schema,
required=False,
update_allowed=True,
)
hyp_props_item_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=Hypervisor_Properties.properties_schema,
required=True,
update_allowed=False,
)
hyp_props_schema = properties.Schema(
properties.Schema.LIST,
_("Hypervisor properties"),
schema=hyp_props_item_schema,
required=False,
update_allowed=True,
)
cc_props_schema = properties.Schema(
properties.Schema.MAP,
_("CloudConnector properties"),
schema=CC_Properties.properties_schema,
required=False,
update_allowed=True,
)
info_item_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=CloudInfo.properties_schema,
required=True,
update_allowed=False,
)
info_schema = properties.Schema(
properties.Schema.LIST,
_("Properties specific to a cloud type"),
schema=info_item_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'cc_vtypes',
'hyp_props',
'cc_props',
'info',
)
# mapping of properties to their schemas
properties_schema = {
'cc_vtypes': cc_vtypes_schema,
'hyp_props': hyp_props_schema,
'cc_props': cc_props_schema,
'info': info_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'info': getattr(CloudInfo, 'field_references', {}),
'hyp_props': getattr(Hypervisor_Properties, 'field_references', {}),
'cc_props': getattr(CC_Properties, 'field_references', {}),
}
def resource_mapping():
return {
'Avi::LBaaS::CloudProperties': CloudProperties,
}
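resource_mapping() follows the standard Heat plugin convention: Heat imports each module on its plugin path and calls this function to learn which resource type names map to which classes. A hedged sketch of that discovery step follows; the loader below is illustrative, not Heat's actual code.

import importlib

def discover_avi_resources(module_name='avi.heat.resources.cloud_properties'):
    # Import the plugin module and print its type-name -> class mapping.
    module = importlib.import_module(module_name)
    for type_name, cls in module.resource_mapping().items():
        print('%s -> %s' % (type_name, cls.__name__))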
|
{
"content_hash": "f8950ddd35d369e389e03749474b58e8",
"timestamp": "",
"source": "github",
"line_count": 474,
"max_line_length": 223,
"avg_line_length": 27.11181434599156,
"alnum_prop": 0.5882032526651623,
"repo_name": "ypraveen/avi-heat",
"id": "40fdffd4e8a74d1ff2d5c325458d8fcba21470d3",
"size": "12937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avi/heat/resources/cloud_properties.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "888069"
},
{
"name": "Shell",
"bytes": "577"
}
],
"symlink_target": ""
}
|
"""
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
import pytest
from byceps.services.tourney import (
match_comment_service as comment_service,
match_service,
)
def test_get_comments_for_match(
api_client, api_client_authz_header, match, comment
):
url = f'/api/v1/tourney/matches/{match.id}/comments'
headers = [api_client_authz_header]
response = api_client.get(url, headers=headers)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.get_json() == {
'comments': [
{
'comment_id': str(comment.id),
'match_id': str(comment.match_id),
'created_at': comment.created_at.isoformat(),
'creator': {
'user_id': str(comment.created_by.id),
'screen_name': comment.created_by.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'body_text': 'Denn man tau.',
'body_html': 'Denn man tau.',
'last_edited_at': None,
'last_editor': None,
'hidden': False,
'hidden_at': None,
'hidden_by': None,
}
]
}
def test_get_comments_for_match_with_party_id(
api_client, api_client_authz_header, match, comment, party
):
url = f'/api/v1/tourney/matches/{match.id}/comments?party_id={party.id}'
headers = [api_client_authz_header]
response = api_client.get(url, headers=headers)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.get_json() == {
'comments': [
{
'comment_id': str(comment.id),
'match_id': str(comment.match_id),
'created_at': comment.created_at.isoformat(),
'creator': {
'user_id': str(comment.created_by.id),
'screen_name': comment.created_by.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'body_text': 'Denn man tau.',
'body_html': 'Denn man tau.',
'last_edited_at': None,
'last_editor': None,
'hidden': False,
'hidden_at': None,
'hidden_by': None,
}
]
}
def test_get_comments_for_match_with_edited_comment(
api_client, api_client_authz_header, match, edited_comment
):
url = f'/api/v1/tourney/matches/{match.id}/comments'
headers = [api_client_authz_header]
response = api_client.get(url, headers=headers)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.get_json() == {
'comments': [
{
'comment_id': str(edited_comment.id),
'match_id': str(edited_comment.match_id),
'created_at': edited_comment.created_at.isoformat(),
'creator': {
'user_id': str(edited_comment.created_by.id),
'screen_name': edited_comment.created_by.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'body_text': '[b]So nicht[/b], Freundchen!',
'body_html': '<strong>So nicht</strong>, Freundchen!',
'last_edited_at': edited_comment.last_edited_at.isoformat(),
'last_editor': {
'user_id': str(edited_comment.last_edited_by.id),
'screen_name': edited_comment.last_edited_by.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'hidden': False,
'hidden_at': None,
'hidden_by': None,
}
]
}
# helpers
@pytest.fixture
def match(api_app):
return match_service.create_match()
@pytest.fixture
def comment(api_app, match, user):
return comment_service.create_comment(match.id, user.id, 'Denn man tau.')
@pytest.fixture
def edited_comment(api_app, comment, admin_user):
comment_service.update_comment(
comment.id, admin_user.id, '[b]So nicht[/b], Freundchen!'
)
return comment_service.get_comment(comment.id)
|
{
"content_hash": "f3d9b9af70aa5919e6de3ad313c43687",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 77,
"avg_line_length": 33.19444444444444,
"alnum_prop": 0.5110878661087866,
"repo_name": "homeworkprod/byceps",
"id": "70521cbbe197b6dcf029d1b5b966711aa956d349",
"size": "4780",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/integration/api/v1/tourney/match/comments/test_get_comments_for_match.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38198"
},
{
"name": "HTML",
"bytes": "318830"
},
{
"name": "JavaScript",
"bytes": "8541"
},
{
"name": "Python",
"bytes": "935249"
}
],
"symlink_target": ""
}
|
import argparse
import csv
import datetime
import pandas
# pylint: disable=superfluous-parens
# pylint: disable=broad-except
import logging
import os
import sys
from sqlalchemy.orm import aliased
from sqlalchemy import func, update
from rdr_service.code_constants import BASICS_PROFILE_UPDATE_QUESTION_CODES
from rdr_service.dao.questionnaire_response_dao import QuestionnaireResponseDao
from rdr_service.model.bq_questionnaires import BQPDRTheBasicsSchema
from rdr_service.model.code import Code
from rdr_service.model.questionnaire import Questionnaire, QuestionnaireConcept
from rdr_service.model.questionnaire_response import QuestionnaireResponse, QuestionnaireResponseAnswer
from rdr_service.model.questionnaire import QuestionnaireQuestion
from rdr_service.services.system_utils import setup_logging, setup_i18n, print_progress_bar, list_chunks
from rdr_service.tools.tool_libs import GCPProcessContext, GCPEnvConfigObject
from rdr_service.participant_enums import QuestionnaireResponseClassificationType
_logger = logging.getLogger("rdr_logger")
# Tool_cmd and tool_desc name are required.
# Remember to add/update bash completion in 'tool_lib/tools.bash'
tool_cmd = "thebasics-analyzer"
tool_desc = "Tool to collect data on participants with partial/multiple TheBasics responses"
# Restrict verbose output of answer data for these fields (PII), by default
REDACTED_FIELDS = BQPDRTheBasicsSchema._force_boolean_fields
# Column headers for TSV export
EXPORT_FIELDS = ['participant_id', 'questionnaire_response_id', 'current_classification', 'authored', 'external_id',
'payload_type', 'duplicate_of', 'reason']
# Rows in a tool export results file marked as COMPLETE (default classification) will not need updating
# NOTE: Currently only expect DUPLICATE, PROFILE_UPDATE, and NO_ANSWER_VALUES to be assigned by the tool
CLASSIFICATION_UPDATE_VALUES = [str(QuestionnaireResponseClassificationType.DUPLICATE),
str(QuestionnaireResponseClassificationType.PROFILE_UPDATE),
str(QuestionnaireResponseClassificationType.NO_ANSWER_VALUES),
str(QuestionnaireResponseClassificationType.AUTHORED_TIME_UPDATED),
str(QuestionnaireResponseClassificationType.PARTIAL)
]
class TheBasicsAnalyzerClass(object):
def __init__(self, args, gcp_env: GCPEnvConfigObject, id_list=None):
"""
:param args: command line arguments.
:param gcp_env: gcp environment information, see: gcp_initialize().
"""
self.args = args
self.gcp_env = gcp_env
self.id_list = id_list
self.results = dict()
self.pids_with_partials = dict()
self.ro_dao = None
def get_the_basics_questionnaire_ids(self):
""" Return a list of all questionnaire_id values associated with TheBasics survey """
with self.ro_dao.session() as session:
results = session.query(
Questionnaire
).join(
QuestionnaireConcept, QuestionnaireConcept.questionnaireId == Questionnaire.questionnaireId
).join(
Code, QuestionnaireConcept.codeId == Code.codeId
).filter(
Code.value == 'TheBasics'
).all()
return [r.questionnaireId for r in results]
def add_results_to_tsv(self, pid, pid_results):
""" Add the results generated for a specific participant to the TSV export file """
# Participants who didn't have a TheBasics response, or only had a single complete survey response, are skipped
if not len(pid_results) or (len(pid_results) == 1 and 'COMPLETE' in pid_results[0]):
return
with open(self.args.export_to, 'a') as f:
tsv_writer = csv.writer(f, delimiter='\t')
for rec in pid_results:
row_values = [pid, rec['questionnaire_response_id'],
rec['current_classification'],
rec['authored'].strftime("%Y-%m-%d %H:%M:%S") if rec['authored'] else None,
rec['external_id'], rec['payload_type'], rec['duplicate_of'], rec['reason']]
tsv_writer.writerow(row_values)
# if blank row wanted between each pid's results:
# tsv_writer.writerow(['' for _ in EXPORT_FIELDS])
return
def generate_response_diff(self, curr_response, prior_response=None):
"""
Inspect two chronologically adjacent TheBasics responses dicts to generate a diff-like summary
:param curr_response: A dict of question code keys and answer values
:param prior_response: A dict of question code keys and answer values
"""
diff_details = list()
prior_response_keys = prior_response.keys() if prior_response else []
curr_response_keys = curr_response.keys()
key_set = set().union(prior_response_keys, curr_response_keys)
# Diff Tuple contains (<diff symbol>, <question code/field name>[, <answer value>])
# Diff symbols:
# + Field did not exist in prior response; new/added in the current response
# = Field exists in both prior and current response and has the same answer value
# ! Field exists in both prior and current response but answer value changed in the current response
# - Field existed in the prior response but is missing from the current response
#
# The answer value is included in the diff for new (+) or changed (!) answers; omitted if unchanged (=)
# If the prior_response is None (first payload), all content will be displayed as a new (+) answer
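        # Illustrative (hypothetical) diff between two responses:
        #   ('+', 'NewQuestionCode', 'answer_1')   added in the current response
        #   ('!', 'ChangedCode', 'answer_2')       answer value changed
        #   ('=', 'UnchangedCode')                 same answer in both
        #   ('-', 'DroppedCode')                   missing from the current response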
for key in sorted(key_set):
if key in prior_response_keys and key not in curr_response_keys:
diff_details.append(('-', key))
else:
answer = curr_response.get(key)
# Redact answers to free text fields (PII) unless they were skipped, or redaction is disabled
if key in curr_response_keys and key not in prior_response_keys:
answer_output = answer if (answer.lower() == 'pmi_skip' or key not in REDACTED_FIELDS
or self.args.no_redact) else '<redacted>'
diff_details.append(('+', key, answer_output))
elif prior_response[key] != curr_response[key]:
answer_output = answer if (answer.lower() == 'pmi_skip' or key not in REDACTED_FIELDS
or self.args.no_redact) else '<redacted>'
diff_details.append(('!', key, answer_output))
else:
diff_details.append(('=', key))
return diff_details
def get_response_as_dict(self, response_id : int, session=None) -> dict:
"""
Generate a dict of a TheBasics response. Includes meta data keys/values and an answers nested dict
with question code keys and answer values. The answer value is from a COALESCE of the
QuestionnaireResponseAnswer table's value_* columns for each possible answer datatype
:param response_id: Integer questionnaire response id
:param session: A DAO session() object, if one has already been instantiated.
"""
response_dict = {'answers': dict()}
if not session:
close_session = True
session = self.ro_dao.session()
else:
close_session = False
        # It's possible for the answer_list query below to return nothing in isolated cases where there were no answers
        # in the payload, so grab the QuestionnaireResponse row separately as well to extract the response metadata
meta_row = session.query(QuestionnaireResponse)\
.filter(QuestionnaireResponse.questionnaireResponseId == response_id).first()
answer = aliased(Code)
answer_list = session.query(
QuestionnaireResponse.questionnaireResponseId,
QuestionnaireResponse.answerHash,
QuestionnaireResponse.authored,
QuestionnaireResponse.externalId,
Code.value.label('question_code_value'),
func.coalesce(answer.value,
QuestionnaireResponseAnswer.valueBoolean,
QuestionnaireResponseAnswer.valueDate,
QuestionnaireResponseAnswer.valueDateTime,
QuestionnaireResponseAnswer.valueDecimal,
QuestionnaireResponseAnswer.valueInteger,
QuestionnaireResponseAnswer.valueString,
QuestionnaireResponseAnswer.valueSystem,
QuestionnaireResponseAnswer.valueUri
).label('answer_value')
).select_from(
QuestionnaireResponse
).join(
QuestionnaireResponseAnswer
).join(
QuestionnaireQuestion,
QuestionnaireResponseAnswer.questionId == QuestionnaireQuestion.questionnaireQuestionId
).join(
Code, QuestionnaireQuestion.codeId == Code.codeId
).outerjoin(
answer, QuestionnaireResponseAnswer.valueCodeId == answer.codeId
).filter(
QuestionnaireResponse.questionnaireResponseId == response_id,
QuestionnaireResponse.classificationType != QuestionnaireResponseClassificationType.DUPLICATE
).order_by(QuestionnaireResponse.authored,
QuestionnaireResponse.created
).all()
# Build nested dict of question code keys/answer values
for row in answer_list:
response_dict['answers'][row.question_code_value] = row.answer_value
response_dict['answer_count'] = len(response_dict['answers'].keys())
response_dict['questionnaireResponseId'] = response_id
response_dict['classificationType'] = meta_row.classificationType if meta_row else None
response_dict['answerHash'] = meta_row.answerHash if meta_row else None
response_dict['authored'] = meta_row.authored if meta_row else None
response_dict['externalId'] = meta_row.externalId if meta_row else None
if close_session:
session.close()
return response_dict
def output_response_history(self, pid, response_list):
"""
For --verbose mode, write formatted details of the participant's TheBasics data to stdout
"""
last_answers = None
last_response_id = None
for idx in range(len(response_list)):
rsp = response_list[idx]
rsp_id = rsp.get('questionnaire_response_id', None)
payload = str(rsp.get('payload_type', 'UNKNOWN'))
ext_id = rsp.get('external_id', None)
authored = datetime.datetime.strftime(rsp.get('authored'), '%Y-%m-%dT%H:%M:%S')
dup_of = rsp.get('duplicate_of', None)
reason = rsp.get('reason', '')
ans_hash = rsp.get('answer_hash')
curr_answers = response_list[idx].get('answers', None)
print('\n'.join([f'{"Participant":52}:\tP{pid}',
f'{"Questionnaire Response":52}:\t{rsp_id}',
f'{"Authored":52}:\t{authored}',
f'{"External id":52}:\t{ext_id}',
f'{"Answer hash":52}:\t{ans_hash}',
f'{"Payload inspection result":52}:\t{payload}',
f'{"Duplicate of":52}:\t{int(dup_of) if dup_of else None}',
f'{"Reason":52}:\t{reason}']))
diff = self.generate_response_diff(curr_answers, last_answers)
if len(diff):
if idx > 0:
# This is not the first response in the participant's list
print(f'\nResponse {rsp_id} content vs. last response {last_response_id}')
else:
print(f'\nResponse {rsp_id} content:')
for line in diff:
if len(line) == 3:
# line is tuple: (<diff symbol +! >, <question code>, <answer>)
print(f'{line[0]} {line[1]:50}:\t{line[2]}')
else:
# Line is tuple(<diff symbol =- >, <question code>)
print(f'{line[0]} {line[1]}')
print('\n')
last_answers = curr_answers
last_response_id = rsp_id
def inspect_responses(self, pid, response_list):
"""
Inspect the entire TheBasics response history for a participant
It will use the QuestionnaireResponseAnswer data for all the received payloads for this participant to
look for any that should be marked with a specific QuestionnaireResponseClassificationType value, such as
DUPLICATE or NO_ANSWER_VALUES. Requires comparing adjacent responses to find subset/superset DUPLICATE cases
:param pid: Participant ID
:param response_list: List of dicts with summary details about each of the participant's TheBasics responses
"""
if not len(response_list):
print(f'No data for participant {pid}')
return
last_response_answer_set, last_authored, last_response_type = (None, None, None)
last_position = 0
answer_hashes = [r['answer_hash'] for r in response_list]
has_completed_survey = False # Track if/when a COMPLETE survey response is detected
for curr_position in range(len(response_list)):
curr_response = response_list[curr_position]
curr_authored, curr_response_type, curr_rsp_id = (curr_response.get('authored', None),
curr_response.get('payload_type', None),
curr_response.get('questionnaire_response_id', None))
# Flag indeterminate ordering for two payloads w/ identical authored timestamps but different classification
if last_authored and last_authored == curr_authored and last_response_type != curr_response_type:
curr_response['reason'] = 'Same authored ts as last payload (indeterminate order)'
if curr_response_type == QuestionnaireResponseClassificationType.COMPLETE:
# Notable if more than one COMPLETED survey is encountered, or if the first COMPLETE survey was
# not the first response in the participant's history. Does not impact classification
if has_completed_survey:
response_list[curr_position]['reason'] = ' '.join([response_list[curr_position]['reason'],
'Multiple complete survey payloads'])
elif curr_position > 0:
response_list[curr_position]['reason'] = ' '.join([response_list[curr_position]['reason'],
'Partial received before first complete survey'])
has_completed_survey = True
answers = curr_response.get('answers')
# Some outlier cases where the payload had a FHIR doc containing question codes, but no
# answer data was sent for any of them. See: questionnaire_response_ids 101422823 or 999450910
# These will be ignored when producing diffs between chronologically adjacent authored responses
if not answers:
response_list[curr_position]['payload_type'] = QuestionnaireResponseClassificationType.NO_ANSWER_VALUES
curr_response_answer_set = None
else:
# Sets are used here to enable check for subset/superset relationships between response data
curr_response_answer_set = set(answers.items())
if last_response_answer_set is not None:
# index() will find the first location in the answer_hashes list containing the current response's
# answer hash. If it doesn't match the current response's position, the current response is
# a duplicate (in answer content) of the earlier response. Set classification based on whether
# authored timestamp changed
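                # Illustrative (hypothetical) example of this check:
                #   answer_hashes = ['h1', 'h2', 'h1'] -> index('h1') == 0,
                #   so the response at position 2 duplicates position 0
                #   (DUPLICATE if authored matches, else AUTHORED_TIME_UPDATED).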
matching_hash_idx = answer_hashes.index(curr_response['answer_hash'])
if matching_hash_idx != curr_position:
if curr_authored == response_list[matching_hash_idx].get('authored'):
reclassification = QuestionnaireResponseClassificationType.DUPLICATE
else:
reclassification = QuestionnaireResponseClassificationType.AUTHORED_TIME_UPDATED
dup_rsp_id = response_list[matching_hash_idx].get('questionnaire_response_id')
# Update the current response's classification
response_list[curr_position]['payload_type'] = reclassification
response_list[curr_position]['duplicate_of'] = dup_rsp_id
response_list[curr_position]['reason'] = ' '.join([response_list[curr_position]['reason'],
'Duplicate answer hash'])
# Check for the cascading response signature where last/subset is made a dup of current/superset
elif (curr_response_answer_set and curr_response_answer_set.issuperset(last_response_answer_set)
and last_position > 0):
response_list[last_position]['payload_type'] = \
QuestionnaireResponseClassificationType.DUPLICATE
response_list[last_position]['duplicate_of'] = curr_rsp_id
response_list[last_position]['reason'] = ' '.join([response_list[curr_position-1]['reason'],
'Subset of a cascading superset response'])
last_authored = response_list[curr_position]['authored']
last_response_type = response_list[curr_position]['payload_type']
last_response_answer_set = curr_response_answer_set
last_position = curr_position
if not has_completed_survey:
# Flag the last entry with a note that participant has no full survey
response_list[-1]['reason'] = ' '.join([response_list[-1]['reason'],
'Participant has no COMPLETE survey responses'])
if self.args.verbose:
print(f'\n===============Results for P{pid}====================\n')
self.output_response_history(pid, response_list)
if self.args.export_to:
self.add_results_to_tsv(pid, response_list)
return response_list
def process_participant_responses(self, pid, responses, session):
"""
Evaluate a participant's TheBasics response history.
:param pid: Participant ID
:param responses: QuestionnaireResponse result set of TheBasics responses for this participant, in chronological
order by authored time
:param session: session object
"""
if not len(responses):
            raise ValueError(f'P{pid}: TheBasics response list was empty')
        # Each pid's responses (one dict per TheBasics response payload) will be gathered into a list of dicts
result_details = list()
# Track if this participant has something other than completed surveys in their history
has_partial = False
for response in responses:
rsp_id = response.questionnaireResponseId
response_dict = self.get_response_as_dict(rsp_id, session=session)
if not len(response_dict.keys()):
print(f'No response data found for participant {pid} response id {rsp_id}')
continue
# A full response is identified by having a populated/"truthy" value for a question code key not in the list
# of potential profile update question codes.
full_survey = False
for field, value in response_dict['answers'].items():
if field not in BASICS_PROFILE_UPDATE_QUESTION_CODES and value:
full_survey = True
break
# Default duplicate_of and reason fields to None/empty string, may be revised in next inspection step
result_details.append({ 'questionnaire_response_id': response_dict.get('questionnaireResponseId', None),
'authored': response_dict.get('authored', None),
'current_classification':\
str(response_dict.get('classificationType',
QuestionnaireResponseClassificationType.COMPLETE)),
'answer_hash': response_dict.get('answerHash', None),
'external_id' : response_dict.get('externalId', None),
'payload_type': QuestionnaireResponseClassificationType.COMPLETE if full_survey\
else QuestionnaireResponseClassificationType.PROFILE_UPDATE,
'answers': response_dict.get('answers', None),
'duplicate_of': None,
'reason': '',
})
has_partial = has_partial or not full_survey
# Participants with just a single, full survey TheBasics response won't need additional inspection (unless
# verbose mode was requested). Inspection can flag duplicates including cascading subset/superset response cases
if has_partial or len(result_details) > 1 or self.args.verbose:
self.inspect_responses(pid, result_details)
def update_db_records_from_tsv(self):
"""
Ingest a previously created thebasics-analyzer tool export TSV file and perform related DB updates
Among other fields, each TSV row has a questionnaire_response_id and the payload_type (classification) to
be assigned for that response record in the QuestionnaireResponse table
"""
data_file = self.args.import_results
if data_file:
# Import the TSV data into a pandas dataframe
df = pandas.read_csv(data_file, sep="\t")
dao = QuestionnaireResponseDao()
with dao.session() as session:
print(f'Updating QuestionnaireResponse records from results file {data_file}...')
for classification in CLASSIFICATION_UPDATE_VALUES:
value_dict = {QuestionnaireResponse.classificationType:
QuestionnaireResponseClassificationType(classification)}
# Filter the matching dataframe rows that were assigned this classification and extract the
# questionnaire_response_id values from those rows into a list.
result_df = df.loc[df['payload_type'] == classification, 'questionnaire_response_id']
response_ids = [val for index, val in result_df.items()]
processed = 0
ids_to_update = len(response_ids)
if ids_to_update:
print(f'{ids_to_update} records will be classified as {classification}')
# list_chunks() yields sublist chunks up to a max size from the specified list
for id_batch in list_chunks(response_ids, 1000):
query = (
update(QuestionnaireResponse)
.values(value_dict)
.where(QuestionnaireResponse.questionnaireResponseId.in_(id_batch))
)
session.execute(query)
session.commit()
processed += len(id_batch)
print_progress_bar(processed, ids_to_update,
prefix="{0}/{1}:".format(processed, ids_to_update),
suffix="records updated")
else:
print(f'No records of classification {classification} in import file')
else:
_logger.error('No import file specified')
def run(self):
"""
Main program process
:return: Exit code value
"""
if not (self.args.import_results or self.id_list):
_logger.error('Nothing to process')
return 1
# TODO: For now, to perform DB updates, the records to be updated must be imported from a previous export
# Updates will not occur automatically during the analysis / processing of participant responses
if self.args.import_results:
# Uses the main database to perform writes/updates
self.gcp_env.activate_sql_proxy(replica=False)
self.update_db_records_from_tsv()
else:
# Write out the header row to a fresh/truncated export file, if export was specified
if self.args.export_to:
with open(self.args.export_to, 'w') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(EXPORT_FIELDS)
# operations other than import use the read-only replica
self.gcp_env.activate_sql_proxy(replica=True)
self.ro_dao = QuestionnaireResponseDao()
basics_ids = self.get_the_basics_questionnaire_ids()
processed_pid_count = 0
num_pids = len(self.id_list)
with self.ro_dao.session() as session:
for pid in self.id_list:
responses = session.query(
QuestionnaireResponse
).select_from(
QuestionnaireResponse
).filter(
QuestionnaireResponse.participantId == pid,
QuestionnaireResponse.questionnaireId.in_(basics_ids)
).order_by(
QuestionnaireResponse.authored,
QuestionnaireResponse.created
).all()
if responses:
self.process_participant_responses(pid, responses, session)
processed_pid_count += 1
if not self.args.verbose:
print_progress_bar(
processed_pid_count, num_pids, prefix="{0}/{1}:".format(processed_pid_count, num_pids),
suffix="pids processed")
def get_id_list(fname):
"""
:param fname: The filename passed with the --from-file argument
:return: A list of integers, or None on missing/empty fname
"""
filename = os.path.expanduser(fname)
if not os.path.exists(filename):
_logger.error(f"File '{fname}' not found.")
return None
    # Read ids from the file, closing the handle when done.
    with open(filename) as f:
        lines = f.readlines()
    # Convert ids from a list of strings to a list of integers.
    ids = [int(i) for i in lines if i.strip()]
return ids if len(ids) else None
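# Example --from-file input accepted by get_id_list (one participant id per line):
#   123456789
#   987654321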
def run():
# Set global debug value and setup application logging.
setup_logging(
_logger, tool_cmd, "--debug" in sys.argv, "{0}.log".format(tool_cmd) if "--log-file" in sys.argv else None
)
setup_i18n()
# Setup program arguments.
parser = argparse.ArgumentParser(prog=tool_cmd, description=tool_desc)
parser.add_argument("--debug", help="enable debug output", default=False, action="store_true") # noqa
parser.add_argument("--project", help="gcp project name", default="localhost") # noqa
parser.add_argument("--account", help="pmi-ops account", default=None) # noqa
parser.add_argument("--service-account", help="gcp iam service account", default=None) # noqa
parser.add_argument("--id", help="Analyze TheBasics data for a single participant_id",
type=int, default=None)
parser.add_argument("--from-file", help="Analyze TheBasics data for a list of participant ids in the file",
metavar='FILE', type=str, default=None)
parser.add_argument("--verbose",
help="Display participant question/answer content to stdout. Free text redacted by default",
default=False, action="store_true")
parser.add_argument("--no-redact",
help="If --verbose, displays unredacted question/answer consent. CAUTION: can contain PII",
default=False, action="store_true"
)
parser.add_argument("--import-results", help="import results from a previous export and use to update RDR data")
parser.add_argument("--export-to", help="Export results to a tsv file", metavar='OUTPUT_TSV_FILE',
type=str, default=None)
args = parser.parse_args()
with GCPProcessContext(tool_cmd, args.project, args.account, args.service_account) as gcp_env:
ids = None
if hasattr(args, 'import_results') and not args.import_results:
if hasattr(args, 'from_file') and args.from_file:
ids = get_id_list(args.from_file)
else:
ids = list([int(args.id) if args.id else None])
process = TheBasicsAnalyzerClass(args, gcp_env, ids)
exit_code = process.run()
return exit_code
# --- Main Program Call ---
if __name__ == "__main__":
sys.exit(run())
|
{
"content_hash": "dd941df08d10399a6aba625ab4ceb67d",
"timestamp": "",
"source": "github",
"line_count": 554,
"max_line_length": 120,
"avg_line_length": 54.64079422382672,
"alnum_prop": 0.5913911003931155,
"repo_name": "all-of-us/raw-data-repository",
"id": "ede18d6778ba6901ea6a96726787be9ceccdef60",
"size": "30397",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/tools/tool_libs/thebasics_analyzer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
}
|
try:
import unittest2 as unittest
except ImportError:
import unittest
import testlib
class ModularInputKindTestCase(testlib.SDKTestCase):
def setUp(self):
super(ModularInputKindTestCase, self).setUp()
self.uncheckedRestartSplunk()
def test_lists_modular_inputs(self):
if self.service.splunk_version[0] < 5:
print "Modular inputs don't exist prior to Splunk 5.0. Skipping."
return
elif not self.app_collection_installed():
print "Test requires sdk-app-collection. Skipping."
return
else:
# Install modular inputs to list, and restart
# so they'll show up.
self.install_app_from_collection("modular-inputs")
self.uncheckedRestartSplunk()
inputs = self.service.inputs
        if ('abcd', 'test2') not in inputs:
inputs.create('abcd', 'test2', field1='boris')
input = inputs['abcd', 'test2']
self.assertEqual(input.field1, 'boris')
for m in self.service.modular_input_kinds:
self.check_modular_input_kind(m)
def check_modular_input_kind(self, m):
        print(m.name)
if m.name == 'test1':
self.assertEqual('Test "Input" - 1', m['title'])
self.assertEqual("xml", m['streaming_mode'])
elif m.name == 'test2':
self.assertEqual('test2', m['title'])
self.assertEqual('simple', m['streaming_mode'])
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "257b3b8e228590b958a7cc85b9f173c6",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 77,
"avg_line_length": 34.44444444444444,
"alnum_prop": 0.5858064516129032,
"repo_name": "lowtalker/splunk-sdk-python",
"id": "eb0c7811003ed88695b9bafdd4d747c006236cbf",
"size": "2156",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_modular_input.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "628085"
}
],
"symlink_target": ""
}
|
import sys
import logging
from PyQt5.QtCore import Qt, QFile, QIODevice, QCoreApplication
from PyQt5.QtGui import QFontDatabase, QFont, QKeySequence, QColor, QPixmap
from PyQt5.QtWidgets import QApplication, QDialog, QShortcut, QMessageBox, QStyle
from src.model.application_settings import ApplicationSettings
from src.view.main_window import MainWindow
from src.model.sprite import Sprite
from src.model.resources_cache import ResourcesCache
import src.model.appdata as appdata
import src.helpers.utils as utils
class Application(QApplication):
resources = {}
# TODO add indication if file is modified / saved
# TODO Decide on resizing logistic
# TODO Add tolerance to Filler
# TODO Layers: Add Change Opacity, Visibility
# TODO Add Import from Spritesheets
# TODO Finish Basic Ink Functionality
# TODO Finish Color Palette
# TODO Handle error on loading images/sprites
# TODO [Post 1.0] Add Effects Support
# TODO [Post 1.0] Add More Tools : Move, Square, Circle, Line, Color Replacer, Text
    # TODO [Post 1.0] Add More Inks: Add, Bright, Dark, Tile, Grain, H. Grad, V. Grad, Jumble, Sweep
def __init__(self, args):
super(Application, self).__init__(args)
QCoreApplication.setOrganizationName("Rafael Vasco")
QCoreApplication.setOrganizationDomain("rafaelvasco.com")
QCoreApplication.setApplicationName("SpriteMator")
logging.basicConfig(
filename='log.txt',
filemode='w',
format='%(asctime)s :: %(levelname)s :: %(message)s',
level=logging.DEBUG
)
self._load_assets()
self._mainWindow = MainWindow()
# Activate MainWindow's global event filter
self.installEventFilter(self._mainWindow)
# Open window on screen center
self._mainWindow.setGeometry(
QStyle.alignedRect(Qt.LeftToRight, Qt.AlignCenter, self._mainWindow.size(),
self.desktop().availableGeometry()))
self._shortCuts = {}
self._settings = ApplicationSettings()
self._init_shortcuts()
self.setQuitOnLastWindowClosed(True)
self._currentSprite = None
self._connect_with_window_actions()
# Load Stylesheet
style_file = QFile(':/styles/style')
style_file.open(QIODevice.ReadOnly)
if style_file.isOpen():
self.setStyleSheet(str(style_file.readAll(), encoding='ascii'))
style_file.close()
self.setFont(ResourcesCache.get('SmallFont'))
self._mainWindow.show()
self._settings.load_settings()
self._update_top_menu()
sys.exit(self.exec_())
# ---------------------------------------------------------------------
def new_sprite(self):
if self._mainWindow.new_sprite_dialog.exec_() == QDialog.Accepted:
result = self._mainWindow.new_sprite_dialog.result()
if self._currentSprite is not None:
self.close_sprite()
sprite = Sprite.create(result.choosen_width, result.choosen_height)
self.set_sprite(sprite)
self._update_top_menu()
def set_sprite(self, sprite):
self._currentSprite = sprite
self._mainWindow.canvas.set_sprite(self._currentSprite)
self._mainWindow.animation_display.set_sprite(self._currentSprite)
self._mainWindow.animation_manager.set_sprite(self._currentSprite)
self._mainWindow.layer_manager.set_sprite(self._currentSprite)
self._mainWindow.show_workspace()
def load_sprite(self):
last_opened_folder = self._settings.settings_map["last_folder_path"].value
sprite_file = utils.show_open_file_dialog('Open Sprite:',
'Sprite (*.spr)',
last_opened_folder)
if sprite_file:
sprite = Sprite.load_from_file(sprite_file)
self.set_sprite(sprite)
self._update_top_menu()
new_opened_folder = utils.get_folder_path_from_filepath(sprite_file)
if new_opened_folder != last_opened_folder:
self._settings.settings_map["last_folder_path"].value = new_opened_folder
def import_sprite(self):
last_opened_folder = self._settings.settings_map["last_folder_path"].value
image_files = utils.show_open_files_dialog('Select one or more images:',
'PNG Image (*.png)', last_opened_folder)
if len(image_files) > 0:
sprite = Sprite.import_from_image_files(image_files)
if sprite:
self.set_sprite(sprite)
self._update_top_menu()
new_opened_folder = utils.get_folder_path_from_filepath(image_files[0])
if new_opened_folder != last_opened_folder:
self._settings.settings_map["last_folder_path"].value = new_opened_folder
def save_sprite(self):
if self._currentSprite is None:
return
if self._currentSprite.file_path:
save_path = self._currentSprite.file_path
else:
last_opened_folder = self._settings.settings_map["last_folder_path"].value
save_path = utils.show_save_file_dialog('Save Sprite...',
'Sprite (*.spr)',
last_opened_folder)
if save_path is not None and len(save_path) > 0:
Sprite.save(self._currentSprite, save_path)
def save_sprite_as(self):
if self._currentSprite is None:
return
last_opened_path = self._settings.settings_map["last_folder_path"].value
new_save_path = utils.show_save_file_dialog('Save Sprite As...',
'Sprite (*.spr)',
last_opened_path)
if new_save_path:
Sprite.save(self._currentSprite, new_save_path)
self.close_sprite()
new_sprite = Sprite.load_from_file(new_save_path)
self.set_sprite(new_sprite)
def export_sprite(self):
if self._currentSprite is None:
return
last_opened_path = self._settings.settings_map["last_folder_path"].value
target_folder = utils.show_save_to_folder_dialog(
'Choose a folder to save Sprite animations:',
last_opened_path)
if target_folder:
try:
# Sprite.export(self._current_sprite, target_folder)
Sprite.export_to_spritesheet(self._currentSprite, target_folder)
except Exception as e:
self._raise_error('exportSprite', e)
return
            utils.show_info_message(self._mainWindow, 'Info', 'Sprite Exported Successfully.')
def close_sprite(self):
# TODO Save Sprite Before Close Test
self._mainWindow.canvas.unload_sprite()
self._mainWindow.animation_display.unload_sprite()
self._mainWindow.layer_manager.clear()
self._mainWindow.animation_manager.clear()
self._currentSprite = None
self._mainWindow.hide_workspace()
self._update_top_menu()
def terminate(self):
self.removeEventFilter(self._mainWindow)
self.close_sprite()
self._mainWindow.close()
def toggle_back_light(self):
self._mainWindow.canvas.toggle_backlight()
# -------------------------------------------------------------------------
def _connect_with_window_actions(self):
self._mainWindow.actionNew.triggered.connect(self.new_sprite)
self._mainWindow.actionOpen.triggered.connect(self.load_sprite)
self._mainWindow.actionImport.triggered.connect(self.import_sprite)
self._mainWindow.actionSave.triggered.connect(self.save_sprite)
self._mainWindow.actionSaveAs.triggered.connect(self.save_sprite_as)
self._mainWindow.actionExport.triggered.connect(self.export_sprite)
self._mainWindow.actionClose.triggered.connect(self.close_sprite)
self._mainWindow.actionQuit.triggered.connect(self.terminate)
self._mainWindow.closed.connect(self._on_window_close)
# -------------------------------------------------------------------------
@staticmethod
def _load_assets():
# Fonts #
QFontDatabase.addApplicationFont(":/fonts/font_nokia")
QFontDatabase.addApplicationFont(":/fonts/font_flx")
default_font = QFont("Nokia Cellphone FC")
default_font.setPointSize(12)
small_font = QFont("flxpixl")
small_font.setPointSize(12)
ResourcesCache.register_resource("BigFont", default_font)
ResourcesCache.register_resource("SmallFont", small_font)
# Pixmaps #
checker_tile_light = utils.generate_checkerboard_tile(8, QColor(238, 238, 238),
QColor(255, 255, 255))
checker_tile_dark = utils.generate_checkerboard_tile(8, QColor(59, 59, 59),
QColor(63, 63, 63))
ResourcesCache.register_resource("CheckerTileLight", checker_tile_light)
ResourcesCache.register_resource("CheckerTileDark", checker_tile_dark)
tool_cursor_1 = QPixmap(':/images/tool_cursor_1')
ResourcesCache.register_resource('ToolCursor1', tool_cursor_1)
def _init_shortcuts(self):
shortcut_data = appdata.shortcuts
for holder, shortCutGroup in shortcut_data.items():
self._shortCuts[holder] = {}
for shortCutName, shortCutText in shortCutGroup.items():
shortcut = self._shortCuts[holder][shortCutName] = QShortcut(
QKeySequence(shortCutText), self._mainWindow)
shortcut.setAutoRepeat(False)
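                # bind the current holder/shortCutName as lambda defaults so
                # each shortcut captures its own pair (a plain closure would
                # see only the values from the final loop iteration)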
shortcut.activated.connect(
lambda h=holder, n=shortCutName: self._on_shortcut_triggered(h, n))
def _on_shortcut_triggered(self, holder, shortcut_name):
# APPLICATION
if holder == 'APPLICATION':
target = self
if shortcut_name == 'TOGGLE_LUMINOSITY':
target.toggle_back_light()
# CANVAS
elif holder == 'CANVAS':
target = self._mainWindow.canvas
if shortcut_name == 'TOGGLE_VIEW':
target.toggle_view()
elif shortcut_name == "TOGGLE_GRID":
target.toogle_grid()
elif shortcut_name == 'TOGGLE_FIT_IN_VIEW':
target.toggle_fit_in_view()
elif shortcut_name == 'CLEAR':
target.clear()
# ANIMATION MANAGER
elif holder == 'ANIMATION_MANAGER':
target = self._mainWindow.animation_manager
if shortcut_name == 'GO_PREV_FRAME':
target.go_to_previous_frame()
elif shortcut_name == 'GO_NEXT_FRAME':
target.go_to_next_frame()
# COLOR PICKER
elif holder == 'COLORPICKER':
target = self._mainWindow.color_picker
if shortcut_name == 'SWITCH_COLOR':
target.switch_active_color()
# TOOLBOX
elif holder == 'TOOLBOX':
target = self._mainWindow.tool_box
if shortcut_name == 'TOOL_SLOT_0':
target.switch_tool_slot(0)
elif shortcut_name == 'TOOL_SLOT_1':
target.switch_tool_slot(1)
elif shortcut_name == 'TOOL_SLOT_2':
target.switch_tool_slot(2)
elif shortcut_name == 'TOOL_SLOT_3':
target.switch_tool_slot(3)
def _on_window_close(self):
self._settings.write_settings()
def _raise_error(self, source, exception):
message = str(exception)
logging.error('[{0}] {1}'.format(source, message))
QMessageBox.warning(self._mainWindow, 'Warning',
                            '[{0}] An error has occurred: {1}'.format(source, message))
def _update_top_menu(self):
if self._currentSprite is not None:
self._mainWindow.actionNew.setEnabled(True)
self._mainWindow.actionClose.setEnabled(True)
self._mainWindow.actionSave.setEnabled(True)
self._mainWindow.actionSaveAs.setEnabled(True)
self._mainWindow.actionOpen.setEnabled(True)
self._mainWindow.actionImport.setEnabled(True)
self._mainWindow.actionExport.setEnabled(True)
else:
self._mainWindow.actionNew.setEnabled(True)
self._mainWindow.actionClose.setEnabled(False)
self._mainWindow.actionSave.setEnabled(False)
self._mainWindow.actionSaveAs.setEnabled(False)
self._mainWindow.actionOpen.setEnabled(True)
self._mainWindow.actionImport.setEnabled(True)
self._mainWindow.actionExport.setEnabled(False)
# =============================================================================
if __name__ == '__main__':
application = Application(sys.argv)
ResourcesCache.dispose()
|
{
"content_hash": "682977e757adedf54e23293c948d76dd",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 101,
"avg_line_length": 31.896882494004796,
"alnum_prop": 0.5849184271859259,
"repo_name": "rafaelvasco/SpriteMator",
"id": "8ac1054b88780ca95206fc51623952bcbbf4cecd",
"size": "13620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/application.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "382505"
}
],
"symlink_target": ""
}
|
"""This code example gets all active activity groups.
To create activity groups, run create_activity_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: ActivityGroupService.getActivityGroupsByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
activity_group_service = client.GetService('ActivityGroupService',
version='v201502')
# Create statement object to only select active activity groups.
values = [{
'key': 'status',
'value': {
'xsi_type': 'TextValue',
'value': 'ACTIVE'
}
}]
query = 'WHERE status = :status'
# Create a filter statement.
statement = dfp.FilterStatement(query, values)
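  # Illustration: the ':status' placeholder in the query is bound to the
  # 'ACTIVE' TextValue defined above when the statement is sent.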
# Get activity groups by statement.
while True:
response = activity_group_service.getActivityGroupsByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for activity_group in response['results']:
print ('Activity group with ID \'%s\' and name \'%s\' was found.'
% (activity_group['id'], activity_group['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
{
"content_hash": "7c41514df9f26d65f9b0cf685140cb9a",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 30.982142857142858,
"alnum_prop": 0.669164265129683,
"repo_name": "cctaylor/googleads-python-lib",
"id": "eb8d380e3623b5cf9d2275fcfc68e7f5f2ac0844",
"size": "2353",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/dfp/v201502/activity_group_service/get_active_activity_groups.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2774292"
}
],
"symlink_target": ""
}
|
import os
from mypsp import create_app, db
from mypsp.models import TimeRecord
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, TimeRecord=TimeRecord)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command("db", MigrateCommand)
if __name__ == '__main__':
manager.run()
|
{
"content_hash": "391979654c69f83896204af089aa28e2",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 68,
"avg_line_length": 26.35,
"alnum_prop": 0.7381404174573055,
"repo_name": "benmurray/myPSP",
"id": "75aeee6e4df25dbcd9e67c1306f7f55a84f7aead",
"size": "527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "51"
},
{
"name": "HTML",
"bytes": "13497"
},
{
"name": "JavaScript",
"bytes": "104893"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "8793"
}
],
"symlink_target": ""
}
|
"""Helper script used for creating a new release branch on GitHub.
ONLY RELEASE COORDINATORS SHOULD USE THIS SCRIPT.
Usage: Run this script from your oppia root folder:
python scripts/cut_release_branch.py --version="x.y.z"
where x.y.z is the new version of Oppia, e.g. 2.5.3.
"""
import argparse
import json
import os
import re
import subprocess
import urllib
import common # pylint: disable=relative-import
def new_version_type(arg, pattern=re.compile(r'\d\.\d\.\d')):
if not pattern.match(arg):
raise argparse.ArgumentTypeError(
'The format of \'new_version\' should be: x.x.x')
return arg
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
'--new_version', help='new version to be released', type=new_version_type)
PARSED_ARGS = _PARSER.parse_args()
if PARSED_ARGS.new_version:
TARGET_VERSION = PARSED_ARGS.new_version
else:
raise Exception('ERROR: A \'new_version\' arg must be specified.')
# Construct the new branch name.
NEW_BRANCH_NAME = 'release-%s' % TARGET_VERSION
NEW_APP_YAML_VERSION = TARGET_VERSION.replace('-', '.')
def _get_remote_alias():
# Find the correct alias for the remote branch.
git_remote_output = subprocess.check_output(
['git', 'remote', '-v']).split('\n')
remote_alias = None
for line in git_remote_output:
if 'https://github.com/oppia/oppia' in line:
remote_alias = line.split()[0]
if remote_alias is None:
raise Exception(
'ERROR: There is no existing remote alias for the Oppia repo.')
return remote_alias
def _verify_local_repo_is_clean():
"""Checks that the local Git repo is clean."""
git_status_output = subprocess.check_output(
['git', 'status']).strip().split('\n')
branch_is_clean = (
git_status_output[1] == 'nothing to commit, working directory clean')
if len(git_status_output) > 2 or not branch_is_clean:
raise Exception(
'ERROR: This script should be run from a clean branch.')
def _verify_current_branch_is_develop():
"""Checks that the user is on the develop branch."""
git_status_output = subprocess.check_output(
['git', 'status']).strip().split('\n')
if git_status_output[0] != 'On branch develop':
raise Exception(
'ERROR: This script can only be run from the "develop" branch.')
def _verify_target_branch_does_not_already_exist(remote_alias):
"""Checks that the new release branch doesn't already exist locally or
remotely.
"""
git_branch_output = subprocess.check_output(['git', 'branch'])
if NEW_BRANCH_NAME in git_branch_output:
raise Exception(
'ERROR: The target branch name already exists locally. '
'Run "git branch -D %s" to delete it.' % NEW_BRANCH_NAME)
git_ls_remote_output = subprocess.check_output(
['git', 'ls-remote', '--heads', remote_alias])
remote_branch_ref = 'refs/heads/%s' % NEW_BRANCH_NAME
if remote_branch_ref in git_ls_remote_output:
raise Exception(
'ERROR: The target branch name already exists on the remote repo.')
def _verify_target_version_is_consistent_with_latest_released_version():
"""Checks that the target version is consistent with the latest released
version on GitHub.
"""
response = urllib.urlopen(
'https://api.github.com/repos/oppia/oppia/releases/latest')
if response.getcode() != 200:
raise Exception(
'ERROR: Failed to fetch latest release info from GitHub')
data = json.load(response)
latest_release_tag_name = data['tag_name']
match_result = re.match(r'v(\d)\.(\d)\.(\d)', latest_release_tag_name)
if match_result is None:
raise Exception(
'ERROR: Could not parse version number of latest GitHub release.')
prev_major, prev_minor, prev_patch = match_result.group(1, 2, 3)
match_result = re.match(r'(\d)\.(\d)\.(\d)', TARGET_VERSION)
curr_major, curr_minor, curr_patch = match_result.group(1, 2, 3)
# This will need to be overridden if the major version changes.
assert prev_major == curr_major, 'Unexpected major version change.'
if prev_minor == curr_minor:
assert int(curr_patch) == int(prev_patch) + 1
assert int(curr_patch) < 10
else:
assert int(curr_minor) == int(prev_minor) + 1
assert int(curr_patch) == 0
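# Illustration of the rules above: 2.5.3 -> 2.5.4 is a valid patch bump
# (minor unchanged, patch +1 and < 10), and 2.5.3 -> 2.6.0 is a valid minor
# bump (minor +1, patch reset to 0); any other combination fails an assertion.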
def _execute_branch_cut():
# Check that the current directory is correct.
common.require_cwd_to_be_oppia()
_verify_local_repo_is_clean()
_verify_current_branch_is_develop()
# Update the local repo.
remote_alias = _get_remote_alias()
subprocess.call(['git', 'pull', remote_alias])
_verify_target_branch_does_not_already_exist(remote_alias)
_verify_target_version_is_consistent_with_latest_released_version()
# Cut a new release branch.
print 'Cutting a new release branch: %s' % NEW_BRANCH_NAME
subprocess.call(['git', 'checkout', '-b', NEW_BRANCH_NAME])
# Update the version in app.yaml.
print 'Updating the version number in app.yaml ...'
with open('app.yaml', 'r') as f:
content = f.read()
assert content.count('version: default') == 1
os.remove('app.yaml')
content = content.replace(
'version: default', 'version: %s' % NEW_APP_YAML_VERSION)
with open('app.yaml', 'w+') as f:
f.write(content)
print 'Version number updated.'
# Make a commit.
print 'Committing the change.'
subprocess.call([
'git', 'commit', '-a', '-m',
'"Update version number to %s"' % TARGET_VERSION])
# Push the new release branch to GitHub.
print 'Pushing new release branch to GitHub.'
subprocess.call(['git', 'push', remote_alias, NEW_BRANCH_NAME])
print ''
print (
'New release branch successfully cut. You are now on branch %s' %
NEW_BRANCH_NAME)
print 'Done!'
if __name__ == '__main__':
_execute_branch_cut()
|
{
"content_hash": "23387738e9d206ba69fa2bf0ceba9a21",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 79,
"avg_line_length": 34.12571428571429,
"alnum_prop": 0.6450100468854655,
"repo_name": "terrameijar/oppia",
"id": "59f8f920675f41aeb4f2b6ac9e4d54003c0fc456",
"size": "6577",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/cut_release_branch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "95342"
},
{
"name": "HTML",
"bytes": "850374"
},
{
"name": "JavaScript",
"bytes": "2597367"
},
{
"name": "Python",
"bytes": "3177521"
},
{
"name": "Shell",
"bytes": "46904"
}
],
"symlink_target": ""
}
|
import sys, os
from PyQt4.QtGui import *
from PyQt4.QtCore import *
class BestWindow(QWidget):
def __init__(self, listView, bestexplist, path=os.getcwd()):
super(BestWindow, self).__init__()
self.listView = listView
self.resize(680, 110)
self.setWindowFlags(Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
self.setStyleSheet("QScrollBar{width:0;height:0}")
lo = QGridLayout()
self.table = MyTable(self.listView, self, bestexplist, path)
self.table.move(10,15)
lo.addWidget(self.table)
self.setLayout(lo)
self.move(listView.x()+240, listView.y()+530)
self.setStyleSheet("QTableWidget{background: #74C5FA;border:2px groove gray;border-radius:10px;padding:2px 4px;}"
"QLabel{background: white;color: blue;border-width: 2px; border-style: solid;border-color: #74C5FA;border-radius:2px}")
class MyLabel(QLabel):
def __init__(self, img, mytable, window, listView):
super(MyLabel, self).__init__()
self.listView = listView
self.mytable = mytable
self.window = window
self.img = img
self.expwid = 110
pixmap = QPixmap(img)
pixmap = pixmap.scaledToWidth(self.expwid)
self.setPixmap(pixmap)
def contextMenuEvent(self,event):
        delExp = QAction(QIcon('icons/delete.png'), u'Delete', self)
        delExp.triggered.connect(self.delExpItem)  # fires when the menu action is selected
menu = QMenu()
menu.addAction(delExp)
        menu.exec_(QCursor.pos())  # the global cursor position is simpler; event.pos() would need mapping to global coordinates
        event.accept()  # keep the context-menu event from propagating to the parent widget
def delExpItem(self,b):
os.remove(self.img)
self.window.close()
self.listView.expcalling = False
class MyTable(QTableWidget):
def __init__(self, listView, window, bestexplist, path = os.getcwd()):
super(MyTable, self).__init__()
self.columncount = 6
self.labels = []
self.path = path
self.window = window
self.listView = listView
self.piclist = bestexplist
self.setFixedSize(670, 120)
self.setFrameShape(QFrame.NoFrame)
self.setShowGrid(False)
self.fillTable(path)
self.setWindowFlags(Qt.FramelessWindowHint)
self.setStyleSheet("QScrollBar{width:0;height:0}")
def fillTable(self, path):
self.rowcount = 1
self.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.setSelectionMode(QAbstractItemView.SingleSelection)
self.verticalHeader().setVisible(False)
self.horizontalHeader().setVisible(False)
self.setRowCount(self.rowcount)
self.setColumnCount(self.columncount)
self.setShowGrid(False) # hide the edge
count = 0
self.imglist = []
for picture in self.piclist:
# img = picture
self.imglist.append(picture)
# self.labels.append(MyLabel(img, self, self.window, self.listView))
self.setCellWidget(count//self.columncount, count%self.columncount,\
MyLabel(self.imglist[count], self, self.window, self.listView)) #self.labels[count])
count += 1
        self.resizeColumnsToContents()  # fit column widths to the contents
        self.resizeRowsToContents()  # fit row heights to the contents
self.move(0, 0)
self.cellClicked.connect(self.on_click_del_table)
self.show()
def on_click_del_table(self, row, col):
self.listView.expcalling = False
self.listView.addImageMsg(self.imglist[col], False)
self.listView.bestwindow.close()
if __name__ == '__main__':
app = QApplication(sys.argv)
    # NOTE: stale standalone demo -- MyTable now takes (listView, window, bestexplist, path)
    ex = MyTable('/home/qjy/Desktop/img/19/')
# ex.paintEvent()
ex.show()
sys.exit(app.exec_())
|
{
"content_hash": "0bd7b719d5472b357ebfe1435e0bfd81",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 125,
"avg_line_length": 30.01834862385321,
"alnum_prop": 0.7258557457212714,
"repo_name": "earlybackhome/easy-expression",
"id": "05acb5d53d378d59ca89823f65153de7d395c21e",
"size": "3404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/QQqt4/bestexplist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86429"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import dist_ctr_reader
from test_dist_base import TestDistRunnerBase, runtime_main
IS_SPARSE = True
# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
class TestDistCTR2x2(TestDistRunnerBase):
def get_model(self, batch_size=2):
dnn_input_dim, lr_input_dim = dist_ctr_reader.load_data_meta()
""" network definition """
dnn_data = fluid.layers.data(
name="dnn_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False)
lr_data = fluid.layers.data(
name="lr_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False)
label = fluid.layers.data(
name="click",
shape=[-1, 1],
dtype="int64",
lod_level=0,
append_batch_size=False)
# build dnn model
dnn_layer_dims = [128, 64, 32, 1]
dnn_embedding = fluid.layers.embedding(
is_distributed=False,
input=dnn_data,
size=[dnn_input_dim, dnn_layer_dims[0]],
param_attr=fluid.ParamAttr(
name="deep_embedding",
initializer=fluid.initializer.Constant(value=0.01)),
is_sparse=IS_SPARSE)
dnn_pool = fluid.layers.sequence_pool(
input=dnn_embedding, pool_type="sum")
dnn_out = dnn_pool
for i, dim in enumerate(dnn_layer_dims[1:]):
fc = fluid.layers.fc(
input=dnn_out,
size=dim,
act="relu",
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.01)),
name='dnn-fc-%d' % i)
dnn_out = fc
# build lr model
        lr_embedding = fluid.layers.embedding(
is_distributed=False,
input=lr_data,
size=[lr_input_dim, 1],
param_attr=fluid.ParamAttr(
name="wide_embedding",
initializer=fluid.initializer.Constant(value=0.01)),
is_sparse=IS_SPARSE)
        lr_pool = fluid.layers.sequence_pool(input=lr_embedding, pool_type="sum")
merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
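        # wide & deep CTR pattern: concatenate the deep branch (stacked FC
        # layers over the DNN embedding) with the wide branch (the LR
        # embedding pool) before the final softmax classifier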
predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax')
acc = fluid.layers.accuracy(input=predict, label=label)
auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict,
label=label)
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
inference_program = paddle.fluid.default_main_program().clone()
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.0001)
sgd_optimizer.minimize(avg_cost)
dataset = dist_ctr_reader.Dataset()
train_reader = paddle.batch(dataset.train(), batch_size=batch_size)
test_reader = paddle.batch(dataset.test(), batch_size=batch_size)
return inference_program, avg_cost, train_reader, test_reader, None, predict
if __name__ == "__main__":
runtime_main(TestDistCTR2x2)
|
{
"content_hash": "7c3be04aff230755c585f02ab038fae7",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 84,
"avg_line_length": 35.33684210526316,
"alnum_prop": 0.5677688412272862,
"repo_name": "reyoung/Paddle",
"id": "902dc6544ed6858c4cd8d64b14d6af2367059091",
"size": "3970",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/dist_ctr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "274815"
},
{
"name": "C++",
"bytes": "9634861"
},
{
"name": "CMake",
"bytes": "321482"
},
{
"name": "Cuda",
"bytes": "1290076"
},
{
"name": "Dockerfile",
"bytes": "8631"
},
{
"name": "Go",
"bytes": "109508"
},
{
"name": "Perl",
"bytes": "11456"
},
{
"name": "Python",
"bytes": "4853892"
},
{
"name": "Shell",
"bytes": "170766"
}
],
"symlink_target": ""
}
|
import asyncio
import collections
import io
import json
import sys
import traceback
import warnings
from http.cookies import CookieError, Morsel
from urllib.request import getproxies
from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
from yarl import URL
from . import hdrs, helpers, http, payload
from .client_exceptions import (ClientConnectionError, ClientOSError,
ClientResponseError)
from .formdata import FormData
from .helpers import PY_35, HeadersMixin, SimpleCookie, TimerNoop, noop
from .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, PayloadWriter
from .log import client_logger
from .streams import FlowControlStreamReader
try:
import cchardet as chardet
except ImportError: # pragma: no cover
import chardet
__all__ = ('ClientRequest', 'ClientResponse')
RequestInfo = collections.namedtuple(
'RequestInfo', ('url', 'method', 'headers'))
class ClientRequest:
GET_METHODS = {hdrs.METH_GET, hdrs.METH_HEAD, hdrs.METH_OPTIONS}
POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}
ALL_METHODS = GET_METHODS.union(POST_METHODS).union(
{hdrs.METH_DELETE, hdrs.METH_TRACE})
DEFAULT_HEADERS = {
hdrs.ACCEPT: '*/*',
hdrs.ACCEPT_ENCODING: 'gzip, deflate',
}
body = b''
auth = None
response = None
response_class = None
_writer = None # async task for streaming data
_continue = None # waiter future for '100 Continue' response
# N.B.
    # Adding a __del__ method that closes self._writer doesn't make sense,
    # because _writer is an instance method and thus keeps a reference to
    # self; until the writer has finished, the finalizer will not be called.
def __init__(self, method, url, *,
params=None, headers=None, skip_auto_headers=frozenset(),
data=None, cookies=None,
auth=None, version=http.HttpVersion11, compress=None,
chunked=None, expect100=False,
loop=None, response_class=None,
proxy=None, proxy_auth=None, proxy_from_env=False,
timer=None, session=None):
if loop is None:
loop = asyncio.get_event_loop()
assert isinstance(url, URL), url
assert isinstance(proxy, (URL, type(None))), proxy
self._session = session
if params:
q = MultiDict(url.query)
url2 = url.with_query(params)
q.extend(url2.query)
url = url.with_query(q)
self.url = url.with_fragment(None)
self.original_url = url
self.method = method.upper()
self.chunked = chunked
self.compress = compress
self.loop = loop
self.length = None
self.response_class = response_class or ClientResponse
self._timer = timer if timer is not None else TimerNoop()
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
self.update_version(version)
self.update_host(url)
self.update_headers(headers)
self.update_auto_headers(skip_auto_headers)
self.update_cookies(cookies)
self.update_content_encoding(data)
self.update_auth(auth)
self.update_proxy(proxy, proxy_auth, proxy_from_env)
self.update_body_from_data(data)
self.update_transfer_encoding()
self.update_expect_continue(expect100)
@property
def host(self):
return self.url.host
@property
def port(self):
return self.url.port
@property
def request_info(self):
return RequestInfo(self.url, self.method, self.headers)
def update_host(self, url):
"""Update destination host, port and connection type (ssl)."""
# get host/port
if not url.host:
raise ValueError(
"Could not parse hostname from URL '{}'".format(url))
# basic auth info
username, password = url.user, url.password
if username:
self.auth = helpers.BasicAuth(username, password or '')
        # connection type (ssl): https/wss schemes require SSL
scheme = url.scheme
self.ssl = scheme in ('https', 'wss')
def update_version(self, version):
"""Convert request version to two elements tuple.
        parses HTTP version string, e.g. '1.1' => (1, 1)
"""
if isinstance(version, str):
v = [l.strip() for l in version.split('.', 1)]
try:
version = int(v[0]), int(v[1])
except ValueError:
raise ValueError(
'Can not parse http version number: {}'
.format(version)) from None
self.version = version
def update_headers(self, headers):
"""Update request headers."""
self.headers = CIMultiDict()
if headers:
if isinstance(headers, (dict, MultiDictProxy, MultiDict)):
headers = headers.items()
for key, value in headers:
self.headers.add(key, value)
def update_auto_headers(self, skip_auto_headers):
self.skip_auto_headers = CIMultiDict(
(hdr, None) for hdr in sorted(skip_auto_headers))
used_headers = self.headers.copy()
used_headers.extend(self.skip_auto_headers)
for hdr, val in self.DEFAULT_HEADERS.items():
if hdr not in used_headers:
self.headers.add(hdr, val)
# add host
if hdrs.HOST not in used_headers:
netloc = self.url.raw_host
if not self.url.is_default_port():
netloc += ':' + str(self.url.port)
self.headers[hdrs.HOST] = netloc
if hdrs.USER_AGENT not in used_headers:
self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE
def update_cookies(self, cookies):
"""Update request cookies header."""
if not cookies:
return
c = SimpleCookie()
if hdrs.COOKIE in self.headers:
c.load(self.headers.get(hdrs.COOKIE, ''))
del self.headers[hdrs.COOKIE]
for name, value in cookies.items():
if isinstance(value, Morsel):
# Preserve coded_value
mrsl_val = value.get(value.key, Morsel())
mrsl_val.set(value.key, value.value, value.coded_value)
c[name] = mrsl_val
else:
c[name] = value
self.headers[hdrs.COOKIE] = c.output(header='', sep=';').strip()
def update_content_encoding(self, data):
"""Set request content encoding."""
if not data:
return
enc = self.headers.get(hdrs.CONTENT_ENCODING, '').lower()
if enc:
if self.compress:
raise ValueError(
'compress can not be set '
'if Content-Encoding header is set')
elif self.compress:
if not isinstance(self.compress, str):
self.compress = 'deflate'
self.headers[hdrs.CONTENT_ENCODING] = self.compress
self.chunked = True # enable chunked, no need to deal with length
def update_transfer_encoding(self):
"""Analyze transfer-encoding header."""
te = self.headers.get(hdrs.TRANSFER_ENCODING, '').lower()
if 'chunked' in te:
if self.chunked:
raise ValueError(
'chunked can not be set '
'if "Transfer-Encoding: chunked" header is set')
elif self.chunked:
if hdrs.CONTENT_LENGTH in self.headers:
raise ValueError(
'chunked can not be set '
'if Content-Length header is set')
self.headers[hdrs.TRANSFER_ENCODING] = 'chunked'
else:
if hdrs.CONTENT_LENGTH not in self.headers:
self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
def update_auth(self, auth):
"""Set basic auth."""
if auth is None:
auth = self.auth
if auth is None:
return
if not isinstance(auth, helpers.BasicAuth):
raise TypeError('BasicAuth() tuple is required instead')
self.headers[hdrs.AUTHORIZATION] = auth.encode()
def update_body_from_data(self, body):
if not body:
return
# FormData
if isinstance(body, FormData):
body = body()
try:
body = payload.PAYLOAD_REGISTRY.get(body, disposition=None)
except payload.LookupError:
body = FormData(body)()
self.body = body
# enable chunked encoding if needed
if not self.chunked:
if hdrs.CONTENT_LENGTH not in self.headers:
size = body.size
if size is None:
self.chunked = True
else:
if hdrs.CONTENT_LENGTH not in self.headers:
self.headers[hdrs.CONTENT_LENGTH] = str(size)
# set content-type
if (hdrs.CONTENT_TYPE not in self.headers and
hdrs.CONTENT_TYPE not in self.skip_auto_headers):
self.headers[hdrs.CONTENT_TYPE] = body.content_type
# copy payload headers
if body.headers:
for (key, value) in body.headers.items():
if key not in self.headers:
self.headers[key] = value
def update_expect_continue(self, expect=False):
if expect:
self.headers[hdrs.EXPECT] = '100-continue'
elif self.headers.get(hdrs.EXPECT, '').lower() == '100-continue':
expect = True
if expect:
self._continue = helpers.create_future(self.loop)
def update_proxy(self, proxy, proxy_auth, proxy_from_env):
if proxy_from_env and not proxy:
proxy_url = getproxies().get(self.original_url.scheme)
proxy = URL(proxy_url) if proxy_url else None
if proxy and not proxy.scheme == 'http':
raise ValueError("Only http proxies are supported")
if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth):
raise ValueError("proxy_auth must be None or BasicAuth() tuple")
self.proxy = proxy
self.proxy_auth = proxy_auth
def keep_alive(self):
if self.version < HttpVersion10:
# keep alive not supported at all
return False
if self.version == HttpVersion10:
if self.headers.get(hdrs.CONNECTION) == 'keep-alive':
return True
else: # no headers means we close for Http 1.0
return False
elif self.headers.get(hdrs.CONNECTION) == 'close':
return False
return True
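    # Summary of the policy above (illustrative): HTTP/0.9 never keeps the
    # connection alive; HTTP/1.0 does so only with an explicit
    # 'Connection: keep-alive' header; HTTP/1.1+ keeps it alive unless
    # 'Connection: close' is sent.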
@asyncio.coroutine
def write_bytes(self, writer, conn):
"""Support coroutines that yields bytes objects."""
# 100 response
if self._continue is not None:
yield from writer.drain()
yield from self._continue
try:
if isinstance(self.body, payload.Payload):
yield from self.body.write(writer)
else:
if isinstance(self.body, (bytes, bytearray)):
self.body = (self.body,)
for chunk in self.body:
writer.write(chunk)
yield from writer.write_eof()
except OSError as exc:
new_exc = ClientOSError(
exc.errno,
'Can not write request body for %s' % self.url)
new_exc.__context__ = exc
new_exc.__cause__ = exc
conn.protocol.set_exception(new_exc)
except asyncio.CancelledError as exc:
if not conn.closed:
conn.protocol.set_exception(exc)
except Exception as exc:
conn.protocol.set_exception(exc)
finally:
self._writer = None
def send(self, conn):
# Specify request target:
# - CONNECT request must send authority form URI
# - not CONNECT proxy must send absolute form URI
# - most common is origin form URI
if self.method == hdrs.METH_CONNECT:
path = '{}:{}'.format(self.url.raw_host, self.url.port)
elif self.proxy and not self.ssl:
path = str(self.url)
else:
path = self.url.raw_path
if self.url.raw_query_string:
path += '?' + self.url.raw_query_string
writer = PayloadWriter(conn.writer, self.loop)
if self.compress:
writer.enable_compression(self.compress)
if self.chunked is not None:
writer.enable_chunking()
# set default content-type
if (self.method in self.POST_METHODS and
hdrs.CONTENT_TYPE not in self.skip_auto_headers and
hdrs.CONTENT_TYPE not in self.headers):
self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'
# set the connection header
connection = self.headers.get(hdrs.CONNECTION)
if not connection:
if self.keep_alive():
if self.version == HttpVersion10:
connection = 'keep-alive'
else:
if self.version == HttpVersion11:
connection = 'close'
if connection is not None:
self.headers[hdrs.CONNECTION] = connection
# status + headers
status_line = '{0} {1} HTTP/{2[0]}.{2[1]}\r\n'.format(
self.method, path, self.version)
writer.write_headers(status_line, self.headers)
self._writer = helpers.ensure_future(
self.write_bytes(writer, conn), loop=self.loop)
self.response = self.response_class(
self.method, self.original_url,
writer=self._writer, continue100=self._continue, timer=self._timer,
request_info=self.request_info
)
self.response._post_init(self.loop, self._session)
return self.response
@asyncio.coroutine
def close(self):
if self._writer is not None:
try:
yield from self._writer
finally:
self._writer = None
def terminate(self):
if self._writer is not None:
if not self.loop.is_closed():
self._writer.cancel()
self._writer = None
class ClientResponse(HeadersMixin):
# from the Status-Line of the response
version = None # HTTP-Version
status = None # Status-Code
reason = None # Reason-Phrase
content = None # Payload stream
headers = None # Response headers, CIMultiDictProxy
raw_headers = None # Response raw headers, a sequence of pairs
_connection = None # current connection
flow_control_class = FlowControlStreamReader # reader flow control
_reader = None # input stream
_source_traceback = None
    # set up by ClientRequest after the ClientResponse object is created;
    # the post-init stage avoids changing the ctor signature
_loop = None
_closed = True # to allow __del__ for non-initialized properly response
_session = None
def __init__(self, method, url, *,
writer=None, continue100=None, timer=None,
request_info=None):
assert isinstance(url, URL)
self.method = method
self.headers = None
self.cookies = SimpleCookie()
self._url = url
self._content = None
self._writer = writer
self._continue = continue100
self._closed = True
self._history = ()
self._request_info = request_info
self._timer = timer if timer is not None else TimerNoop()
@property
def url(self):
return self._url
@property
def url_obj(self):
warnings.warn(
"Deprecated, use .url #1654", DeprecationWarning, stacklevel=2)
return self._url
@property
def host(self):
return self._url.host
@property
def _headers(self):
return self.headers
@property
def request_info(self):
return self._request_info
def _post_init(self, loop, session):
self._loop = loop
self._session = session # store a reference to session #1985
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
def __del__(self, _warnings=warnings):
if self._loop is None:
return # not started
if self._closed:
return
if self._connection is not None:
self._connection.release()
self._cleanup_writer()
# warn
if __debug__:
if self._loop.get_debug():
_warnings.warn("Unclosed response {!r}".format(self),
ResourceWarning)
context = {'client_response': self,
'message': 'Unclosed response'}
if self._source_traceback:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
def __repr__(self):
out = io.StringIO()
ascii_encodable_url = str(self.url)
if self.reason:
ascii_encodable_reason = self.reason.encode('ascii',
'backslashreplace') \
.decode('ascii')
else:
ascii_encodable_reason = self.reason
print('<ClientResponse({}) [{} {}]>'.format(
ascii_encodable_url, self.status, ascii_encodable_reason),
file=out)
print(self.headers, file=out)
return out.getvalue()
@property
def connection(self):
return self._connection
@property
def history(self):
"""A sequence of of responses, if redirects occurred."""
return self._history
@asyncio.coroutine
def start(self, connection, read_until_eof=False):
"""Start response processing."""
self._closed = False
self._protocol = connection.protocol
self._connection = connection
connection.protocol.set_response_params(
timer=self._timer,
skip_payload=self.method.lower() == 'head',
skip_status_codes=(204, 304),
read_until_eof=read_until_eof)
with self._timer:
while True:
# read response
try:
(message, payload) = yield from self._protocol.read()
except http.HttpProcessingError as exc:
raise ClientResponseError(
self.request_info, self.history,
code=exc.code,
message=exc.message, headers=exc.headers) from exc
if (message.code < 100 or
message.code > 199 or message.code == 101):
break
if self._continue is not None and not self._continue.done():
self._continue.set_result(True)
self._continue = None
# payload eof handler
payload.on_eof(self._response_eof)
# response status
self.version = message.version
self.status = message.code
self.reason = message.reason
# headers
self.headers = CIMultiDictProxy(message.headers)
self.raw_headers = tuple(message.raw_headers)
# payload
self.content = payload
# cookies
for hdr in self.headers.getall(hdrs.SET_COOKIE, ()):
try:
self.cookies.load(hdr)
except CookieError as exc:
client_logger.warning(
'Can not load response cookies: %s', exc)
return self
def _response_eof(self):
if self._closed:
return
if self._connection is not None:
# websocket, protocol could be None because
# connection could be detached
if (self._connection.protocol is not None and
self._connection.protocol.upgraded):
return
self._connection.release()
self._connection = None
self._closed = True
self._cleanup_writer()
@property
def closed(self):
return self._closed
def close(self):
if self._closed:
return
self._closed = True
if self._loop is None or self._loop.is_closed():
return
if self._connection is not None:
self._connection.close()
self._connection = None
self._cleanup_writer()
self._notify_content()
def release(self):
if self._closed:
return noop()
self._closed = True
if self._connection is not None:
self._connection.release()
self._connection = None
self._cleanup_writer()
self._notify_content()
return noop()
def raise_for_status(self):
if 400 <= self.status:
raise ClientResponseError(
self.request_info,
self.history,
code=self.status,
message=self.reason,
headers=self.headers)
def _cleanup_writer(self):
if self._writer is not None and not self._writer.done():
self._writer.cancel()
self._writer = None
self._session = None
def _notify_content(self):
content = self.content
if content and content.exception() is None and not content.is_eof():
content.set_exception(
ClientConnectionError('Connection closed'))
@asyncio.coroutine
def wait_for_close(self):
if self._writer is not None:
try:
yield from self._writer
finally:
self._writer = None
self.release()
@asyncio.coroutine
def read(self):
"""Read response payload."""
if self._content is None:
try:
self._content = yield from self.content.read()
except:
self.close()
raise
return self._content
def _get_encoding(self):
ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
mtype, stype, _, params = helpers.parse_mimetype(ctype)
encoding = params.get('charset')
if not encoding:
if mtype == 'application' and stype == 'json':
# RFC 7159 states that the default encoding is UTF-8.
encoding = 'utf-8'
else:
encoding = chardet.detect(self._content)['encoding']
if not encoding:
encoding = 'utf-8'
return encoding
@asyncio.coroutine
def text(self, encoding=None, errors='strict'):
"""Read response payload and decode."""
if self._content is None:
yield from self.read()
if encoding is None:
encoding = self._get_encoding()
return self._content.decode(encoding, errors=errors)
@asyncio.coroutine
def json(self, *, encoding=None, loads=json.loads,
content_type='application/json'):
"""Read and decodes JSON response."""
if self._content is None:
yield from self.read()
if content_type:
ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
if content_type not in ctype:
raise ClientResponseError(
self.request_info,
self.history,
message=('Attempt to decode JSON with '
'unexpected mimetype: %s' % ctype),
headers=self.headers)
stripped = self._content.strip()
if not stripped:
return None
if encoding is None:
encoding = self._get_encoding()
return loads(stripped.decode(encoding))
if PY_35:
@asyncio.coroutine
def __aenter__(self):
return self
@asyncio.coroutine
def __aexit__(self, exc_type, exc_val, exc_tb):
# similar to _RequestContextManager, we do not need to check
            # for exceptions; the response object can close the connection
            # if its state is broken
self.release()
|
{
"content_hash": "0a71acb11d3f93a9eed7b532ace8bf03",
"timestamp": "",
"source": "github",
"line_count": 751,
"max_line_length": 79,
"avg_line_length": 32.64846870838881,
"alnum_prop": 0.561727639789551,
"repo_name": "arju88nair/projectCulminate",
"id": "ee810cf45646872aa0f473e447fe6f34301f9021",
"size": "24519",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "venv/lib/python3.5/site-packages/aiohttp/client_reqrep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "365921"
},
{
"name": "C++",
"bytes": "237910"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Makefile",
"bytes": "90112"
},
{
"name": "Python",
"bytes": "15199371"
},
{
"name": "Shell",
"bytes": "17795"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('compounds', '0002'),
]
operations = [
migrations.AddField(
model_name='compound',
name='parent_compound',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='compounds.Compound'),
),
migrations.AlterUniqueTogether(
name='compoundbatch',
unique_together=set([('compound', 'lot_number')]),
),
]
|
{
"content_hash": "797535617e0b5bbc4fc3eac899876fd5",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 129,
"avg_line_length": 27.391304347826086,
"alnum_prop": 0.6158730158730159,
"repo_name": "UPDDI/dds-database-server",
"id": "fa2d90c2ac442b5d2be06b36345210eb00fabe86",
"size": "702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compounds/migrations/0003.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "911"
},
{
"name": "HTML",
"bytes": "30160"
},
{
"name": "JavaScript",
"bytes": "13570"
},
{
"name": "Python",
"bytes": "63950"
}
],
"symlink_target": ""
}
|
import hashlib
import random
import datetime
def create_hash(email, secret):
return hashlib.md5(email + secret).hexdigest()
def hash_password(password, salt):
return hashlib.sha256(password + salt).hexdigest()
def create_schedule_time(start, stop):
start_minutes = int(start * 60)
stop_minutes = int(stop * 60)
time = random.randint(start_minutes, stop_minutes)
return datetime.time(hour=time / 60, minute=time % 60)
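# Example (illustration only): create_schedule_time(9, 17.5) converts the
# bounds to 540 and 1050 minutes and returns a uniformly random
# datetime.time between 09:00 and 17:30.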
class Personality(object):
def __init__(self, name, filename):
self.name = name
self.filename = filename
self.messages = self._load()
def _load(self):
messages = []
with open(self.filename, 'r') as f:
for line in f.readlines():
                # skip blank lines first so line[0] below cannot raise IndexError
                if len(line) == 0:
                    continue
                if line[0] == '#':
                    continue
messages.append(line)
return messages
def get_message(self):
return random.choice(self.messages)
|
{
"content_hash": "7ad92763bb87895d34d840ccdc6f967f",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 58,
"avg_line_length": 25.615384615384617,
"alnum_prop": 0.5895895895895896,
"repo_name": "cash/happybot",
"id": "bc76975ee6bbf9b0f2d8190ecf4c99e3f8832cb6",
"size": "999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "happybot/helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "320"
},
{
"name": "HTML",
"bytes": "5800"
},
{
"name": "Python",
"bytes": "12614"
}
],
"symlink_target": ""
}
|
"""High-level representations of AMQP protocol objects
"""
import struct
from .serialization import AMQPReader, AMQPWriter
from .spec import FrameType, method_t
from .message import Message
class Frame:
"""AMQP frame
A `Frame` represents the lowest-level packet of data specified by the AMQP 0.9.1
wire-level protocol. All methods and messages are packed into one or more frames before being
sent to the peer.
The format of the AMQP frame is as follows::
offset: 0 1 3 7 size+7 size+8
+------+---------+---------+-------------------+-----------+
| type | channel | size | --- payload --- | frame-end |
+------+---------+---------+-------------------+-----------+
size (bytes) 1 2 4 size 1
"""
def __init__(self, frame_type=None, channel=0, payload=bytes()):
"""Create new Frame
Leave all three parameters as default to create an empty frame whose `data` can be manually
written to afterwards.
:param frame_type: frame type
:param channel: associated channel number
:param payload: frame payload
:type frame_type: int
:type channel: int
:type payload: bytes or bytearray
"""
#: raw frame data; can be manually manipulated at any time
#:
#: :type: bytearray
self.data = bytearray()
self._frame_type = None
self._channel = None
self._payload_size = None
# create bytearray from provided data
if frame_type is not None:
self._frame_type = frame_type
self._channel = channel
self._payload_size = len(payload)
frame_format = '>BHI{}sB'.format(self._payload_size)
self.data = struct.pack(frame_format, frame_type, channel, self._payload_size, payload,
FrameType.END)
@property
def frame_type(self):
"""Get frame type
:return: frame type
:rtype: int
"""
if self._frame_type is not None:
return self._frame_type
else:
self._frame_type = struct.unpack_from('>B', self.data)[0]
return self._frame_type
@property
def channel(self):
"""Get frame channel number
:return: channel number
:rtype: int
"""
if self._channel is not None:
return self._channel
else:
self._channel = struct.unpack_from('>H', self.data, 1)[0]
return self._channel
@property
def payload_size(self):
"""Get frame payload size
:return: payload size
:rtype: int
"""
if self._payload_size is not None:
return self._payload_size
else:
self._payload_size = struct.unpack_from('>I', self.data, 3)[0]
return self._payload_size
@property
def payload(self):
"""Get frame payload
:return: payload
:rtype: bytearray
"""
return self.data[7:-1]
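# Illustration (not part of the original module): packing a frame and reading
# the fields back, assuming FrameType.METHOD == 1 and FrameType.END == 0xCE
# per the AMQP 0.9.1 spec:
#
#     f = Frame(FrameType.METHOD, channel=1, payload=b'\x00\x0a\x00\x0a')
#     assert f.frame_type == FrameType.METHOD
#     assert f.channel == 1
#     assert f.payload_size == 4
#     assert f.payload == b'\x00\x0a\x00\x0a'   # data[7:-1]: header is 7 bytes, frame-end is 1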
class Method:
"""AMQP method
The AMQP 0.9.1 protocol specifies communication as sending and receiving "methods". Methods
consist of a "class-id" and "method-id" and are represented by a `method_t` namedtuple in amqpy.
Methods are packed into the payload of a `FrameType.METHOD` frame, and most methods can be fully
    sent in a single frame. If the method is specified as carrying content (such as a message), the
method frame is followed by additional frames: a `FrameType.HEADER` frame, then zero or more
`FrameType.BODY` frames.
The format of the `FrameType.METHOD` frame's payload is as follows::
offset: 0 2 4
+----------+-----------+-------------- - -
| class-id | method-id | arguments...
+----------+-----------+-------------- - -
size (bytes): 2 2 variable
The format of the `FrameType.HEADER` frame's payload is as follows::
offset: 0 2 4 12 14
+----------+--------+-----------+----------------+------------------- - -
| class-id | weight | body size | property flags | property list...
+----------+--------+-----------+----------------+------------------- - -
size (bytes): 2 2 8 2 variable
The format of the `FrameType.BODY` frame's payload is simply raw binary data of the message
body.
"""
def __init__(self, method_type=None, args=None, content=None, channel_id=None):
"""
:param method_type: method type
:param args: method args
:param content: content
:param channel_id: the associated channel ID, if any
:type method_type: method_t
:type args: AMQPReader or AMQPWriter or None
:type content: amqp.message.GenericContent or None
:type channel_id: int or None
"""
#: :type: amqpy.spec.method_t
self.method_type = method_type
if isinstance(args, (AMQPReader, AMQPWriter)):
self.args = args
elif args is None:
self.args = AMQPWriter()
else:
raise ValueError('args must be an instance of `AMQPReader` or `AMQPWriter`')
#: :type: amqpy.message.GenericContent or None
self.content = content # GenericContent if this method is carrying content
#: :type: int
self.channel_id = channel_id
self._body_bytes = bytearray() # used internally to store encoded GenericContent body
self._expected_body_size = None # set automatically when `load_header_frame()` is called
def load_method_frame(self, frame):
"""Load method frame payload data
This method is intended to be called when constructing a `Method` from incoming data.
After calling, `self.method_type`, `self.args`, and `self.channel_id` will be loaded with
data from the frame.
:param frame: `FrameType.METHOD` frame
:type frame: amqpy.proto.Frame
"""
# noinspection PyTypeChecker
self.method_type = method_t(*struct.unpack('>HH', frame.payload[:4]))
self.args = AMQPReader(frame.payload[4:])
self.channel_id = frame.channel
def load_header_frame(self, frame):
"""Add header to partial method
This method is intended to be called when constructing a `Method` from incoming data.
:param frame: `FrameType.HEADER` frame
:type frame: amqpy.proto.Frame
"""
if not self.content:
self.content = Message()
# noinspection PyTypeChecker
class_id, weight, self._expected_body_size = struct.unpack('>HHQ', frame.payload[:12])
self.content.load_properties(frame.payload[12:])
def load_body_frame(self, frame):
"""Add content to partial method
This method is intended to be called when constructing a `Method` from incoming data.
:param frame: `FrameType.BODY` frame
:type frame: amqpy.proto.Frame
"""
self._body_bytes.extend(frame.payload)
if self.complete:
self.content.body = bytes(self._body_bytes)
@property
def complete(self):
"""Check if the message that is carried by this method has been completely assembled,
i.e. the expected number of bytes have been loaded
This method is intended to be called when constructing a `Method` from incoming data.
:return: True if method is complete, else False
:rtype: bool
"""
return self._expected_body_size == 0 or len(self._body_bytes) == self._expected_body_size
def _pack_method(self):
"""Pack this method into a bytes object suitable for using as a payload for
`FrameType.METHOD` frames
This method is intended to be called when packing an already-completed `Method` into
outgoing frames.
:return: bytes
:rtype: bytes
"""
return struct.pack('>HH', self.method_type.class_id,
self.method_type.method_id) + self.args.getvalue()
def _pack_header(self):
"""Pack this method into a bytes object suitable for using as a payload for
`FrameType.HEADER` frames
This method is intended to be called when packing an already-completed `Method` into
outgoing frames.
:return: bytes
:rtype: bytes
"""
if not self.content:
raise ValueError('`_pack_header()` is only meaningful if there is content to pack')
self._body_bytes = self.content.body
if isinstance(self._body_bytes, str):
# encode body to bytes
coding = self.content.properties.setdefault('content_encoding', 'UTF-8')
self._body_bytes = self.content.body.encode(coding)
properties = self.content.serialize_properties()
return struct.pack('>HHQ', self.method_type.class_id, 0, len(self._body_bytes)) + properties
def _pack_body(self, chunk_size):
"""Pack this method into a bytes object suitable for using as a payload for
`FrameType.BODY` frames
This method is intended to be called when packing an already-completed `Method` into
outgoing frames.
:param chunk_size: split up body into pieces that are at most `chunk_size` bytes each
:type chunk_size: int
:return: bytes generator
:rtype: generator[bytes]
"""
if not self.content:
raise ValueError('`_pack_body()` is only meaningful if there is content to pack')
for i in range(0, len(self._body_bytes), chunk_size):
yield self._body_bytes[i:i + chunk_size]
def dump_method_frame(self):
"""Create a method frame
This method is intended to be called when sending frames for an already-completed `Method`.
:return: `FrameType.METHOD` frame
:rtype: amqpy.proto.Frame
"""
frame = Frame(FrameType.METHOD, self.channel_id, self._pack_method())
return frame
def dump_header_frame(self):
"""Create a header frame
This method is intended to be called when sending frames for an already-completed `Method`.
:return: `FrameType.HEADER` frame
:rtype: amqpy.proto.Frame
"""
frame = Frame(FrameType.HEADER, self.channel_id, self._pack_header())
return frame
def dump_body_frame(self, chunk_size):
"""Create a body frame
This method is intended to be called when sending frames for an already-completed `Method`.
:param chunk_size: body chunk size in bytes; this is typically the maximum frame size - 8
:type chunk_size: int
:return: generator of `FrameType.BODY` frames
:rtype: generator[amqpy.proto.Frame]
"""
for payload in self._pack_body(chunk_size):
frame = Frame(FrameType.BODY, self.channel_id, payload)
yield frame
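# Illustration (not part of the original module): the outgoing frame sequence
# for a content-carrying method, assuming `meth` is a fully-populated Method
# and `frame_max` is the negotiated maximum frame size:
#
#     frames = [meth.dump_method_frame(), meth.dump_header_frame()]
#     frames.extend(meth.dump_body_frame(chunk_size=frame_max - 8))
#
# The first frame carries class-id/method-id plus args, the second carries the
# property header with the total body size, and the rest carry the body split
# into chunk_size pieces.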
|
{
"content_hash": "3319fca1ba8b47527bde37579b6971f9",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 100,
"avg_line_length": 36.9640522875817,
"alnum_prop": 0.5740429670232517,
"repo_name": "gst/amqpy",
"id": "9bdcec2f3116f834a973c88d0d9ee68b3112cb1a",
"size": "11311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amqpy/proto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "205190"
}
],
"symlink_target": ""
}
|
"""
Find the contiguous subarray within an array (containing at least one number) which has the largest sum.
For example, given the array [-2,1,-3,4,-1,2,1,-5,4],
the contiguous subarray [4,-1,2,1] has the largest sum = 6.
"""
class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return 0
curSum = maxSum = nums[0]
for num in nums[1:]:
curSum = max(num, curSum + num)
maxSum = max(maxSum, curSum)
return maxSum
if __name__ == "__main__":
sample = Solution()
print(sample.maxSubArray([-2,1,-3,4,-1,2,1,-5,4]))
|
{
"content_hash": "f425a5f73b0cb7a2caa842fd87b89639",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 104,
"avg_line_length": 25.25925925925926,
"alnum_prop": 0.5542521994134897,
"repo_name": "Vonzpf/LeetCode",
"id": "6c621d159988a1991a4198c1d5cb069ff47e0e24",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/MaximumSubarray.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "25807"
},
{
"name": "Python",
"bytes": "43800"
}
],
"symlink_target": ""
}
|
from frappe.model.document import Document
class UserSocialLogin(Document):
pass
|
{
"content_hash": "144bd5792dd1f423b43ebbc647395f04",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 42,
"avg_line_length": 20.75,
"alnum_prop": 0.8313253012048193,
"repo_name": "almeidapaulopt/frappe",
"id": "80c0c89383dbd4fbc631932405d524aa1964c7bf",
"size": "195",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/core/doctype/user_social_login/user_social_login.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67734"
},
{
"name": "HTML",
"bytes": "245760"
},
{
"name": "JavaScript",
"bytes": "2345089"
},
{
"name": "Less",
"bytes": "25489"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3436599"
},
{
"name": "SCSS",
"bytes": "248606"
},
{
"name": "Shell",
"bytes": "3505"
},
{
"name": "Vue",
"bytes": "96912"
}
],
"symlink_target": ""
}
|
import os
from flask_script import Manager
from fight_simulator import app
from fight_simulator.database import session, Fighter, History, User, Base
from getpass import getpass
from werkzeug.security import generate_password_hash
from flask_migrate import Migrate, MigrateCommand
manager = Manager(app)
@manager.command
def run():
port = int(os.environ.get('PORT', 8080))
app.run(host='0.0.0.0', port=port)
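# Flask-Migrate expects a Flask-SQLAlchemy-style `db` object exposing a
# `.metadata` attribute; the thin wrapper below lets it target the plain
# SQLAlchemy Base metadata instead.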
class DB(object):
def __init__(self, metadata):
self.metadata = metadata
migrate = Migrate(app, DB(Base.metadata))
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
|
{
"content_hash": "fa833110d0e72a38e6c18caa900cf2db",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 74,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.7262658227848101,
"repo_name": "tydonk/fight_simulator",
"id": "1785f13259e2e8c095c4b7de50f0810e7c5b62fb",
"size": "632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2256"
},
{
"name": "HTML",
"bytes": "25821"
},
{
"name": "JavaScript",
"bytes": "17978"
},
{
"name": "Python",
"bytes": "43649"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
def migrate_payout_details(apps, schema_editor):
Project = apps.get_model('projects', 'Project')
PlainPayoutAccount = apps.get_model('payouts', 'PlainPayoutAccount')
PayoutDocument = apps.get_model('payouts', 'PayoutDocument')
ContentType = apps.get_model('contenttypes', 'ContentType')
new_ct = ContentType.objects.get_for_model(PlainPayoutAccount)
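    # NOTE: historical models inside migrations bypass django-polymorphic's
    # save() machinery, so polymorphic_ctype has to be assigned explicitly
    # below for the new PlainPayoutAccount rows to resolve to the right class.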
for project in Project.objects.filter(owner__isnull=False,
account_number__isnull=False).all():
project.payout_account = PlainPayoutAccount.objects.create(
user=project.owner,
account_holder_name=project.account_holder_name,
account_holder_address=project.account_holder_address,
account_holder_postal_code=project.account_holder_postal_code,
account_holder_city=project.account_holder_city,
account_holder_country=project.account_holder_country,
account_number=project.account_number,
account_details=project.account_details,
account_bank_country=project.account_bank_country,
polymorphic_ctype=new_ct
)
project.save()
if project.payout_account and len(project.documents.all()):
document = project.documents.all()[0]
project.payout_account.document = PayoutDocument.objects.create(
payout_account=project.payout_account,
author=document.author,
file=document.file,
created=document.created,
updated=document.updated,
ip_address=document.ip_address
)
project.payout_account.save()
def remove_payout_accounts(apps, schema_editor):
PlainPayoutAccount = apps.get_model('payouts', 'PlainPayoutAccount')
PlainPayoutAccount.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('projects', '0082_auto_20181129_1506'),
('payouts', '0009_payoutdocument')
]
operations = [
migrations.RunPython(migrate_payout_details,
remove_payout_accounts),
]
|
{
"content_hash": "06b089faab4be1415ca67645ba200aae",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 78,
"avg_line_length": 38.55172413793103,
"alnum_prop": 0.6413237924865832,
"repo_name": "onepercentclub/bluebottle",
"id": "83c190ad2f5d6c8a9c787a1ef8ec4329f6b2aea1",
"size": "2309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/projects/migrations/0083_auto_20181129_1506.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
}
|
from pychron.pipeline.editors.audit_editor import AuditEditor
from pychron.pipeline.nodes.base import BaseNode
class AuditNode(BaseNode):
auto_configure = False
name = "Audit"
configurable = False
def run(self, state):
editor = AuditEditor()
editor.set_unks_refs(state.unknowns, state.references)
state.editors.append(editor)
# ============= EOF =============================================
|
{
"content_hash": "528b0181740e922780f929d3c1d357be",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 65,
"avg_line_length": 25.764705882352942,
"alnum_prop": 0.6073059360730594,
"repo_name": "USGSDenverPychron/pychron",
"id": "b938fe27857f4defb71f3eb56375a257a3a5e532",
"size": "1166",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/pipeline/nodes/audit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
}
|
from mapmanager import MapManager
from visualizer import MapVisualizer
|
{
"content_hash": "8816c85aeb2130c22729989053a1da6e",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 36,
"avg_line_length": 35,
"alnum_prop": 0.9,
"repo_name": "MadeInPierre/RobotOS",
"id": "edeb1adcecfe40efd0fe1cab1217865b2120f7ed",
"size": "70",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot_memory_map_old/src/MapManager/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "137388"
},
{
"name": "CMake",
"bytes": "432499"
},
{
"name": "Common Lisp",
"bytes": "156639"
},
{
"name": "JavaScript",
"bytes": "63021"
},
{
"name": "Makefile",
"bytes": "360"
},
{
"name": "NewLisp",
"bytes": "10743"
},
{
"name": "Python",
"bytes": "110585"
},
{
"name": "Shell",
"bytes": "3581"
}
],
"symlink_target": ""
}
|
from . import TEST_FILES
import pytest
from qtpy import QtCore
from pdsspect.pdsspect_image_set import PDSSpectImageSet, SubPDSSpectImageSet
from pdsspect.pdsspect_view import (
PDSSpectView,
PDSSpectViewWidget,
PDSSpectViewController
)
from ginga.Bindings import ScrollEvent
class TestPDSSpectViewController(object):
image_set = PDSSpectImageSet(TEST_FILES)
controller = PDSSpectViewController(image_set, None)
default_center = image_set.center
@pytest.fixture
def test_set(self):
yield self.image_set
self.image_set.zoom = 1
self.image_set.center = self.default_center
def test_init(self, test_set):
assert self.controller.image_set == test_set
assert self.controller.view is None
def test_change_pan_center(self, test_set):
test_set.zoom = 2
assert test_set.center == (16, 32)
self.controller.change_pan_center(24, 48)
assert test_set.center == (24, 48)
self.controller.change_pan_center(8, 16)
assert test_set.center == (8, 16)
self.controller.change_pan_center(16, 32)
assert test_set.center == (16, 32)
test_set.zoom = 1
assert test_set.center == (16, 32)
def test_change_pan_size(self, test_set):
assert test_set.zoom == 1
self.controller.change_pan_size(2)
assert test_set.zoom == 2
self.controller.change_pan_size(1)
assert test_set.zoom == 1
class TestPDSSpectView(object):
image_set = PDSSpectImageSet(TEST_FILES)
view = PDSSpectView(image_set)
@pytest.fixture
def test_set(self, qtbot):
self.view.show()
qtbot.add_widget(self.view)
qtbot.add_widget(self.view.pan_view)
yield self.image_set
self.image_set._current_image_index = 0
self.image_set.images[0].cuts = (None, None)
self.image_set.images[1].cuts = (None, None)
self.view.set_image()
self.image_set._flip_x = False
self.image_set._flip_y = False
self.image_set._swap_xy = False
self.image_set.zoom = 1
def test_set_image(self, qtbot, test_set):
view_image = self.view.view_canvas.get_image()
assert view_image == test_set.current_image
test_set._current_image_index += 1
self.view.set_image()
assert not view_image == test_set.current_image
view_image = self.view.view_canvas.get_image()
assert view_image == test_set.current_image
test_set._current_image_index -= 1
test_set.current_image.cuts = (10, 100)
self.view.set_image()
assert not view_image == test_set.current_image
view_image = self.view.view_canvas.get_image()
assert view_image == test_set.current_image
assert self.view.view_canvas.get_cut_levels() == (10, 100)
def test_set_transforms(self, qtbot, test_set):
assert self.view.view_canvas.get_transforms() == (False, False, False)
assert test_set.transforms == (False, False, False)
test_set._flip_x = True
assert test_set.transforms == (True, False, False)
assert self.view.view_canvas.get_transforms() == (False, False, False)
self.view.set_transforms()
assert self.view.view_canvas.get_transforms() == (True, False, False)
test_set._swap_xy = True
self.view.set_transforms()
assert self.view.view_canvas.get_transforms() == (True, False, True)
test_set._flip_x = False
test_set._swap_xy = False
self.view.set_transforms()
assert self.view.view_canvas.get_transforms() == (False, False, False)
def test_change_zoom(self, qtbot, test_set):
assert float(self.view.zoom_text.text()) == test_set.zoom
self.view.zoom_text.setText('2')
self.view.change_zoom()
assert test_set.zoom == 2.0
self.view.zoom_text.setText('1.00')
qtbot.keyPress(self.view.zoom_text, QtCore.Qt.Key_Return)
assert test_set.zoom == 1.0
self.view.zoom_text.setText('foo')
qtbot.keyPress(self.view.zoom_text, QtCore.Qt.Key_Return)
assert test_set.zoom == 1.0
def test_adjust_pan_size(self, qtbot, test_set):
assert self.view.pan.xradius == 16.5
assert self.view.pan.yradius == 32.5
test_set._zoom = 2
self.view.pan.x = 20
self.view.pan.y = 30
self.view.adjust_pan_size()
assert self.view.pan.xradius == 8.5
assert self.view.pan.yradius == 16.5
assert test_set.center == (20, 30)
test_set._zoom = 1
self.view.adjust_pan_size()
assert self.view.pan.xradius == 16.5
assert self.view.pan.yradius == 32.5
assert test_set.center == (16, 32)
def test_zoom_with_scroll(self, test_set):
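        # ginga's ScrollEvent direction is given in degrees; judging from the
        # asserts below, this view treats 0.0 as scrolling forward (zoom in),
        # other directions as backward (zoom out), and clamps zoom at 1.0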
        forward = ScrollEvent(direction=0.0)
backwards = ScrollEvent(direction=15.0)
assert test_set.zoom == 1.0
        self.view.zoom_with_scroll(self.view.view_canvas, forward)
assert test_set.zoom == 2.0
self.view.zoom_with_scroll(self.view.view_canvas, backwards)
assert test_set.zoom == 1.0
self.view.zoom_with_scroll(self.view.view_canvas, backwards)
assert test_set.zoom == 1.0
def test_arrow_key_move_center(self, test_set):
test_set.zoom = 2
default_x, default_y = test_set.center
self.view.arrow_key_move_center(self.view.view_canvas, 'left')
assert test_set.center == (default_x - 1, default_y)
self.view.arrow_key_move_center(self.view.view_canvas, 'right')
assert test_set.center == (default_x, default_y)
self.view.arrow_key_move_center(self.view.view_canvas, 'right')
assert test_set.center == (default_x + 1, default_y)
self.view.arrow_key_move_center(self.view.view_canvas, 'left')
assert test_set.center == (default_x, default_y)
self.view.arrow_key_move_center(self.view.view_canvas, 'up')
assert test_set.center == (default_x, default_y + 1)
self.view.arrow_key_move_center(self.view.view_canvas, 'down')
assert test_set.center == (default_x, default_y)
self.view.arrow_key_move_center(self.view.view_canvas, 'down')
assert test_set.center == (default_x, default_y - 1)
self.view.arrow_key_move_center(self.view.view_canvas, 'up')
assert test_set.center == (default_x, default_y)
def test_change_center(self, qtbot, test_set):
test_set.zoom = 2
assert test_set.center == (16, 32)
self.view.change_center(None, None, 20, 30)
assert test_set.center == (20, 30)
self.view.change_center(None, None, 8, 16)
assert test_set.center == (8, 16)
self.view.change_center(None, None, 16, 32)
assert test_set.center == (16, 32)
test_set.zoom = 1
assert test_set.center == (16, 32)
def test_move_pan(self, qtbot, test_set):
test_set.zoom = 2
test_set._center = (20, 30)
assert self.view.pan.x == 16
assert self.view.pan.y == 32
self.view.move_pan()
assert self.view.pan.x == 20
assert self.view.pan.y == 30
test_set._center = (16, 32)
self.view.move_pan()
assert self.view.pan.x == 16
assert self.view.pan.y == 32
test_set.zoom = 1
class TestPDSSpectViewWidget(object):
image_set = PDSSpectImageSet(TEST_FILES)
@pytest.fixture
def view_widget(self):
self.image_set = PDSSpectImageSet(TEST_FILES)
return PDSSpectViewWidget(self.image_set)
def test_init(self, view_widget):
assert view_widget.image_set == self.image_set
spect_view = view_widget.spect_views[0]
assert view_widget.main_layout.itemAt(0).widget() == spect_view
def test_create_spect_view(self, view_widget):
subset = SubPDSSpectImageSet(self.image_set)
spect_view = view_widget.create_spect_view(subset)
        assert spect_view == view_widget.spect_views[1]
assert view_widget.main_layout.itemAt(1).widget() == spect_view
|
{
"content_hash": "06a03e793296217d54b6811492ab4527",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 78,
"avg_line_length": 39.33658536585366,
"alnum_prop": 0.6243799603174603,
"repo_name": "planetarypy/pdsspect",
"id": "eaecf6b2853a7dc16c673d7408a427ef2444440a",
"size": "8064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pdsspect_view.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1735"
},
{
"name": "Python",
"bytes": "319575"
}
],
"symlink_target": ""
}
|
import asyncio
import logging
from collections import defaultdict
from collections import deque
import re
import fnmatch
from .ami_protocol import AMIProtocol
from . import actions
from . import utils
class Manager:
"""Main object:
.. code-block:: python
>>> manager = Manager(
... host='127.0.0.1',
... port=5038,
... ssl=False,
... encoding='utf8')
"""
defaults = dict(
host='127.0.0.1',
port=5038,
events='on',
ssl=False,
encoding='utf8',
ping_delay=10,
ping_interval=10,
reconnect_timeout=2,
protocol_factory=AMIProtocol,
save_stream=None,
loop=None,
forgetable_actions=('ping', 'login'),
)
def __init__(self, **config):
self.config = dict(self.defaults, **config)
self.loop = self.config['loop']
self.log = config.get('log', logging.getLogger(__name__))
self.callbacks = defaultdict(list)
self.protocol = None
self.patterns = []
self.save_stream = self.config.get('save_stream')
self.authenticated = False
self.authenticated_future = None
self.awaiting_actions = deque()
self.forgetable_actions = self.config['forgetable_actions']
self.pinger = None
self.ping_delay = int(self.config['ping_delay'])
self.ping_interval = int(self.config['ping_interval'])
self.reconnect_timeout = int(self.config['reconnect_timeout'])
self._connected = False
self.register_event('FullyBooted', self.send_awaiting_actions)
self.on_login = config.get('on_login', on_login)
self.on_connect = config.get('on_connect', on_connect)
self.on_disconnect = config.get('on_disconnect', on_disconnect)
def connection_made(self, f):
if getattr(self, 'protocol', None):
self.protocol.close()
try:
transport, protocol = f.result()
except OSError: # pragma: no cover
if self._connected:
self.log.exception('Not able to connect')
self._connected = False
else:
self.log.warning('Not able to reconnect')
self.loop.call_later(self.reconnect_timeout, self.connect)
else:
self._connected = True
self.log.debug('Manager connected')
self.loop.call_soon(self.on_connect, self)
self.protocol = protocol
self.protocol.queue = deque()
self.protocol.factory = self
self.protocol.log = self.log
self.protocol.config = self.config
self.protocol.encoding = self.encoding = self.config['encoding']
self.responses = self.protocol.responses = {}
if 'username' in self.config:
self.authenticated = False
self.authenticated_future = self.send_action({
'Action': 'Login',
'Username': self.config['username'],
'Secret': self.config['secret'],
'Events': self.config['events']})
self.authenticated_future.add_done_callback(self.login)
else:
self.log.debug('username not in config file')
self.pinger = self.loop.call_later(self.ping_delay, self.ping)
def login(self, future):
self.authenticated_future = None
resp = future.result()
self.authenticated = bool(resp.success)
if self.authenticated:
self.loop.call_soon(self.on_login, self)
if self.pinger is not None:
self.pinger.cancel()
self.pinger = self.loop.call_later(self.ping_delay, self.ping)
return self.authenticated
def ping(self): # pragma: no cover
self.pinger = self.loop.call_later(self.ping_interval, self.ping)
self.protocol.send({'Action': 'Ping'})
async def send_awaiting_actions(self, *_):
self.log.info('Sending awaiting actions')
while self.awaiting_actions:
action = self.awaiting_actions.popleft()
if action['action'].lower() not in self.forgetable_actions:
if not action.done():
self.send_action(action, as_list=action.as_list)
def send_action(self, action, as_list=None, **kwargs):
"""Send an :class:`~panoramisk.actions.Action` to the server:
:param action: an Action or dict with action name and parameters to
send
:type action: Action or dict or Command
:param as_list: If True, the action will retrieve all responses
:type as_list: boolean
:return: an Action that will receive the response(s)
:rtype: panoramisk.actions.Action
:Example:
To retrieve answer::
manager = Manager()
resp = await manager.send_action({'Action': 'Status'})
Or with an async for::
manager = Manager()
async for resp in manager.send_action({'Action': 'Status'}):
print(resp)
See https://wiki.asterisk.org/wiki/display/AST/AMI+Actions for
more information on actions
"""
action.update(kwargs)
return self.protocol.send(action, as_list=as_list)
def send_command(self, command, as_list=False):
"""Send a :class:`~panoramisk.actions.Command` to the server::
manager = Manager()
resp = await manager.send_command('http show status')
Return a response :class:`~panoramisk.message.Message`.
See https://wiki.asterisk.org/wiki/display/AST/ManagerAction_Command
"""
action = actions.Action({'Command': command, 'Action': 'Command'},
as_list=as_list)
return self.send_action(action)
def send_agi_command(self, channel, command, as_list=False):
"""Send a :class:`~panoramisk.actions.Command` to the server:
:param channel: Channel name where to launch command.
Ex: 'SIP/000000-00000a53'
:type channel: String
:param command: command to launch. Ex: 'GET VARIABLE async_agi_server'
:type command: String
:param as_list: If True, the action Future will retrieve all responses
:type as_list: boolean
:return: a Future that will receive the response
:rtype: asyncio.Future
:Example:
::
manager = Manager()
resp = manager.send_agi_command('SIP/000000-00000a53',
'GET VARIABLE async_agi_server')
Return a response :class:`~panoramisk.message.Message`.
See https://wiki.asterisk.org/wiki/display/AST/Asterisk+11+ManagerAction_AGI
"""
action = actions.Command({'Action': 'AGI',
'Channel': channel,
'Command': command},
as_list=as_list)
return self.send_action(action)
def connect(self, run_forever=False, on_startup=None, on_shutdown=None):
"""connect to the server"""
if self.loop is None: # pragma: no cover
self.loop = asyncio.get_event_loop()
t = asyncio.Task(
self.loop.create_connection(
self.config['protocol_factory'],
self.config['host'], self.config['port'],
ssl=self.config['ssl']),
loop=self.loop)
t.add_done_callback(self.connection_made)
if run_forever:
self.run_forever(on_startup, on_shutdown)
return t
def run_forever(self, on_startup, on_shutdown):
"""Start loop forever"""
try:
if on_startup:
self.loop.run_until_complete(on_startup(self))
self.loop.run_forever()
except (KeyboardInterrupt, SystemExit):
self.close()
finally:
if on_shutdown:
self.loop.run_until_complete(on_shutdown(self))
self.loop.stop()
def register_event(self, pattern, callback=None):
"""register an event. See :class:`~panoramisk.message.Message`:
.. code-block:: python
>>> def callback(manager, event):
... print(manager, event)
>>> manager = Manager()
>>> manager.register_event('Meetme*', callback)
<function callback at 0x...>
You can also use the manager as a decorator:
.. code-block:: python
>>> manager = Manager()
>>> @manager.register_event('Meetme*')
... def callback(manager, event):
... print(manager, event)
"""
def _register_event(callback):
if not self.callbacks[pattern]:
self.patterns.append((pattern,
re.compile(fnmatch.translate(pattern))))
self.callbacks[pattern].append(callback)
return callback
if callback is not None:
return _register_event(callback)
else:
return _register_event
def dispatch(self, event):
matches = []
event.manager = self
for pattern, regexp in self.patterns:
match = regexp.match(event.event)
if match is not None:
matches.append(pattern)
for callback in self.callbacks[pattern]:
ret = callback(self, event)
if (asyncio.iscoroutine(ret) or
isinstance(ret, asyncio.Future)):
asyncio.ensure_future(ret, loop=self.loop)
return matches
def close(self):
"""Close the connection"""
if self.pinger:
self.pinger.cancel()
self.pinger = None
if getattr(self, 'protocol', None):
self.protocol.close()
def connection_lost(self, exc):
self._connected = False
self.log.error('Connection lost')
self.loop.call_soon(self.on_disconnect, self, exc)
if self.pinger:
self.pinger.cancel()
self.pinger = None
        self.log.info('Trying to connect again in %d second(s)', self.reconnect_timeout)
self.loop.call_later(self.reconnect_timeout, self.connect)
@classmethod
def from_config(cls, filename_or_fd, section='asterisk', **kwargs):
config = utils.config(filename_or_fd, section=section)
config.update(kwargs)
return cls(**config)
# noinspection PyUnusedLocal
def on_connect(manager: Manager):
"""
Callback after connect
"""
pass
# noinspection PyUnusedLocal
def on_login(manager: Manager):
"""
Callback after login
"""
pass
# noinspection PyUnusedLocal
def on_disconnect(manager: Manager, exc: Exception):
"""
Callback after disconnect
"""
pass
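# Minimal usage sketch (host and credentials below are placeholder values
# and assume a reachable Asterisk AMI):
#
#     manager = Manager(host='127.0.0.1', port=5038,
#                       username='admin', secret='secret')
#
#     @manager.register_event('*')
#     def callback(manager, event):
#         print(event)
#
#     manager.connect(run_forever=True)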
|
{
"content_hash": "39c5b49ef5a376765a2150db7c13a0e8",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 86,
"avg_line_length": 35.00319488817891,
"alnum_prop": 0.5692771084337349,
"repo_name": "gawel/panoramisk",
"id": "29e12ce02560debd54cbba3fc55bd0f0ef91626a",
"size": "10956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "panoramisk/manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58453"
},
{
"name": "Shell",
"bytes": "1691"
}
],
"symlink_target": ""
}
|
import os
from nbgrader.api import Gradebook
from nbgrader.tests import run_command
from nbgrader.tests.apps.base import BaseTestApp
class TestNbGraderAutograde(BaseTestApp):
def test_help(self):
"""Does the help display without error?"""
run_command("nbgrader autograde --help-all")
def test_missing_student(self, gradebook):
"""Is an error thrown when the student is missing?"""
self._copy_file("files/submitted-changed.ipynb", "source/ps1/p1.ipynb")
run_command('nbgrader assign ps1 --db="{}" '.format(gradebook))
self._copy_file("files/submitted-changed.ipynb", "submitted/baz/ps1/p1.ipynb")
run_command('nbgrader autograde ps1 --db="{}" '.format(gradebook), retcode=1)
def test_add_missing_student(self, gradebook):
"""Can a missing student be added?"""
self._copy_file("files/submitted-changed.ipynb", "source/ps1/p1.ipynb")
run_command('nbgrader assign ps1 --db="{}" '.format(gradebook))
self._copy_file("files/submitted-changed.ipynb", "submitted/baz/ps1/p1.ipynb")
run_command('nbgrader autograde ps1 --db="{}" --create'.format(gradebook))
assert os.path.isfile("autograded/baz/ps1/p1.ipynb")
def test_missing_assignment(self, gradebook):
"""Is an error thrown when the assignment is missing?"""
self._copy_file("files/submitted-changed.ipynb", "source/ps1/p1.ipynb")
run_command('nbgrader assign ps1 --db="{}" '.format(gradebook))
self._copy_file("files/submitted-changed.ipynb", "submitted/ps2/foo/p1.ipynb")
run_command('nbgrader autograde ps2 --db="{}" '.format(gradebook), retcode=1)
def test_grade(self, gradebook):
"""Can files be graded?"""
self._copy_file("files/submitted-unchanged.ipynb", "source/ps1/p1.ipynb")
run_command('nbgrader assign ps1 --db="{}" '.format(gradebook))
self._copy_file("files/submitted-unchanged.ipynb", "submitted/foo/ps1/p1.ipynb")
self._copy_file("files/submitted-changed.ipynb", "submitted/bar/ps1/p1.ipynb")
run_command('nbgrader autograde ps1 --db="{}"'.format(gradebook))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert not os.path.isfile("autograded/foo/ps1/timestamp.txt")
assert os.path.isfile("autograded/bar/ps1/p1.ipynb")
assert not os.path.isfile("autograded/bar/ps1/timestamp.txt")
gb = Gradebook(gradebook)
notebook = gb.find_submission_notebook("p1", "ps1", "foo")
assert notebook.score == 1
assert notebook.max_score == 7
assert notebook.needs_manual_grade == False
comment1 = gb.find_comment("set_a", "p1", "ps1", "foo")
comment2 = gb.find_comment("baz", "p1", "ps1", "foo")
comment3 = gb.find_comment("quux", "p1", "ps1", "foo")
assert comment1.comment == "No response."
assert comment2.comment == "No response."
assert comment3.comment == "No response."
notebook = gb.find_submission_notebook("p1", "ps1", "bar")
assert notebook.score == 2
assert notebook.max_score == 7
assert notebook.needs_manual_grade == True
comment1 = gb.find_comment("set_a", "p1", "ps1", "bar")
comment2 = gb.find_comment("baz", "p1", "ps1", "bar")
comment2 = gb.find_comment("quux", "p1", "ps1", "bar")
assert comment1.comment == None
assert comment2.comment == None
def test_grade_timestamp(self, gradebook):
"""Is a timestamp correctly read in?"""
self._copy_file("files/submitted-unchanged.ipynb", "source/ps1/p1.ipynb")
run_command('nbgrader assign ps1 --db="{}" '.format(gradebook))
self._copy_file("files/submitted-unchanged.ipynb", "submitted/foo/ps1/p1.ipynb")
self._make_file('submitted/foo/ps1/timestamp.txt', "2015-02-02 15:58:23.948203 PST")
self._copy_file("files/submitted-changed.ipynb", "submitted/bar/ps1/p1.ipynb")
self._make_file('submitted/bar/ps1/timestamp.txt', "2015-02-01 14:58:23.948203 PST")
run_command('nbgrader autograde ps1 --db="{}"'.format(gradebook))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert os.path.isfile("autograded/foo/ps1/timestamp.txt")
assert os.path.isfile("autograded/bar/ps1/p1.ipynb")
assert os.path.isfile("autograded/bar/ps1/timestamp.txt")
gb = Gradebook(gradebook)
submission = gb.find_submission('ps1', 'foo')
assert submission.total_seconds_late > 0
submission = gb.find_submission('ps1', 'bar')
assert submission.total_seconds_late == 0
# make sure it still works to run it a second time
run_command('nbgrader autograde ps1 --db="{}"'.format(gradebook))
def test_force(self, gradebook):
"""Ensure the force option works properly"""
self._copy_file("files/submitted-unchanged.ipynb", "source/ps1/p1.ipynb")
self._make_file("source/ps1/foo.txt", "foo")
self._make_file("source/ps1/data/bar.txt", "bar")
run_command('nbgrader assign ps1 --db="{}" '.format(gradebook))
self._copy_file("files/submitted-unchanged.ipynb", "submitted/foo/ps1/p1.ipynb")
self._make_file("submitted/foo/ps1/foo.txt", "foo")
self._make_file("submitted/foo/ps1/data/bar.txt", "bar")
self._make_file("submitted/foo/ps1/blah.pyc", "asdf")
run_command('nbgrader autograde ps1 --db="{}"'.format(gradebook))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert os.path.isfile("autograded/foo/ps1/foo.txt")
assert os.path.isfile("autograded/foo/ps1/data/bar.txt")
assert not os.path.isfile("autograded/foo/ps1/blah.pyc")
# check that it skips the existing directory
os.remove("autograded/foo/ps1/foo.txt")
run_command('nbgrader autograde ps1 --db="{}"'.format(gradebook))
assert not os.path.isfile("autograded/foo/ps1/foo.txt")
# force overwrite the supplemental files
run_command('nbgrader autograde ps1 --db="{}" --force'.format(gradebook))
assert os.path.isfile("autograded/foo/ps1/foo.txt")
# force overwrite
os.remove("source/ps1/foo.txt")
os.remove("submitted/foo/ps1/foo.txt")
run_command('nbgrader autograde ps1 --db="{}" --force'.format(gradebook))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert not os.path.isfile("autograded/foo/ps1/foo.txt")
assert os.path.isfile("autograded/foo/ps1/data/bar.txt")
assert not os.path.isfile("autograded/foo/ps1/blah.pyc")
def test_filter_notebook(self, gradebook):
"""Does autograding filter by notebook properly?"""
self._copy_file("files/submitted-unchanged.ipynb", "source/ps1/p1.ipynb")
self._make_file("source/ps1/foo.txt", "foo")
self._make_file("source/ps1/data/bar.txt", "bar")
run_command('nbgrader assign ps1 --db="{}" '.format(gradebook))
self._copy_file("files/submitted-unchanged.ipynb", "submitted/foo/ps1/p1.ipynb")
self._make_file("submitted/foo/ps1/foo.txt", "foo")
self._make_file("submitted/foo/ps1/data/bar.txt", "bar")
self._make_file("submitted/foo/ps1/blah.pyc", "asdf")
run_command('nbgrader autograde ps1 --db="{}" --notebook "p1"'.format(gradebook))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert os.path.isfile("autograded/foo/ps1/foo.txt")
assert os.path.isfile("autograded/foo/ps1/data/bar.txt")
assert not os.path.isfile("autograded/foo/ps1/blah.pyc")
# check that removing the notebook still causes the autograder to run
os.remove("autograded/foo/ps1/p1.ipynb")
os.remove("autograded/foo/ps1/foo.txt")
run_command('nbgrader autograde ps1 --db="{}" --notebook "p1"'.format(gradebook))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert os.path.isfile("autograded/foo/ps1/foo.txt")
assert os.path.isfile("autograded/foo/ps1/data/bar.txt")
assert not os.path.isfile("autograded/foo/ps1/blah.pyc")
# check that running it again doesn't do anything
os.remove("autograded/foo/ps1/foo.txt")
run_command('nbgrader autograde ps1 --db="{}" --notebook "p1"'.format(gradebook))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert not os.path.isfile("autograded/foo/ps1/foo.txt")
assert os.path.isfile("autograded/foo/ps1/data/bar.txt")
assert not os.path.isfile("autograded/foo/ps1/blah.pyc")
        # check that removing the notebook doesn't cause the autograder to run
os.remove("autograded/foo/ps1/p1.ipynb")
run_command('nbgrader autograde ps1 --db="{}"'.format(gradebook))
assert not os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert not os.path.isfile("autograded/foo/ps1/foo.txt")
assert os.path.isfile("autograded/foo/ps1/data/bar.txt")
assert not os.path.isfile("autograded/foo/ps1/blah.pyc")
def test_grade_overwrite_files(self, gradebook):
"""Are dependent files properly linked and overwritten?"""
self._copy_file("files/submitted-unchanged.ipynb", "source/ps1/p1.ipynb")
self._make_file("source/ps1/data.csv", "some,data\n")
run_command('nbgrader assign ps1 --db="{}" '.format(gradebook))
self._copy_file("files/submitted-unchanged.ipynb", "submitted/foo/ps1/p1.ipynb")
self._make_file('submitted/foo/ps1/timestamp.txt', "2015-02-02 15:58:23.948203 PST")
self._make_file("submitted/foo/ps1/data.csv", "some,other,data\n")
run_command('nbgrader autograde ps1 --db="{}"'.format(gradebook))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert os.path.isfile("autograded/foo/ps1/timestamp.txt")
assert os.path.isfile("autograded/foo/ps1/data.csv")
with open("autograded/foo/ps1/timestamp.txt", "r") as fh:
contents = fh.read()
assert contents == "2015-02-02 15:58:23.948203 PST"
with open("autograded/foo/ps1/data.csv", "r") as fh:
contents = fh.read()
assert contents == "some,data\n"
def test_side_effects(self, gradebook):
self._copy_file("files/side-effects.ipynb", "source/ps1/p1.ipynb")
run_command('nbgrader assign ps1 --db="{}" '.format(gradebook))
self._copy_file("files/side-effects.ipynb", "submitted/foo/ps1/p1.ipynb")
run_command('nbgrader autograde ps1 --db="{}"'.format(gradebook))
assert os.path.isfile("autograded/foo/ps1/side-effect.txt")
assert not os.path.isfile("submitted/foo/ps1/side-effect.txt")
def test_skip_extra_notebooks(self, gradebook):
self._copy_file("files/submitted-unchanged.ipynb", "source/ps1/p1.ipynb")
run_command('nbgrader assign ps1 --db="{}" '.format(gradebook))
self._copy_file("files/submitted-unchanged.ipynb", "submitted/foo/ps1/p1 copy.ipynb")
self._copy_file("files/submitted-changed.ipynb", "submitted/foo/ps1/p1.ipynb")
run_command('nbgrader autograde ps1 --db="{}"'.format(gradebook))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert not os.path.isfile("autograded/foo/ps1/p1 copy.ipynb")
def test_permissions(self):
"""Are permissions properly set?"""
self._empty_notebook('source/ps1/foo.ipynb')
self._make_file("source/ps1/foo.txt", "foo")
run_command("nbgrader assign ps1 --create")
self._empty_notebook('submitted/foo/ps1/foo.ipynb')
self._make_file("source/foo/ps1/foo.txt", "foo")
run_command("nbgrader autograde ps1 --create")
assert os.path.isfile("autograded/foo/ps1/foo.ipynb")
assert os.path.isfile("autograded/foo/ps1/foo.txt")
assert self._get_permissions("autograded/foo/ps1/foo.ipynb") == "444"
assert self._get_permissions("autograded/foo/ps1/foo.txt") == "444"
def test_custom_permissions(self):
"""Are custom permissions properly set?"""
self._empty_notebook('source/ps1/foo.ipynb')
self._make_file("source/ps1/foo.txt", "foo")
run_command("nbgrader assign ps1 --create")
self._empty_notebook('submitted/foo/ps1/foo.ipynb')
self._make_file("source/foo/ps1/foo.txt", "foo")
run_command("nbgrader autograde ps1 --create --AutogradeApp.permissions=644")
assert os.path.isfile("autograded/foo/ps1/foo.ipynb")
assert os.path.isfile("autograded/foo/ps1/foo.txt")
assert self._get_permissions("autograded/foo/ps1/foo.ipynb") == "644"
assert self._get_permissions("autograded/foo/ps1/foo.txt") == "644"
|
{
"content_hash": "ac405d82c001c25dde429fff00874ff2",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 93,
"avg_line_length": 48.96138996138996,
"alnum_prop": 0.6470309912467471,
"repo_name": "alope107/nbgrader",
"id": "1cf58b2ddf9f1fde0f643965101db14d6a1d0cfa",
"size": "12681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nbgrader/tests/apps/test_nbgrader_autograde.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4114"
},
{
"name": "JavaScript",
"bytes": "162308"
},
{
"name": "Python",
"bytes": "501323"
},
{
"name": "Smarty",
"bytes": "25140"
}
],
"symlink_target": ""
}
|
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
EXPECTED_ERROR_MSG = 'Response value for gamma distribution must be greater than 0.'
def pubdev_5495():
glm = H2OGeneralizedLinearEstimator(family='gamma')
frame = h2o.import_file(pyunit_utils.locate("smalldata/logreg/prostate.csv"))
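    # CAPSULE is a 0/1 indicator column, so training a gamma-family GLM on it
    # should trip the "must be greater than 0" response check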
try:
glm.train(training_frame=frame, y='CAPSULE')
except h2o.exceptions.H2OResponseError as e:
assert EXPECTED_ERROR_MSG in e.args[0].dev_msg, "dev_msg should contain '%s'. Actual dev_msg is '%s'" % (EXPECTED_ERROR_MSG, e.args[0].dev_msg)
if __name__ == "__main__":
pyunit_utils.standalone_test(pubdev_5495)
else:
pubdev_5495()
|
{
"content_hash": "71b5c6dc45e319dc562022499d379020",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 151,
"avg_line_length": 35.5,
"alnum_prop": 0.7140845070422536,
"repo_name": "michalkurka/h2o-3",
"id": "75b2806e510a60b62ec8ab63446d8a22c3099f38",
"size": "710",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_jira/pyunit_pubdev_5495_glm_gamma_error_message.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12629"
},
{
"name": "CSS",
"bytes": "231770"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "Dockerfile",
"bytes": "10302"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "166480"
},
{
"name": "HCL",
"bytes": "15007"
},
{
"name": "HTML",
"bytes": "251906"
},
{
"name": "HiveQL",
"bytes": "3965"
},
{
"name": "Java",
"bytes": "11932863"
},
{
"name": "JavaScript",
"bytes": "89484"
},
{
"name": "Jupyter Notebook",
"bytes": "13867219"
},
{
"name": "Makefile",
"bytes": "50635"
},
{
"name": "Python",
"bytes": "6801044"
},
{
"name": "R",
"bytes": "3223113"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "33647"
},
{
"name": "Shell",
"bytes": "186559"
},
{
"name": "TeX",
"bytes": "634412"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', TemplateView.as_view(template_name="index.html")),
url(r'^admin/', include(admin.site.urls)),
)
|
{
"content_hash": "8f788b4d74d03c9c70427f9206fd3d7e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 65,
"avg_line_length": 28.363636363636363,
"alnum_prop": 0.7243589743589743,
"repo_name": "eHealthAfrica/dev-track",
"id": "480b60dbf7e8ff4b8bdcea0602e190a97f1b08a4",
"size": "312",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "demo/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3428"
}
],
"symlink_target": ""
}
|
import atexit
import datetime
from distutils.version import StrictVersion
from os import environ as env
import os
import subprocess
import sys
import seesaw
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.task import LimitConcurrent
from seesaw.util import find_executable
# FIXME: This is a bit of a hack.
#
# Pipeline scripts are run with pwd set to their directory, which is why
# getcwd will (often) return the Right Thing. A more robust solution would be
# nice, though.
sys.path.append(os.getcwd())
from archivebot import control
from archivebot import shared_config
from archivebot.seesaw import extensions
from archivebot.seesaw import monitoring
from archivebot.seesaw.preflight import check_wpull_args
from archivebot.seesaw.dnspythoncrash import test as dnspython_crash_fixed
from archivebot.seesaw.wpull import WpullArgs
from archivebot.seesaw.tasks import GetItemFromQueue, StartHeartbeat, \
SetFetchDepth, PreparePaths, Wpull, CompressLogIfFailed, WriteInfo, DownloadUrlFile, \
RelabelIfAborted, MoveFiles, StopHeartbeat, MarkItemAsDone, CheckIP, CheckLocalWebserver
WPULL_VERSION = '2.0.3'
EXPIRE_TIME = 60 * 60 * 48 # 48 hours between archive requests
WPULL_EXE = find_executable('Wpull', WPULL_VERSION, ['wpull', './wpull'], '--version')
YOUTUBE_DL = find_executable('youtube-dl', None, ['./youtube-dl'], '--version')
assert sys.version_info >= (3, 3), \
    "This pipeline requires Python >= 3.3. You are running %s." % \
    sys.version
if not os.environ.get('NO_SEGFAULT_340'):
assert sys.version_info[:3] != (3, 4, 0), \
"Python 3.4.0 should not be used. It may segfault. " \
"Set NO_SEGFAULT_340=1 if your Python is patched. " \
"See https://bugs.python.org/issue21435"
assert WPULL_EXE, 'No usable Wpull found.'
assert YOUTUBE_DL, 'No usable youtube-dl found.'
assert 'REDIS_URL' in env, 'REDIS_URL not set.'
assert 'FINISHED_WARCS_DIR' in env, 'FINISHED_WARCS_DIR not set.'
if 'WARC_MAX_SIZE' in env:
WARC_MAX_SIZE = env['WARC_MAX_SIZE']
else:
WARC_MAX_SIZE = '5368709120'
WPULL_MONITOR_DISK = env.get('WPULL_MONITOR_DISK', '5120m')
WPULL_MONITOR_MEMORY = env.get('WPULL_MONITOR_MEMORY', '50m')
assert 'TMUX' in env or 'STY' in env or env.get('NO_SCREEN') == "1", \
"Refusing to start outside of screen or tmux, set NO_SCREEN=1 to override"
if StrictVersion(seesaw.__version__) < StrictVersion("0.1.8b1"):
raise Exception(
"Needs seesaw@python3/development version 0.1.8b1 or higher. "
"You have version {0}".format(seesaw.__version__)
)
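# NOTE: `downloader` is not defined in this file; the seesaw runner injects
# it (the operator's nickname) into the script's globals when it executes
# this pipeline.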
assert downloader not in ('ignorednick', 'YOURNICKHERE'), 'please use a real nickname'
assert datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo.utcoffset(None).seconds == 0, 'Please set the time zone to UTC'
assert dnspython_crash_fixed(), 'Broken crash-prone dnspython found'
REDIS_URL = env['REDIS_URL']
LOG_CHANNEL = shared_config.log_channel()
PIPELINE_CHANNEL = shared_config.pipeline_channel()
OPENSSL_CONF = env.get('OPENSSL_CONF')
TMPDIR = env.get('TMPDIR')
# ------------------------------------------------------------------------------
# CONTROL CONNECTION
# ------------------------------------------------------------------------------
control = control.Control(REDIS_URL, LOG_CHANNEL, PIPELINE_CHANNEL)
# ------------------------------------------------------------------------------
# SEESAW EXTENSIONS
# ------------------------------------------------------------------------------
extensions.install_stdout_extension(control)
# ------------------------------------------------------------------------------
# PIPELINE
# ------------------------------------------------------------------------------
project = Project(
title = "ArchiveBot request handler"
)
#FIXME: Same hack as above; seesaw executes pipeline.py with the pipeline dir as the cwd.
# __file__ can't be used because seesaw exec()s the file contents rather than importing the file.
REPO_DIRECTORY = os.path.dirname(os.path.realpath('.'))
def pipeline_version():
# Returns something like 20190820.5cd1e38
output = subprocess.check_output(['git', 'show', '-s', '--format=format:%cd.%h', '--date=format:%Y%m%d'], cwd = REPO_DIRECTORY)
return output.decode('utf-8').strip()
def wpull_version():
output = subprocess.check_output([WPULL_EXE, '--version'],
stderr=subprocess.STDOUT)
return output.decode('utf-8').strip()
class AcceptAny:
def __contains__(self, item):
return True
VERSION = pipeline_version()
DEFAULT_USER_AGENT = \
'ArchiveTeam ArchiveBot/%s (wpull %s) and not Mozilla/5.0 ' \
'(Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
'Chrome/42.0.2311.90 Safari/537.36' % (VERSION, wpull_version())
_, _, _, pipeline_id = monitoring.pipeline_id()
wpull_args = WpullArgs(
default_user_agent=DEFAULT_USER_AGENT,
wpull_exe=WPULL_EXE,
youtube_dl_exe=YOUTUBE_DL,
finished_warcs_dir=os.environ["FINISHED_WARCS_DIR"],
warc_max_size=WARC_MAX_SIZE,
monitor_disk=WPULL_MONITOR_DISK,
monitor_memory=WPULL_MONITOR_MEMORY,
)
check_wpull_args(wpull_args)
wpull_env = dict(os.environ)
wpull_env['ITEM_IDENT'] = ItemInterpolation('%(ident)s')
wpull_env['LOG_KEY'] = ItemInterpolation('%(log_key)s')
wpull_env['REDIS_URL'] = REDIS_URL
if OPENSSL_CONF:
wpull_env['OPENSSL_CONF'] = OPENSSL_CONF
if TMPDIR:
wpull_env['TMPDIR'] = TMPDIR
pipeline = Pipeline(
CheckIP(),
CheckLocalWebserver(),
GetItemFromQueue(control, pipeline_id, downloader,
ao_only=env.get('AO_ONLY'), large=env.get('LARGE'),
version_check = (VERSION, pipeline_version)),
StartHeartbeat(control),
SetFetchDepth(),
PreparePaths(),
WriteInfo(),
DownloadUrlFile(control),
Wpull(
wpull_args,
accept_on_exit_code=AcceptAny(),
env=wpull_env,
),
RelabelIfAborted(control),
CompressLogIfFailed(),
WriteInfo(),
MoveFiles(target_directory = os.environ["FINISHED_WARCS_DIR"]),
StopHeartbeat(),
MarkItemAsDone(control, EXPIRE_TIME)
)
def stop_control():
#control.flag_logging_thread_for_termination()
control.unregister_pipeline(pipeline_id)
pipeline.on_cleanup += stop_control
pipeline.running_status = "Running"
def status_running():
pipeline.running_status = "Running"
pipeline.on_stop_canceled += status_running
def status_stopping():
pipeline.running_status = "Stopping"
pipeline.on_stop_requested += status_stopping
# Activate system monitoring.
monitoring.start(pipeline, control, VERSION, downloader)
print('*' * 60)
print('Pipeline ID: %s' % pipeline_id)
# check the combined case first; otherwise it is shadowed by the single-flag
# branches below and could never be reached
if env.get('AO_ONLY') and env.get('LARGE'):
    print('!ao-only and large modes enabled. THIS IS PROBABLY A MISTAKE. '
          ' Pipeline will accept only jobs queued with --large or !ao.')
elif env.get('AO_ONLY'):
    print('!ao-only mode enabled; pipeline will accept jobs queued with !ao '
          '(and not jobs queued with !a or --pipeline)')
elif env.get('LARGE'):
    print('large mode enabled; pipeline will accept jobs queued with !a'
          ' --large')
print('*' * 60)
print()
# vim:ts=4:sw=4:et:tw=78
|
{
"content_hash": "7631e1bea410d1f9cf84eab625fbcbea",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 135,
"avg_line_length": 33.76851851851852,
"alnum_prop": 0.6591719221277762,
"repo_name": "ArchiveTeam/ArchiveBot",
"id": "73abd2d91018430229d91f61779c364a52d9c3af",
"size": "7294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipeline/pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "927"
},
{
"name": "Dockerfile",
"bytes": "474"
},
{
"name": "HTML",
"bytes": "81862"
},
{
"name": "Haxe",
"bytes": "16435"
},
{
"name": "JavaScript",
"bytes": "6838"
},
{
"name": "Makefile",
"bytes": "79"
},
{
"name": "Python",
"bytes": "138681"
},
{
"name": "Ruby",
"bytes": "98065"
},
{
"name": "Shell",
"bytes": "1762"
}
],
"symlink_target": ""
}
|
from canvas import util
from canvas.tests.tests_helpers import CanvasTestCase
class TestUploads(CanvasTestCase):
def _test_upload(self, filename):
f = file("static/img/tests/" + filename, "rb")
http_response = self.post('/api/upload', { "file": f })
response = util.loads(http_response.content)
self.assertTrue(response['success'])
def test_upload_animated_gif(self):
self._test_upload("animated.gif")
def test_upload_opaque_jpeg(self):
self._test_upload("opaque.jpg")
def test_upload_transparent_png(self):
self._test_upload("transparent.png")
|
{
"content_hash": "b7c4821188842f604fc4097ea67a1746",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 63,
"avg_line_length": 34.5,
"alnum_prop": 0.6634460547504025,
"repo_name": "canvasnetworks/canvas",
"id": "f8f19aa7a6d0bec90be317b8900d9b645211e6a9",
"size": "621",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "website/canvas/tests/test_upload.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "57"
},
{
"name": "C",
"bytes": "547"
},
{
"name": "CSS",
"bytes": "537625"
},
{
"name": "HTML",
"bytes": "689709"
},
{
"name": "JavaScript",
"bytes": "1313262"
},
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "PHP",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "6659685"
},
{
"name": "Ruby",
"bytes": "876"
},
{
"name": "Shell",
"bytes": "5326"
}
],
"symlink_target": ""
}
|
import unittest
from itest_support import IntegrationTestSupport
from pybuilder.errors import MissingPluginException
class Test(IntegrationTestSupport):
def test(self):
self.write_build_file("""
from pybuilder.core import use_plugin
use_plugin("pypi:thispluginsdoesnotandshouldnotexist")
name = "plugin-fail-test"
default_task = "publish"
""")
self.create_directory("src/main/python/spam")
self.write_file("src/main/python/spam/__init__.py", "")
self.create_directory("src/main/scripts")
self.write_file("src/main/scripts/spam", "print('spam')")
self.assertRaises(MissingPluginException, self.prepare_reactor)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "c5221c53feeb1485af42dbbed926eb30",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 24.82758620689655,
"alnum_prop": 0.7,
"repo_name": "paolodedios/pybuilder",
"id": "40830e030494e95a283e45b1df106e7b095ed2f0",
"size": "1354",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/integrationtest/python/should_gracefully_fail_when_no_remote_plugin_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1567"
},
{
"name": "Nu",
"bytes": "3265"
},
{
"name": "Perl",
"bytes": "4025"
},
{
"name": "PowerShell",
"bytes": "1810"
},
{
"name": "Python",
"bytes": "2699121"
},
{
"name": "Shell",
"bytes": "6706"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('adyen', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='adyentransaction',
name='order_number',
field=models.CharField(max_length=255),
),
]
|
{
"content_hash": "d4e731a0a834b29dd09d4df2b8acc0b4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 51,
"avg_line_length": 21.22222222222222,
"alnum_prop": 0.5968586387434555,
"repo_name": "oscaro/django-oscar-adyen",
"id": "c54d1a141d6f00d2cf76a1976c79a023df6f34f2",
"size": "406",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "adyen/migrations/0002_auto_20141016_1601.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "75681"
}
],
"symlink_target": ""
}
|
"""
nodes for scan operations
"""
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import theano
from .. import core
from .. import utils
@core.register_node("scan_input")
class ScanInputNode(core.NodeImpl):
"""
    Node that transforms a sequence-wise input to an element-wise input,
for replacement in a ScanNode.
"""
def compute_output(self, network, in_vw):
scan_axis = network.find_hyperparameter(["scan_axis"], 1)
def remove_scan_axis(in_val):
out_val = list(in_val)
out_val.pop(scan_axis)
# assuming in_val is a list or tuple
if isinstance(in_val, tuple):
return tuple(out_val)
else:
assert isinstance(in_val, list)
return out_val
# construct output
network.create_vw(
name="default",
is_shared=False,
shape=remove_scan_axis(in_vw.shape),
broadcastable=remove_scan_axis(in_vw.broadcastable),
tags={"input"},
)
@core.register_node("scan_state")
class ScanStateNode(core.NodeImpl):
"""
Container for hidden state, where one specifies an initial value and
a next value via other nodes in the tree.
NOTE: initial state MUST have the correct shape (ie. same shape as
the output of the next_state node)
initial_scan_state_reference:
optional reference to a node to take initial state from
if not given, the default input of the node is used
"""
hyperparameter_names = ("initial_scan_state_reference",
"initial_state_reference",
"initial_state",
"next_scan_state_reference",
"next_state_reference",
"next_state")
input_keys = ("initial_state",)
def init_state(self, network):
node_name = network.find_hyperparameter(
["initial_scan_state_reference",
"initial_state_reference",
"initial_state"],
None)
if node_name is None:
# otherwise set it to the default input of the node
node_name = network.get_all_input_edges()["default"]
# add dependency in dag
network.take_output_from(
node_name,
to_key="initial_state")
# set next state node
next_state = network.find_hyperparameter(["next_scan_state_reference",
"next_state_reference",
"next_state"])
network.set_data("next_state", next_state)
def compute_output(self, network, initial_state):
# copy initial state, to be later referenced
network.copy_vw(
name="initial_state",
previous_vw=initial_state,
tags={"input"},
)
# create a new variable representing the output of this node,
# so that the scan node can replace it with the node's input at a
# previous time step
network.create_vw(
name="default",
is_shared=False,
shape=initial_state.shape,
broadcastable=initial_state.broadcastable,
tags={"output"},
)
@core.register_node("scan")
class ScanNode(core.Wrapper1NodeImpl):
"""
root node for a scan operation. transforms an element-wise child subtree
into a sequence-wise subtree, taking into account ScanStateNode's
"""
hyperparameter_names = ("scan_axis", )
input_keys = ("default", "final_child_output",)
def __init__(self, name, *args, **kwargs):
super(ScanNode, self).__init__(name, *args, **kwargs)
self._scan_input_node = ScanInputNode(name=self.name + "_input")
def architecture_children(self):
return ([self._scan_input_node]
+ super(ScanNode, self).architecture_children())
def init_state(self, network):
# send input to scan input node, and take output from child
super(ScanNode, self).init_state(network)
# link scan input node to child
child1, child2 = self.architecture_children()
network.add_dependency(child1.name, child2.name)
def compute_output(self, network, sequence_input, element_output):
scan_axis = network.find_hyperparameter(["scan_axis"], 1)
# FIXME delete hard coded axis when actually shuffling dimensions
scan_axis = 0
# ################################ sequences ##########################
# for now, only the single ScanInputNode
input_network = network[self._scan_input_node.name]
element_input = input_network.get_vw("default").variable
element_input_vars = [element_input]
# FIXME shuffle dimensions to use scan_axis
# TODO assert ndim is right
input_sequences = [sequence_input.variable]
# ############################### outputs_info #######################
# ---
# sources:
# 1. all outputs of the subtree
# 2. next state for scan state node
# outputs
# original_outputs are ones whose variable will be replaced with a
# version with an additional scan axis
# NOTE: having variables from nested scans is desired here, because
# we want to transform the output into a version with a sequence
# dimension
original_outputs = network.find_vws_in_subtree(tags={"output"})
# set all_outputs to a copy of original_outputs
all_outputs = list(original_outputs)
# in case the element-wise output of the scan is not tagged as an
# "output" variable, we don't want to replace it with a sequence
if element_output not in all_outputs:
all_outputs.append(element_output)
# find corresponding variable wrappers
element_output_vars = [variable_wrapper.variable
for variable_wrapper in all_outputs]
        # initialize outputs_info with None for every output; None means the
        # value is not fed back into the next step
outputs_info = [None] * len(all_outputs)
# scan state nodes
# FIXME do not get state in a nested scan
scan_state_nodes = network.find_nodes_in_subtree(ScanStateNode)
scan_state_networks = [network[node.name] for node in scan_state_nodes]
scan_state_outputs = [net.get_vw("default")
for net in scan_state_networks]
scan_state_vars = [variable_wrapper.variable
for variable_wrapper in scan_state_outputs]
        # find the index of each scan state output within all_outputs
scan_state_idxs = [all_outputs.index(var)
for var in scan_state_outputs]
# find the order that the states will appear in scan's inputs
scan_state_order = sorted(range(len(scan_state_idxs)),
key=lambda x: scan_state_idxs[x])
# get the node name of the next state from each scan state node
scan_state_next_names = [net.get_data("next_state")
for net in scan_state_networks]
# each scan state should probably have a unique next state
# delete the assertion if this assumption does not hold
# (the code should work just fine, this is just a sanity check)
assert len(scan_state_next_names) == len(set(scan_state_next_names))
scan_state_next_networks = [network[name]
for name in scan_state_next_names]
scan_state_next_vws = [net.get_vw("default")
for net in scan_state_next_networks]
scan_state_next_idxs = [all_outputs.index(var)
for var in scan_state_next_vws]
# finding initial states
scan_state_initial_vws = [net.get_vw("initial_state")
for net in scan_state_networks]
# updates outputs_info to contain initial state
for idx, node, init_vw, next_vw in zip(scan_state_idxs,
scan_state_nodes,
scan_state_initial_vws,
scan_state_next_vws):
# make sure initial and final shape are the same
# ---
# NOTE: node is only passed in for debugging purposes
assert init_vw.shape == next_vw.shape, dict(
msg=("Initial and final state from ScanStateNode must be "
"the same."),
node=node,
init_shape=init_vw.shape,
next_shape=next_vw.shape,
init_vw=init_vw,
next_vw=next_vw,
)
outputs_info[idx] = init_vw.variable
# ############################## non_sequences ########################
# ---
# for now, don't specify any non-sequences and hope that theano
# can optimize the graph
non_sequence_vars = []
non_sequences = []
# ################################### scan ############################
def step(*scan_vars):
# calculate number for each type of scan var
num_inputs = len(input_sequences)
num_outputs = len([x for x in outputs_info if x is not None])
num_non_sequences = len(non_sequences)
assert len(scan_vars) == (num_inputs
+ num_outputs
+ num_non_sequences)
assert num_outputs == len(scan_state_nodes)
# break down scan vars into appropriate categories
scan_input_vars = scan_vars[:num_inputs]
scan_output_vars = scan_vars[num_inputs:num_inputs + num_outputs]
scan_non_sequences = scan_vars[num_inputs + num_outputs:]
# setup variables for replacement
to_replace = []
for_replace = []
# input vars
to_replace += element_input_vars
for_replace += scan_input_vars
# non sequences
to_replace += non_sequence_vars
for_replace += scan_non_sequences
# scan state nodes
for state_idx, scan_output_var in zip(scan_state_order,
scan_output_vars):
to_replace.append(scan_state_vars[state_idx])
for_replace.append(scan_output_var)
assert len(to_replace) == len(for_replace)
# perform scan
new_outputs = utils.deep_clone(
element_output_vars,
replace=dict(zip(to_replace, for_replace)),
)
final_outputs = list(new_outputs)
# set next state for recurrent state nodes
for state_idx, next_idx in zip(scan_state_idxs,
scan_state_next_idxs):
final_outputs[state_idx] = final_outputs[next_idx]
return final_outputs
# edit all outputs of subtree
results, updates = theano.scan(
fn=step,
outputs_info=outputs_info,
sequences=input_sequences,
non_sequences=non_sequences,
)
# ############################# post-processing #######################
# scan automatically unwraps lists, so rewrap if needed
if not isinstance(results, list):
results = [results]
assert len(results) == len(element_output_vars)
result_map = dict(zip(element_output_vars, results))
# TODO store updates in network (to later be used in new_update_deltas)
# NOTE: before doing this, look into the effects of manipulating the
# update deltas of random variables - it might not work as expected
# FIXME unshuffle dimensions for scan axes
def transform_shape(old_shape):
tmp = list(old_shape)
tmp.insert(scan_axis, None)
# FIXME what if length is < scan_axis
return tuple(tmp)
def transform_output(output_variable):
# FIXME
return output_variable
# FIXME mutate current_variables in network to have new updates
# create final output with the appropriate result
network.create_vw(
name="default",
variable=transform_output(result_map[element_output.variable]),
shape=transform_shape(element_output.shape),
)
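# ---------------------------------------------------------------------------
# For reference, a minimal standalone sketch of the theano.scan calling
# convention used above (the canonical "compute A**k" example; the names here
# are illustrative only, not part of this module):
#
#     import theano
#     import theano.tensor as T
#
#     k = T.iscalar("k")
#     A = T.vector("A")
#     # as in `step` above, scan passes sequences first, then the prior
#     # outputs, then the non-sequences (here: no sequences, one of each)
#     result, updates = theano.scan(fn=lambda prior, A: prior * A,
#                                   outputs_info=T.ones_like(A),
#                                   non_sequences=A,
#                                   n_steps=k)
#     power = theano.function(inputs=[A, k], outputs=result[-1],
#                             updates=updates)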
|
{
"content_hash": "03f38137613f26448ef3b94b6bd268c4",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 79,
"avg_line_length": 39.91798107255521,
"alnum_prop": 0.5561877667140825,
"repo_name": "diogo149/treeano",
"id": "9d47f0211cb68b5b27b94baffbe7c8150b762bd1",
"size": "12654",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "treeano/nodes/scan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1121"
},
{
"name": "JavaScript",
"bytes": "16042"
},
{
"name": "Python",
"bytes": "864524"
}
],
"symlink_target": ""
}
|
from flask import Blueprint
import pajbot.web.routes.admin.banphrases
import pajbot.web.routes.admin.commands
import pajbot.web.routes.admin.home
import pajbot.web.routes.admin.links
import pajbot.web.routes.admin.moderators
import pajbot.web.routes.admin.modules
import pajbot.web.routes.admin.playsounds
import pajbot.web.routes.admin.predictions
import pajbot.web.routes.admin.streamer
import pajbot.web.routes.admin.timers
def init(app):
page = Blueprint("admin", __name__, url_prefix="/admin")
pajbot.web.routes.admin.banphrases.init(page)
pajbot.web.routes.admin.commands.init(page)
pajbot.web.routes.admin.home.init(page)
pajbot.web.routes.admin.links.init(page)
pajbot.web.routes.admin.moderators.init(page)
pajbot.web.routes.admin.modules.init(page)
pajbot.web.routes.admin.playsounds.init(page)
pajbot.web.routes.admin.predictions.init(page)
pajbot.web.routes.admin.streamer.init(page)
pajbot.web.routes.admin.timers.init(page)
app.register_blueprint(page)
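# A minimal usage sketch (the app factory below is hypothetical, not part of
# pajbot itself); `init` mounts every admin route under the /admin prefix:
#
#     from flask import Flask
#
#     app = Flask(__name__)
#     init(app)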
|
{
"content_hash": "5800862c62ca6cb9594edd2ffd3aec92",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 60,
"avg_line_length": 35.172413793103445,
"alnum_prop": 0.7764705882352941,
"repo_name": "pajlada/pajbot",
"id": "274fb871abf6242c9668a6f96365b3fdf5c61c47",
"size": "1020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pajbot/web/routes/admin/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11288"
},
{
"name": "HTML",
"bytes": "129576"
},
{
"name": "JavaScript",
"bytes": "202450"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "987601"
},
{
"name": "Shell",
"bytes": "589"
}
],
"symlink_target": ""
}
|
"""
Command Session
TODO:
- contextmanager to allow caller to set stream preference
- override of environment and cwd
- ParamDict should unpack itself (CommandSession should not have to)
"""
__all__ = ["CommandError", "CommandSession", "CommandSessionMixin", "ParamDict"]
import os
import sys
import six
import subprocess
from boltons.dictutils import OrderedMultiDict
ParamDict = OrderedMultiDict
class CommandError(subprocess.CalledProcessError):
def __init__(self, session):
super(CommandError, self).__init__(
session.last_returncode,
session.last_command,
output=session.last_output
)
def __str__(self):
return "Command {} returned {} with {}".format(
self.cmd,
self.returncode,
self.output
)
class CommandSession(object):
"""Wrapper around multiple invocations of subprocess.Popen, tracking
and saving execution results and output.
    Provides methods with an interface similar to `subprocess.call()`,
    `subprocess.check_call()`, and `subprocess.check_output()`.
Params:
        stream (bool): should output be streamed to stdout, in addition to being
captured?
env (dict): environment to use, in place of current process environment
        cwd (str): current working directory to replace `os.getcwd()`
force_shell (bool): whether or not to force `shell=True` param
to `Popen()`; if the shell (/bin/sh) is required (for instance,
if you intend to use a shell builtin), you can force this;
normally, `shell` param is only used for commands given as
strings (subprocess prefers commands given as lists, which is
the default for this module).
"""
def __init__(self, stream=False, env=None, cwd=None, force_shell=False):
self.log = []
self._stream = sys.stdout if stream else None
self._env = env
self._cwd = cwd
self._shell = force_shell
@property
def last_returncode(self):
"""Get the return code of the last command exevuted."""
try:
return self.log[-1][1]
except IndexError:
raise RuntimeError('Nothing executed')
@property
def last_command(self):
"""Get the output of the last command exevuted."""
if not len(self.log):
raise RuntimeError('Nothing executed')
return self.log[-1][0]
@property
def last_output(self):
"""Get the output of the last command exevuted."""
if not len(self.log):
raise RuntimeError('Nothing executed')
return self.log[-1][2]
@property
def last_error(self):
"""Get the output of the last command exevuted."""
if not len(self.log):
raise RuntimeError('Nothing executed')
try:
errs = [l for l in self.log if l[1] != 0]
return errs[-1][2]
except IndexError:
# odd case where there were no errors
#TODO
return 'no last error'
@property
def output(self):
"""Get the output of the entire session."""
return '\n'.join(['\n'.join(l[2]) for l in self.log])
def _stream_write(self, line):
if self._stream:
self._stream.write(line)
self._stream.write(os.linesep)
def _exec(self, cmd):
shell = self._shell
        if isinstance(cmd, six.string_types):
            shell = True
popen_kwargs = {
'shell': shell,
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT
}
if self._env:
popen_kwargs['env'] = self._env
if self._cwd:
popen_kwargs['cwd'] = self._cwd
p = subprocess.Popen(
cmd, **popen_kwargs
)
output = []
        for line in iter(p.stdout.readline, b''):
line = line.decode('utf-8').strip()
self._stream_write(line)
output.append(line)
p.wait()
log = [
' '.join(cmd) if not shell else cmd,
p.returncode,
output
]
self.log.append(log)
return p.returncode, '\n'.join(output)
def check_output(self, cmd):
"""Wrapper for subprocess.check_output."""
ret, output = self._exec(cmd)
        if ret != 0:
raise CommandError(self)
return output
def check_call(self, cmd):
"""Fake the interface of subprocess.call()."""
ret, _ = self._exec(cmd)
        if ret != 0:
raise CommandError(self)
return ret
def call(self, cmd):
"""Fake the interface of subprocess.call()."""
ret, _ = self._exec(cmd)
return ret
@staticmethod
def unpack_args(*args, **kwargs):
gnu = kwargs.pop('gnu', False)
assert isinstance(gnu, bool)
def _transform(argname):
"""Transform a python identifier into a
shell-appropriate argument name
"""
if len(argname) == 1:
return '-{}'.format(argname)
return '--{}'.format(argname.replace('_', '-'))
ret = []
for k, v in kwargs.items():
if isinstance(v, list):
for item in v:
if gnu:
ret.append('{}={}'.format(
_transform(k),
str(item)
))
else:
ret.extend([
_transform(k),
str(item)
])
else:
if gnu:
ret.append('{}={}'.format(_transform(k), str(v)))
else:
ret.extend([
_transform(k),
str(v)
])
if len(args):
for item in args:
ret.append(_transform(item))
return ret
@staticmethod
def unpack_pargs(positional_args, param_kwargs, gnu=False):
"""Unpack multidict and positional args into a
list appropriate for subprocess.
:param param_kwargs:
``ParamDict`` storing '--param' style data.
:param positional_args: flags
:param gnu:
if True, long-name args are unpacked as:
--parameter=argument
otherwise, they are unpacked as:
--parameter argument
:returns: list appropriate for sending to subprocess
"""
def _transform(argname):
"""Transform a python identifier into a
shell-appropriate argument name
"""
if len(argname) == 1:
return '-{}'.format(argname)
return '--{}'.format(argname.replace('_', '-'))
args = []
for item in param_kwargs.keys():
for value in param_kwargs.getlist(item):
if gnu:
args.append('{}={}'.format(
_transform(item),
value
))
else:
args.extend([
_transform(item),
value
])
if positional_args:
for item in positional_args:
args.append(_transform(item))
return args
class CommandSessionMixin(object):
def __init__(self, session=None):
self.session = session or CommandSession()
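# A minimal usage sketch (the commands shown are illustrative; any argv-style
# list works). CommandSession mirrors the subprocess helpers while keeping a
# per-session log of (command, returncode, output) entries:
#
#     session = CommandSession(stream=True)
#     session.check_call(['ls', '-la'])      # raises CommandError on rc != 0
#     out = session.check_output(['uname'])  # returns the captured output
#     print(session.last_returncode, session.output)
#
# unpack_args turns identifiers into CLI flags, e.g.:
#     CommandSession.unpack_args('v', force=True)  # -> ['--force', 'True', '-v']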
|
{
"content_hash": "02f54bf5605e112d63f398337e4a02c5",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 80,
"avg_line_length": 29.836575875486382,
"alnum_prop": 0.5182576943140323,
"repo_name": "mikewaters/command-session",
"id": "fe1b6398a905e039e3c07297f0bbc8ccc7a66160",
"size": "7692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commandsession/commandsession.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8723"
}
],
"symlink_target": ""
}
|
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from cnntools.utils import add_bkovacs_to_path, add_caffe_to_path
class Command(BaseCommand):
    args = '<trainingfile_source_relpath> <weights_source_relpath> <trainingfile_target_relpath> <weights_target_relpath> <suffix>'
help = 'Copies weights from one branch of the siamese network to the other, so it can be trained with untied weights'
def handle(self, *args, **option):
if len(args) != 5:
print 'Incorrect number of arguments!'
print 'Usage: ./manage.py cnntools_untie_siamese [options] %s' % Command.args
return
trainingfile_source_relpath = args[0]
weights_source_relpath = args[1]
trainingfile_target_relpath = args[2]
weights_target_relpath = args[3]
suffix = args[4]
trainingfile_source_path = os.path.join(settings.CAFFE_ROOT, trainingfile_source_relpath)
trainingfile_target_path = os.path.join(settings.CAFFE_ROOT, trainingfile_target_relpath)
add_caffe_to_path()
add_bkovacs_to_path()
import caffe
# Change working directory to Caffe
os.chdir(settings.CAFFE_ROOT)
net_source = caffe.Net(
trainingfile_source_path,
os.path.join(settings.CAFFE_ROOT, weights_source_relpath),
caffe.TEST
)
net_target = caffe.Net(
trainingfile_target_path,
caffe.TEST
)
# Get all source names
names = net_target.params.keys()
print 'Computing param mapping...'
param_mapping = {}
for name in names:
if suffix in name:
orig_name = name.replace(suffix, '')
if orig_name not in names:
                    print 'Warning: the corresponding name to {} ({}) is not among the blob names, skipping weight transfer!'.format(name, orig_name)
else:
param_mapping[name] = orig_name
        for t, s in param_mapping.iteritems():
if s not in net_source.params:
                print 'Couldn\'t find {} among the source net params, skipping...'.format(s)
continue
# Weights and biases
for blob_idx in (0, 1):
print '%s %s %s <-- %s %s %s' % (
t, blob_idx, net_target.params[t][blob_idx].data.shape,
s, blob_idx, net_source.params[s][blob_idx].data.shape,
)
net_target.params[s][blob_idx].data[...] = net_source.params[s][blob_idx].data
net_target.params[t][blob_idx].data[...] = net_source.params[s][blob_idx].data
net_target.save(
os.path.join(settings.CAFFE_ROOT, weights_target_relpath),
)
print 'Done.'
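# Example invocation (all five positional arguments are required; the paths
# and suffix below are illustrative):
#   ./manage.py cnntools_untie_siamese train_src.prototxt weights_src.caffemodel \
#       train_tgt.prototxt weights_tgt.caffemodel _p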
|
{
"content_hash": "faae8c58ad85efb5ded45372c92116ef",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 125,
"avg_line_length": 36.857142857142854,
"alnum_prop": 0.5835095137420718,
"repo_name": "kovibalu/cnntools",
"id": "a52581409baa0a2ca89f05ebe5693d6bd69f84c9",
"size": "2838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "management/commands/cnntools_untie_siamese.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "1155"
},
{
"name": "HTML",
"bytes": "9727"
},
{
"name": "Python",
"bytes": "180617"
},
{
"name": "Shell",
"bytes": "134"
}
],
"symlink_target": ""
}
|
import logging
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# raven
INSTALLED_APPS += ('raven.contrib.django.raven_compat', )
# serve static files via whitenoise
WHITENOISE_MIDDLEWARE = ('whitenoise.middleware.WhiteNoiseMiddleware', )
MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS')
INSTALLED_APPS += ('gunicorn', )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
INSTALLED_APPS += ('storages', )
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# COMPRESSOR
# ------------------------------------------------------------------------------
COMPRESS_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
COMPRESS_URL = STATIC_URL
COMPRESS_ENABLED = env.bool('COMPRESS_ENABLED', default=True)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL')
DATABASES['default']['ENGINE'] = 'django.contrib.gis.db.backends.postgis'
# sentry
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env(
'DJANGO_SENTRY_CLIENT',
default='raven.contrib.django.raven_compat.DjangoClient'
)
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry'],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'dsn': SENTRY_DSN,
}
# Custom Admin URL, use {% raw %}{% url 'admin:index' %}{% endraw %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
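# For reference, the environment variables read above which have no default
# and must therefore be set (values shown are placeholders):
#
#   DJANGO_SECRET_KEY=<random string>
#   DJANGO_ALLOWED_HOSTS=example.com,www.example.com
#   DJANGO_AWS_ACCESS_KEY_ID=<key>
#   DJANGO_AWS_SECRET_ACCESS_KEY=<secret>
#   DJANGO_AWS_STORAGE_BUCKET_NAME=<bucket>
#   DATABASE_URL=postgres://user:pass@host:5432/dbname
#   DJANGO_SENTRY_DSN=<dsn>
#   DJANGO_ADMIN_URL=<path>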
|
{
"content_hash": "2b3b50abc771ba4a4b499ca67ab0238f",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 98,
"avg_line_length": 32.75496688741722,
"alnum_prop": 0.5646987464617873,
"repo_name": "KlubJagiellonski/poznaj-app-backend",
"id": "3d9029b2ab70806987f446601f6e1305344ce07d",
"size": "4946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/production.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1597"
},
{
"name": "Dockerfile",
"bytes": "533"
},
{
"name": "HTML",
"bytes": "1064"
},
{
"name": "Makefile",
"bytes": "1020"
},
{
"name": "Python",
"bytes": "41829"
},
{
"name": "Shell",
"bytes": "684"
}
],
"symlink_target": ""
}
|
import itertools
import random
import requests
import logging
registry = {}
class Plugin(type):
def __new__(metacls, name, bases, namespace, **kwargs):
cls = type.__new__(metacls, name, bases, dict(namespace))
if hasattr(cls, "__provider_name__"):
registry[cls.__provider_name__] = cls
return cls
class TeleportationProvider(object):
__metaclass__ = Plugin
GEOIP_URL = 'http://geome-1042.appspot.com/'
def __init__(self, name, countries, debug=False, **kwargs):
self.name = name
self.countries = countries
self.debug = debug
self.kwargs = kwargs
def __repr__(self):
return "{}: {}".format(self.__provider_name__, self.name)
def can_teleport_to(self, place):
return place in self.countries
def teleport(self, place):
"""
try teleporting
"""
        raise NotImplementedError
@property
def is_proxy(self):
return False
def where_we_teleported(self):
return requests.get(self.GEOIP_URL, proxies=self.get_proxies()).text.lower()
def go_home(self):
pass
def get_proxies(self):
return {}
def get_peer_address(self):
raise NotImplementedError
def _shuffle(i):
i = list(i)
random.shuffle(i)
return i
def _construct(args):
if args["type"] not in registry:
raise RuntimeError("unsupported teleporation provider '{}'".format(args["type"]))
return registry[args["type"]](**args)
class Teleport(object):
def __init__(self, config):
self.config = config
def get_sorted_providers(self):
by_priority = lambda provider: provider["priority"]
sorted_by_priority = sorted(self.config["providers"], key=by_priority)
grouped_by_priority = itertools.groupby(sorted_by_priority, key=by_priority)
res = []
for _, providers in grouped_by_priority:
for args in _shuffle(providers):
res.append(_construct(args))
return res
def who_can_teleport_to(self, place):
return [
provider for provider in self.get_sorted_providers()
if provider.can_teleport_to(place)
]
def goto(self, place):
"""
If you want to go somewhere, goto is the best way to get there.
- Ken Thompson
"""
providers = self.who_can_teleport_to(place)
if not providers:
raise RuntimeError('no providers for "{}"'.format(place))
logging.info('providers for %s: %s', place, providers)
_errors = []
for provider in providers:
logging.info('trying provider: {}'.format(provider))
try:
if provider.teleport(place):
return provider
logging.error('provider {} didn\'t work out, going home'.format(provider))
provider.go_home()
except Exception as e:
logging.exception('provider %s failed', provider)
_errors.append(e)
raise RuntimeError('failed to teleport to "{}" (errors: {})'.format(place, _errors))
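# A minimal sketch of how a provider plugs into the registry (the class and
# config below are hypothetical, not shipped providers):
#
#     class NullProvider(TeleportationProvider):
#         __provider_name__ = "null"
#
#         def teleport(self, place):
#             return True
#
#     tp = Teleport({"providers": [
#         {"type": "null", "priority": 0, "name": "noop", "countries": ["nl"]},
#     ]})
#     tp.goto("nl")  # returns the NullProvider instance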
|
{
"content_hash": "cd52642ecfd8e3224d19ed94e5b909b0",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 92,
"avg_line_length": 25.612903225806452,
"alnum_prop": 0.5815491183879093,
"repo_name": "EverythingMe/teleport",
"id": "e70ea4e129a3e568f75a79b67abe376b2482fe54",
"size": "3176",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "teleport/teleport.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "36038"
}
],
"symlink_target": ""
}
|
"""Event Related PAC."""
import json
with open("../../paper.json", 'r') as f: cfg = json.load(f) # noqa
import numpy as np
from tensorpac import EventRelatedPac
from tensorpac.signals import pac_signals_wavelet
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn-poster')
sns.set_style("white")
plt.rc('font', family=cfg["font"])
###############################################################################
n_epochs = 300
n_times = 1000
sf = 1000.
###############################################################################
x1, tvec = pac_signals_wavelet(f_pha=10, f_amp=100, n_epochs=n_epochs, noise=2,
n_times=n_times, sf=sf)
x2 = np.random.rand(n_epochs, 1000)
x = np.concatenate((x1, x2), axis=1)
time = np.arange(x.shape[1]) / sf
p = EventRelatedPac(f_pha=[9, 11], f_amp='hres')
pha = p.filter(sf, x, ftype='phase', n_jobs=-1)
amp = p.filter(sf, x, ftype='amplitude', n_jobs=-1)
plt.figure(figsize=(14, 6))
for n_m, (method, nb) in enumerate(zip(['circular', 'gc'], ['A', 'B'])):
    # to be fair in the comparison between ERPAC and gcERPAC, the smoothing
    # parameter of the gcERPAC is turned off, but results could look much
    # better if, for example, we add `smooth=20`
erpac = p.fit(pha, amp, method=method, n_jobs=-1).squeeze()
plt.subplot(1, 2, n_m + 1)
p.pacplot(erpac, time, p.yvec, xlabel='Time (second)', cmap=cfg["cmap"],
ylabel='Frequency for amplitude (Hz)', title=p.method,
vmin=0., rmaxis=True, fz_labels=20, fz_title=22, fz_cblabel=20)
plt.axvline(1., linestyle='--', color='w', linewidth=2)
if n_m == 1: plt.ylabel('')
ax = plt.gca()
ax.text(*tuple(cfg["nb_pos"]), nb, transform=ax.transAxes, **cfg["nb_cfg"])
plt.tight_layout()
plt.savefig(f"../figures/Fig5.png", dpi=300, bbox_inches='tight')
plt.show()
|
{
"content_hash": "b39058e8bac8dc6d39c7107497e8fe34",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 35.90384615384615,
"alnum_prop": 0.5838243170862346,
"repo_name": "EtienneCmb/tensorpac",
"id": "40bbbd892339c57ab23209785e61db2dbd96a560",
"size": "1867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paper/manuscript/code/fig_5_erpac.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "354"
},
{
"name": "Python",
"bytes": "140014"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
}
|
""" Provides style checking for java and python using checkstyle and pylint. """
import fnmatch
import subprocess
import os
import sys
import re
import xml.etree.ElementTree as ET
CHECKSTYLE_CONFIG = "javastyle.xml"
PYLINT_CONFIG = "pythonstyle.conf"
PYLINT_ERROR_FMT = re.compile(r"""
^(?P<file>.+?):(?P<line>[0-9]+):\ # file name and line number
\[(?P<type>[a-z])(?P<errno>\d+)? # message type and error number, e.g. E0101
(,\ (?P<hint>.+))?\]\ # optional class or function name
(?P<msg>.*) # finally, the error message
""", re.IGNORECASE|re.VERBOSE)
def hg_get_commit_changes():
""" Runs the hg status command and parses its output. Returns a list of
changed files. """
files = str(subprocess.check_output(["hg", "st", "-man"]))
return [line for line in files.splitlines() if line != '']
def hg_get_push_changes():
""" Returns a list of all the changed files which are to be pushed
to the remote repository. """
commits = ""
try:
commits = str(subprocess.check_output(["hg", "outgoing", "-q"]))
except subprocess.CalledProcessError:
pass
commits = [line for line in commits.splitlines() if line != '']
commits = [line.partition(':')[2] for line in commits]
all_files = set()
for commit in commits:
files = str(subprocess.check_output(["hg", "st", "-man", "--change",
commit]))
files = [line for line in files.splitlines() if line != '']
for file_name in files:
all_files.add(file_name)
return list(all_files)
def hg_get_all_files():
""" Returns a list of all the files in the repository. """
files = str(subprocess.check_output(["hg", "st", "-man", "--all"]))
return [line for line in files.splitlines() if line != '']
def run_checkstyle(file_name):
""" Runs the checkstyle command on the given file and returns
a list of all the errors. """
# Run checkstyle.
output = None
try:
with open(os.devnull, "w") as fnull:
cmd = ["checkstyle", "-c", CHECKSTYLE_CONFIG, "-f", "xml", file_name]
subprocess.check_output(cmd, stderr=fnull)
except subprocess.CalledProcessError as err:
output = err.output
# If there were no errors, return an empty list.
    if output is None:
return []
# Parse checkstyle output.
root = None
try:
root = ET.fromstring(output)
except ET.ParseError as err:
return [(0, "Could not parse checkstyle output: %s\n\
Checkstyle output: %s" % (err, output))]
# There is no way it is None.
    assert root is not None
assert len(root) == 1
# Collect all error messages.
result = []
for child in root:
for err in child:
line = err.attrib["line"]
message = err.attrib["message"]
result.append((line, message))
return result
def check_java_files(all_files):
""" Checks all java files from the given list for stylistic mistakes. """
files = [path for path in all_files if fnmatch.fnmatch(path, '*.java')]
error_count = 0
for filename in files:
errors = run_checkstyle(filename)
error_count += len(errors)
for err in errors:
print "%s:%s - %s" % (filename, err[0], err[1])
return error_count
def run_pylint(file_name):
""" Runs the pylint command on the given file and returns
a list of all the errors. """
    # Run pylint.
output = None
try:
with open(os.devnull, "w") as fnull:
cmd = ["pylint", "--rcfile", PYLINT_CONFIG, file_name]
subprocess.check_output(cmd, stderr=fnull)
except subprocess.CalledProcessError as err:
output = err.output
# If there were no errors, return an empty list.
    if output is None:
return []
result = []
for line in output.splitlines():
match = PYLINT_ERROR_FMT.match(line)
        if match is None:
result.append((0, "Could not parse pylint output: %s" % line))
continue
line, hint, msg = match.group('line', 'hint', 'msg')
result.append((line, "in %s, %s" % (hint, msg) if hint != None else msg))
return result
def check_python_files(all_files):
""" Checks all python files from the given list for stylistic mistakes. """
files = [path for path in all_files if fnmatch.fnmatch(path, '*.py')]
error_count = 0
for filename in files:
errors = run_pylint(filename)
error_count += len(errors)
for err in errors:
print "%s:%s - %s" % (filename, err[0], err[1])
return error_count
def main(files):
""" Main checkstyle method. """
error_count = 0
error_count += check_java_files(files)
error_count += check_python_files(files)
sys.exit(error_count)
def parse_args():
""" Parses arguments and runs main. """
import argparse
parser = argparse.ArgumentParser("checkstyle")
parser.add_argument('files', type=str, nargs='*',
help="files to process")
parser.add_argument('-a', '--all', action='store_true',
help='all the files in the repository')
parser.add_argument('-c', '--commit', action='store_true',
help='files in the last commit')
parser.add_argument('-p', '--push', action='store_true',
help='files which are to be pushed')
args = parser.parse_args()
files = args.files
if args.all:
files += hg_get_all_files()
if args.commit:
files += hg_get_commit_changes()
if args.push:
files += hg_get_push_changes()
files = list(set(files))
main(files)
if __name__ == "__main__":
parse_args()
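# Example invocations (assuming checkstyle, pylint and their config files are
# available; the script exits with the number of style errors found):
#   python checkstyle.py -c    # files changed in the last commit
#   python checkstyle.py -p    # files about to be pushed
#   python checkstyle.py -a    # every file in the repository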
|
{
"content_hash": "0efd8053307ca887702ad4ed2d066c56",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 80,
"avg_line_length": 28.873015873015873,
"alnum_prop": 0.6263514751695071,
"repo_name": "bmteam/pink-ponies",
"id": "97d3d9c7924d193ab343948a3e5bd876c5767849",
"size": "5476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checkstyle.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "78853"
},
{
"name": "Python",
"bytes": "5476"
}
],
"symlink_target": ""
}
|
from tests.utils import pool, storage
from tests.utils.wallet import create_and_open_wallet
from indy import wallet, signus, ledger
from indy.pool import close_pool_ledger
from indy.error import ErrorCode, IndyError
import pytest
import logging
logging.basicConfig(level=logging.DEBUG)
@pytest.fixture(autouse=True)
def before_after_each():
storage.cleanup()
yield
storage.cleanup()
@pytest.fixture
async def pool_handle():
handle = await pool.create_and_open_pool_ledger("pool_1")
yield handle
await close_pool_ledger(handle)
@pytest.fixture
async def wallet_handle():
handle = await create_and_open_wallet()
yield handle
await wallet.close_wallet(handle)
@pytest.mark.asyncio
async def test_sign_and_submit_request_works(wallet_handle, pool_handle):
(my_did, _, _) = await signus.create_and_store_my_did(wallet_handle, '{"seed":"00000000000000000000000000000My1"}')
(trustee_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
'{"seed":"000000000000000000000000Trustee1"}')
nym_request = await ledger.build_nym_request(trustee_did, my_did, None, None, None)
await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did, nym_request)
@pytest.mark.asyncio
async def test_sign_and_submit_request_works_for_invalid_pool_handle(wallet_handle, pool_handle):
(my_did, _, _) = await signus.create_and_store_my_did(wallet_handle, '{"seed":"00000000000000000000000000000My1"}')
(trustee_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
'{"seed":"000000000000000000000000Trustee1"}')
nym_request = await ledger.build_nym_request(trustee_did, my_did, None, None, None)
invalid_pool_handle = pool_handle + 1
with pytest.raises(IndyError) as e:
await ledger.sign_and_submit_request(invalid_pool_handle, wallet_handle, trustee_did,
nym_request)
assert ErrorCode.PoolLedgerInvalidPoolHandle == e.value.error_code
@pytest.mark.asyncio
async def test_sign_and_submit_request_works_for_invalid_wallet_handle(wallet_handle, pool_handle):
(my_did, _, _) = await signus.create_and_store_my_did(wallet_handle, '{"seed":"00000000000000000000000000000My1"}')
(trustee_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
'{"seed":"000000000000000000000000Trustee1"}')
nym_request = await ledger.build_nym_request(trustee_did, my_did, None, None, None)
invalid_wallet_handle = wallet_handle + 1
with pytest.raises(IndyError) as e:
await ledger.sign_and_submit_request(pool_handle, invalid_wallet_handle, trustee_did,
nym_request)
assert ErrorCode.WalletInvalidHandle == e.value.error_code
@pytest.mark.asyncio
async def test_sign_and_submit_request_works_for_incompatible_wallet_and_pool(pool_handle):
wallet_handle = await create_and_open_wallet(pool_name="pool_2")
(my_did, _, _) = await signus.create_and_store_my_did(wallet_handle, '{"seed":"00000000000000000000000000000My1"}')
(trustee_did, _, _) = await signus.create_and_store_my_did(wallet_handle,
'{"seed":"000000000000000000000000Trustee1"}')
nym_request = await ledger.build_nym_request(trustee_did, my_did, None, None, None)
with pytest.raises(IndyError) as e:
await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did,
nym_request)
assert ErrorCode.WalletIncompatiblePoolError == e.value.error_code
await wallet.close_wallet(wallet_handle)
|
{
"content_hash": "e4348286f9de7b97c6e420595fb4c425",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 119,
"avg_line_length": 45.95180722891566,
"alnum_prop": 0.6607236497115889,
"repo_name": "MRJCrunch/indy-sdk",
"id": "0d2a59eb89d331c27a81fc8c29f6a34f3316968b",
"size": "3814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wrappers/python/tests/ledger/test_sign_and_submit_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "422160"
},
{
"name": "C++",
"bytes": "13207"
},
{
"name": "Groovy",
"bytes": "2445"
},
{
"name": "Java",
"bytes": "356302"
},
{
"name": "Objective-C",
"bytes": "620946"
},
{
"name": "Objective-C++",
"bytes": "590413"
},
{
"name": "Python",
"bytes": "267478"
},
{
"name": "Ruby",
"bytes": "4353"
},
{
"name": "Rust",
"bytes": "1487087"
},
{
"name": "Shell",
"bytes": "3186"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Device',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('espID', models.CharField(max_length=200)),
('name', models.CharField(max_length=200)),
('online', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='PWM',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('room', models.CharField(max_length=200)),
('topic', models.CharField(max_length=200)),
('name', models.CharField(max_length=200)),
('pretty_name', models.CharField(default='', max_length=200)),
('setting', models.IntegerField(default=0)),
('on', models.BooleanField(default=False)),
('channel', models.IntegerField(default=0)),
('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='esp.Device')),
],
),
migrations.CreateModel(
name='Switch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('room', models.CharField(max_length=200)),
('topic', models.CharField(max_length=200)),
('name', models.CharField(max_length=200)),
('pretty_name', models.CharField(default='', max_length=200)),
('on', models.BooleanField(default=False)),
('channel', models.IntegerField(default=0)),
('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='esp.Device')),
('pwm', models.ManyToManyField(to='esp.PWM')),
],
),
]
|
{
"content_hash": "a6591ee703549c474a0860875010c759",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 114,
"avg_line_length": 41.90384615384615,
"alnum_prop": 0.5488756310234052,
"repo_name": "amillar2/light-django",
"id": "fcc0030da5d1e2e9c434ae671f96207fd3844750",
"size": "2251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "esp/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45763"
},
{
"name": "HTML",
"bytes": "38048"
},
{
"name": "JavaScript",
"bytes": "197784"
},
{
"name": "Python",
"bytes": "23551"
}
],
"symlink_target": ""
}
|
from git.util import (
LazyMixin,
Iterable,
)
from .symbolic import SymbolicReference
__all__ = ["Reference"]
#{ Utilities
def require_remote_ref_path(func):
"""A decorator raising a TypeError if we are not a valid remote, based on the path"""
def wrapper(self, *args):
if not self.is_remote():
raise ValueError("ref path does not point to a remote reference: %s" % self.path)
return func(self, *args)
# END wrapper
wrapper.__name__ = func.__name__
return wrapper
#} END utilities
class Reference(SymbolicReference, LazyMixin, Iterable):
"""Represents a named reference to any object. Subclasses may apply restrictions though,
i.e. Heads can only point to commits."""
__slots__ = tuple()
_points_to_commits_only = False
_resolve_ref_on_create = True
_common_path_default = "refs"
def __init__(self, repo, path, check_path=True):
"""Initialize this instance
:param repo: Our parent repository
:param path:
Path relative to the .git/ directory pointing to the ref in question, i.e.
refs/heads/master
:param check_path: if False, you can provide any path. Otherwise the path must start with the
default path prefix of this type."""
if check_path and not path.startswith(self._common_path_default + '/'):
raise ValueError("Cannot instantiate %r from path %s" % (self.__class__.__name__, path))
super(Reference, self).__init__(repo, path)
def __str__(self):
return self.name
#{ Interface
def set_object(self, object, logmsg=None):
"""Special version which checks if the head-log needs an update as well"""
oldbinsha = None
if logmsg is not None:
head = self.repo.head
if not head.is_detached and head.ref == self:
oldbinsha = self.commit.binsha
# END handle commit retrieval
# END handle message is set
super(Reference, self).set_object(object, logmsg)
if oldbinsha is not None:
# /* from refs.c in git-source
# * Special hack: If a branch is updated directly and HEAD
# * points to it (may happen on the remote side of a push
# * for example) then logically the HEAD reflog should be
# * updated too.
# * A generic solution implies reverse symref information,
# * but finding all symrefs pointing to the given branch
# * would be rather costly for this rare event (the direct
# * update of a branch) to be worth it. So let's cheat and
# * check with HEAD only which should cover 99% of all usage
# * scenarios (even 100% of the default ones).
# */
self.repo.head.log_append(oldbinsha, logmsg)
# END check if the head
    # NOTE: Don't have to overwrite properties, as they will only work without the log
@property
def name(self):
""":return: (shortest) Name of this reference - it may contain path components"""
        # the first two path tokens can be removed, as they are
        # refs/heads or refs/tags or refs/remotes
tokens = self.path.split('/')
if len(tokens) < 3:
return self.path # could be refs/HEAD
return '/'.join(tokens[2:])
@classmethod
def iter_items(cls, repo, common_path=None):
"""Equivalent to SymbolicReference.iter_items, but will return non-detached
references as well."""
return cls._iter_items(repo, common_path)
#}END interface
#{ Remote Interface
@property
@require_remote_ref_path
def remote_name(self):
"""
:return:
Name of the remote we are a reference of, such as 'origin' for a reference
named 'origin/master'"""
tokens = self.path.split('/')
# /refs/remotes/<remote name>/<branch_name>
return tokens[2]
@property
@require_remote_ref_path
def remote_head(self):
""":return: Name of the remote head itself, i.e. master.
:note: The returned name is usually not qualified enough to uniquely identify
a branch"""
tokens = self.path.split('/')
return '/'.join(tokens[3:])
#} END remote interface
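# A minimal usage sketch (the repository path is illustrative):
#
#     from git import Repo
#
#     repo = Repo('/path/to/repo')
#     for ref in Reference.iter_items(repo):
#         print(ref.name, ref.path)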
|
{
"content_hash": "2ee22c887f3cd4ac403a13a5308d8226",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 101,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.6074551311550851,
"repo_name": "hvnsweeting/GitPython",
"id": "8741ebb958b8dff2120260ac07ab68a2be36734b",
"size": "4346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "git/refs/reference.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2344"
},
{
"name": "Python",
"bytes": "660822"
},
{
"name": "Ruby",
"bytes": "46005"
}
],
"symlink_target": ""
}
|
from django import forms
from django.apps import apps
from django.contrib.auth import get_permission_codename, get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.template.defaultfilters import slugify
from django.utils.encoding import force_str
from django.utils.translation import gettext, gettext_lazy as _
from cms import api
from cms.apphook_pool import apphook_pool
from cms.cache.permissions import clear_permission_cache
from cms.constants import PAGE_TYPES_ID, PUBLISHER_STATE_DIRTY, ROOT_USER_LEVEL
from cms.exceptions import PluginLimitReached
from cms.extensions import extension_pool
from cms.forms.validators import validate_overwrite_url, validate_relative_url, validate_url_uniqueness
from cms.forms.widgets import AppHookSelect, ApplicationConfigSelect, UserSelectAdminWidget
from cms.models import (
CMSPlugin, GlobalPagePermission, Page, PagePermission, PageType, PageUser, PageUserGroup, Placeholder, Title,
TreeNode,
)
from cms.models.permissionmodels import User
from cms.plugin_pool import plugin_pool
from cms.signals.apphook import set_restart_trigger
from cms.utils.compat.forms import UserChangeForm
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import get_language_list, get_language_object
from cms.utils.permissions import (
get_current_user, get_subordinate_groups, get_subordinate_users, get_user_permission_level,
)
from menus.menu_pool import menu_pool
def get_permission_accessor(obj):
User = get_user_model()
if isinstance(obj, (PageUser, User,)):
rel_name = 'user_permissions'
else:
rel_name = 'permissions'
return getattr(obj, rel_name)
def get_page_changed_by_filter_choices():
# This is not site-aware
# Been like this forever
# Would be nice for it to filter out by site
values = (
Page
.objects
.filter(publisher_is_draft=True)
.distinct()
.order_by('changed_by')
.values_list('changed_by', flat=True)
)
yield ('', _('All'))
for value in values:
yield (value, value)
def get_page_template_filter_choices():
yield ('', _('All'))
yield from get_cms_setting('TEMPLATES')
def save_permissions(data, obj):
models = (
(Page, 'page'),
(PageUser, 'pageuser'),
(PageUserGroup, 'pageuser'),
(PagePermission, 'pagepermission'),
)
if not obj.pk:
        # save obj, otherwise we can't assign permissions to it
obj.save()
permission_accessor = get_permission_accessor(obj)
for model, name in models:
content_type = ContentType.objects.get_for_model(model)
for key in ('add', 'change', 'delete'):
# add permission `key` for model `model`
codename = get_permission_codename(key, model._meta)
permission = Permission.objects.get(content_type=content_type, codename=codename)
field = f'can_{key}_{name}'
if data.get(field):
permission_accessor.add(permission)
elif field in data:
permission_accessor.remove(permission)
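# For illustration, `data` is expected to carry flags named after the fields
# built above, e.g. (hypothetical values):
#     {'can_add_page': True, 'can_change_page': True, 'can_delete_page': False,
#      'can_add_pageuser': False, 'can_change_pagepermission': True, ...}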
class CopyPermissionForm(forms.Form):
"""
Holds the specific field for permissions
"""
copy_permissions = forms.BooleanField(
label=_('Copy permissions'),
required=False,
initial=True,
)
class BasePageForm(forms.ModelForm):
_user = None
_site = None
_language = None
title = forms.CharField(label=_("Title"), max_length=255, widget=forms.TextInput(),
help_text=_('The default title'))
slug = forms.CharField(label=_("Slug"), max_length=255, widget=forms.TextInput(),
help_text=_('The part of the title that is used in the URL'))
menu_title = forms.CharField(label=_("Menu Title"), widget=forms.TextInput(),
help_text=_('Overwrite what is displayed in the menu'), required=False)
page_title = forms.CharField(label=_("Page Title"), widget=forms.TextInput(),
help_text=_('Overwrites what is displayed at the top of your browser or in bookmarks'),
required=False)
meta_description = forms.CharField(label=_('Description meta tag'), required=False,
widget=forms.Textarea(attrs={'maxlength': '320', 'rows': '4'}),
help_text=_('A description of the page used by search engines.'),
max_length=320)
class Meta:
model = Page
fields = []
def clean_slug(self):
slug = slugify(self.cleaned_data['slug'])
if not slug:
raise ValidationError(_("Slug must not be empty."))
return slug
class AddPageForm(BasePageForm):
source = forms.ModelChoiceField(
label=_('Page type'),
queryset=Page.objects.filter(
is_page_type=True,
publisher_is_draft=True,
),
required=False,
)
parent_node = forms.ModelChoiceField(
queryset=TreeNode.objects.all(),
required=False,
widget=forms.HiddenInput(),
)
class Meta:
model = Page
fields = ['source']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
source_field = self.fields.get('source')
if not source_field or source_field.widget.is_hidden:
return
root_page = PageType.get_root_page(site=self._site)
if root_page:
# Set the choicefield's choices to the various page_types
descendants = root_page.get_descendant_pages().filter(is_page_type=True)
titles = Title.objects.filter(page__in=descendants, language=self._language)
choices = [('', '---------')]
choices.extend((title.page_id, title.title) for title in titles)
source_field.choices = choices
else:
choices = []
if len(choices) < 2:
source_field.widget = forms.HiddenInput()
def clean(self):
data = self.cleaned_data
if self._errors:
# Form already has errors, best to let those be
# addressed first.
return data
parent_node = data.get('parent_node')
if parent_node:
slug = data['slug']
parent_path = parent_node.item.get_path(self._language)
path = f'{parent_path}/{slug}' if parent_path else slug
else:
path = data['slug']
try:
# Validate the url
validate_url_uniqueness(
self._site,
path=path,
language=self._language,
)
except ValidationError as error:
self.add_error('slug', error)
else:
data['path'] = path
return data
def clean_parent_node(self):
parent_node = self.cleaned_data.get('parent_node')
if parent_node and parent_node.site_id != self._site.pk:
raise ValidationError("Site doesn't match the parent's page site")
return parent_node
def create_translation(self, page):
data = self.cleaned_data
title_kwargs = {
'page': page,
'language': self._language,
'slug': data['slug'],
'path': data['path'],
'title': data['title'],
}
if 'menu_title' in data:
title_kwargs['menu_title'] = data['menu_title']
if 'page_title' in data:
title_kwargs['page_title'] = data['page_title']
if 'meta_description' in data:
title_kwargs['meta_description'] = data['meta_description']
return api.create_title(**title_kwargs)
def from_source(self, source, parent=None):
new_page = source.copy(
site=self._site,
parent_node=parent,
language=self._language,
translations=False,
permissions=False,
extensions=False,
)
new_page.update(is_page_type=False, in_navigation=True)
return new_page
def get_template(self):
return Page.TEMPLATE_DEFAULT
def save(self, *args, **kwargs):
source = self.cleaned_data.get('source')
parent = self.cleaned_data.get('parent_node')
if source:
new_page = self.from_source(source, parent=parent)
for lang in source.get_languages():
source._copy_contents(new_page, lang)
else:
new_page = super().save(commit=False)
new_page.template = self.get_template()
new_page.set_tree_node(self._site, target=parent, position='last-child')
new_page.save()
translation = self.create_translation(new_page)
if source:
extension_pool.copy_extensions(
source_page=source,
target_page=new_page,
languages=[translation.language],
)
is_first = not (
TreeNode
.objects
.get_for_site(self._site)
.exclude(pk=new_page.node_id)
.exists()
)
new_page.rescan_placeholders()
if is_first and not new_page.is_page_type:
            # it's the first page; publish it right away
new_page.publish(translation.language)
new_page.set_as_homepage(self._user)
new_page.clear_cache(menu=True)
return new_page
class AddPageTypeForm(AddPageForm):
menu_title = None
meta_description = None
page_title = None
source = forms.ModelChoiceField(
queryset=Page.objects.drafts(),
required=False,
widget=forms.HiddenInput(),
)
def get_or_create_root(self):
"""
Creates the root node used to store all page types
for the current site if it doesn't exist.
"""
root_page = PageType.get_root_page(site=self._site)
if not root_page:
root_page = Page(
publisher_is_draft=True,
in_navigation=False,
is_page_type=True,
)
root_page.set_tree_node(self._site)
root_page.save()
if not root_page.has_translation(self._language):
api.create_title(
language=self._language,
title=gettext('Page Types'),
page=root_page,
slug=PAGE_TYPES_ID,
path=PAGE_TYPES_ID,
)
return root_page.node
def clean_parent_node(self):
parent_node = super().clean_parent_node()
if parent_node and not parent_node.item.is_page_type:
raise ValidationError("Parent has to be a page type.")
if not parent_node:
# parent was not explicitly selected.
# fallback to the page types root
parent_node = self.get_or_create_root()
return parent_node
def from_source(self, source, parent=None):
new_page = source.copy(
site=self._site,
parent_node=parent,
language=self._language,
translations=False,
permissions=False,
extensions=False,
)
new_page.update(is_page_type=True, in_navigation=False)
return new_page
def save(self, *args, **kwargs):
new_page = super().save(*args, **kwargs)
if not self.cleaned_data.get('source'):
# User has created a page-type via "Add page"
# instead of from another page.
new_page.update(
draft_only=True,
is_page_type=True,
in_navigation=False,
)
return new_page
class DuplicatePageForm(AddPageForm):
source = forms.ModelChoiceField(
queryset=Page.objects.drafts(),
required=True,
widget=forms.HiddenInput(),
)
class ChangePageForm(BasePageForm):
translation_fields = (
'slug',
'title',
'meta_description',
'menu_title',
'page_title',
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.title_obj = self.instance.get_title_obj(
language=self._language,
fallback=False,
force_reload=True,
)
for field in self.translation_fields:
if field in self.fields:
self.fields[field].initial = getattr(self.title_obj, field)
def clean(self):
data = super().clean()
if self._errors:
# Form already has errors, best to let those be
# addressed first.
return data
page = self.instance
if page.is_home:
data['path'] = ''
return data
if self.title_obj.has_url_overwrite:
data['path'] = self.title_obj.path
return data
if 'slug' not in self.fields:
# the {% edit_title_fields %} template tag
# allows users to edit specific fields for a translation.
# as a result, slug might not always be there.
return data
if page.parent_page:
slug = data['slug']
parent_path = page.parent_page.get_path(self._language)
path = f'{parent_path}/{slug}' if parent_path else slug
else:
path = data['slug']
try:
# Validate the url
validate_url_uniqueness(
self._site,
path=path,
language=self._language,
exclude_page=page,
)
except ValidationError as error:
self.add_error('slug', error)
else:
data['path'] = path
return data
def save(self, commit=True):
data = self.cleaned_data
cms_page = super().save(commit=False)
translation_data = {field: data[field]
for field in self.translation_fields if field in data}
if 'path' in data:
# The path key is set if
# the slug field is present in the form,
# or if the page being edited is the home page,
# or if the translation has a url override.
translation_data['path'] = data['path']
update_count = cms_page.update_translations(
self._language,
publisher_state=PUBLISHER_STATE_DIRTY,
**translation_data
)
if self._language in cms_page.title_cache:
del cms_page.title_cache[self._language]
if update_count == 0:
api.create_title(language=self._language, page=cms_page, **translation_data)
# _update_title_path_recursive should be called if the new page is the parent
# of already created children in multilingual sites.
cms_page._update_title_path_recursive(self._language, slug=self.data['slug'])
cms_page.clear_cache(menu=True)
return cms_page
class PublicationDatesForm(forms.ModelForm):
class Meta:
model = Page
fields = ['publication_date', 'publication_end_date']
def save(self, *args, **kwargs):
page = super().save(*args, **kwargs)
page.clear_cache(menu=True)
return page
class AdvancedSettingsForm(forms.ModelForm):
from cms.forms.fields import PageSmartLinkField
_user = None
_site = None
_language = None
application_urls = forms.ChoiceField(label=_('Application'),
choices=(), required=False,
help_text=_('Hook application to this page.'))
overwrite_url = forms.CharField(label=_('Overwrite URL'), max_length=255, required=False,
help_text=_('Keep this field empty if standard path should be used.'))
xframe_options = forms.ChoiceField(
choices=Page._meta.get_field('xframe_options').choices,
label=_('X Frame Options'),
help_text=_('Whether this page can be embedded in other pages or websites'),
initial=Page._meta.get_field('xframe_options').default,
required=False
)
redirect = PageSmartLinkField(label=_('Redirect'), required=False,
help_text=_('Redirects to this URL.'),
placeholder_text=_('Start typing...'),
ajax_view='admin:cms_page_get_published_pagelist',
)
    # This is really a 'fake' field which does not correspond to any Page
    # attribute, but creates a stub field to be populated by JS.
application_configs = forms.CharField(
label=_('Application configurations'),
required=False,
widget=ApplicationConfigSelect,
)
fieldsets = (
(None, {
'fields': ('overwrite_url', 'redirect'),
}),
(_('Language independent options'), {
'fields': ('template', 'reverse_id', 'soft_root', 'navigation_extenders',
'application_urls', 'application_namespace', 'application_configs',
'xframe_options',)
})
)
class Meta:
model = Page
fields = [
'template', 'reverse_id', 'overwrite_url', 'redirect', 'soft_root', 'navigation_extenders',
'application_urls', 'application_namespace', "xframe_options",
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.title_obj = self.instance.get_title_obj(
language=self._language,
fallback=False,
force_reload=True,
)
if 'navigation_extenders' in self.fields:
navigation_extenders = self.get_navigation_extenders()
self.fields['navigation_extenders'].widget = forms.Select(
{}, [('', "---------")] + navigation_extenders)
if 'application_urls' in self.fields:
# Prepare a dict mapping the apps by class name ('PollApp') to
# their app_name attribute ('polls'), if any.
app_namespaces = {}
app_configs = {}
for hook in apphook_pool.get_apphooks():
app = apphook_pool.get_apphook(hook[0])
if app.app_name:
app_namespaces[hook[0]] = app.app_name
if app.app_config:
app_configs[hook[0]] = app
self.fields['application_urls'].widget = AppHookSelect(
attrs={'id': 'application_urls'},
app_namespaces=app_namespaces
)
self.fields['application_urls'].choices = [('', "---------")] + apphook_pool.get_apphooks()
page_data = self.data if self.data else self.initial
if app_configs:
self.fields['application_configs'].widget = ApplicationConfigSelect(
attrs={'id': 'application_configs'},
app_configs=app_configs,
)
if page_data.get('application_urls', False) and page_data['application_urls'] in app_configs:
configs = app_configs[page_data['application_urls']].get_configs()
self.fields['application_configs'].widget.choices = [(config.pk, force_str(config)) for config in configs]
try:
config = configs.get(namespace=self.initial['application_namespace'])
self.fields['application_configs'].initial = config.pk
except ObjectDoesNotExist:
# Provided apphook configuration doesn't exist (anymore),
# just skip it
# The user will choose another value anyway
pass
if 'redirect' in self.fields:
self.fields['redirect'].widget.language = self._language
self.fields['redirect'].initial = self.title_obj.redirect
if 'overwrite_url' in self.fields and self.title_obj.has_url_overwrite:
self.fields['overwrite_url'].initial = self.title_obj.path
def get_apphooks(self):
for hook in apphook_pool.get_apphooks():
yield (hook[0], apphook_pool.get_apphook(hook[0]))
def get_apphooks_with_config(self):
return {key: app for key, app in self.get_apphooks() if app.app_config}
def get_navigation_extenders(self):
return menu_pool.get_menus_by_attribute("cms_enabled", True)
def _check_unique_namespace_instance(self, namespace):
return Page.objects.drafts().on_site(self._site).filter(
application_namespace=namespace
).exclude(pk=self.instance.pk).exists()
def clean(self):
cleaned_data = super().clean()
if cleaned_data.get("overwrite_url"):
            # Assuming that the user enters a full URL in the overwrite_url input.
            # Here we validate it before publishing the page, and if it contains
            # reserved characters (e.g. $?:#), we add an error to the form.
# issue 6934
url = cleaned_data.get("overwrite_url")
if url and not validate_overwrite_url(value=url):
self._errors['overwrite_url'] = self.error_class([_('You entered an invalid URL.')])
if self._errors:
# Fail fast if there's errors in the form
return cleaned_data
# Language has been validated already
# so we know it exists.
language_name = get_language_object(
self._language,
site_id=self._site.pk,
)['name']
if not self.title_obj.slug:
# This covers all cases where users try to edit
# page advanced settings without setting a title slug
# for page titles that already exist.
message = _("Please set the %(language)s slug "
"before editing its advanced settings.")
raise ValidationError(message % {'language': language_name})
if 'reverse_id' in self.fields:
reverse_id = cleaned_data['reverse_id']
if reverse_id:
lookup = Page.objects.drafts().on_site(self._site).filter(reverse_id=reverse_id)
if lookup.exclude(pk=self.instance.pk).exists():
self._errors['reverse_id'] = self.error_class(
[_('A page with this reverse URL id exists already.')])
apphook = cleaned_data.get('application_urls', None)
# The field 'application_namespace' is a misnomer. It should be
# 'instance_namespace'.
instance_namespace = cleaned_data.get('application_namespace', None)
application_config = cleaned_data.get('application_configs', None)
if apphook:
apphooks_with_config = self.get_apphooks_with_config()
# application_config wins over application_namespace
if apphook in apphooks_with_config and application_config:
# the value of the application config namespace is saved in
# the 'usual' namespace field to be backward compatible
# with existing apphooks
try:
appconfig_pk = forms.IntegerField(required=True).to_python(application_config)
except ValidationError:
self._errors['application_configs'] = ErrorList([
_('Invalid application config value')
])
return self.cleaned_data
try:
config = apphooks_with_config[apphook].get_configs().get(pk=appconfig_pk)
except ObjectDoesNotExist:
self._errors['application_configs'] = ErrorList([
_('Invalid application config value')
])
return self.cleaned_data
if self._check_unique_namespace_instance(config.namespace):
# Looks like there's already one with the default instance
# namespace defined.
self._errors['application_configs'] = ErrorList([
_('An application instance using this configuration already exists.')
])
else:
self.cleaned_data['application_namespace'] = config.namespace
else:
if instance_namespace:
if self._check_unique_namespace_instance(instance_namespace):
self._errors['application_namespace'] = ErrorList([
_('An application instance with this name already exists.')
])
else:
# The attribute on the apps 'app_name' is a misnomer, it should be
# 'application_namespace'.
application_namespace = apphook_pool.get_apphook(apphook).app_name
if application_namespace and not instance_namespace:
if self._check_unique_namespace_instance(application_namespace):
# Looks like there's already one with the default instance
# namespace defined.
self._errors['application_namespace'] = ErrorList([
_('An application instance with this name already exists.')
])
else:
# OK, there are zero instances of THIS app that use the
# default instance namespace, so, since the user didn't
# provide one, we'll use the default. NOTE: The following
# line is really setting the "instance namespace" of the
# new app to the app’s "application namespace", which is
# the default instance namespace.
self.cleaned_data['application_namespace'] = application_namespace
if instance_namespace and not apphook:
self.cleaned_data['application_namespace'] = None
if application_config and not apphook:
self.cleaned_data['application_configs'] = None
return self.cleaned_data
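    # A worked sketch of the namespace resolution above (illustrative only;
    # 'BlogApp' and 'myblog' are hypothetical names). For an apphook that
    # supports configs, the chosen config's namespace wins:
    #
    #     cleaned_data = {'application_urls': 'BlogApp',
    #                     'application_configs': '3'}     # config pk as str
    #     # -> cleaned_data['application_namespace'] = config.namespace
    #
    # For a plain apphook whose app_name is 'myblog', with no instance
    # namespace supplied by the user, the default is applied instead:
    #
    #     # -> cleaned_data['application_namespace'] = 'myblog'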
def clean_xframe_options(self):
if 'xframe_options' not in self.fields:
return # nothing to do, field isn't present
xframe_options = self.cleaned_data['xframe_options']
if xframe_options == '':
return Page._meta.get_field('xframe_options').default
return xframe_options
def clean_overwrite_url(self):
path_override = self.cleaned_data.get('overwrite_url')
if path_override:
path = path_override.strip('/')
else:
path = self.instance.get_path_for_slug(self.title_obj.slug, self._language)
validate_url_uniqueness(
self._site,
path=path,
language=self._language,
exclude_page=self.instance,
)
self.cleaned_data['path'] = path
return path_override
def has_changed_apphooks(self):
changed_data = self.changed_data
if 'application_urls' in changed_data:
return True
return 'application_namespace' in changed_data
def update_apphooks(self):
# User has changed the apphooks on the page.
# Update the public version of the page to reflect this change immediately.
public_id = self.instance.publisher_public_id
self._meta.model.objects.filter(pk=public_id).update(
application_urls=self.instance.application_urls,
application_namespace=(self.instance.application_namespace or None),
)
# Connects the apphook restart handler to the request finished signal
set_restart_trigger()
def save(self, *args, **kwargs):
data = self.cleaned_data
page = super().save(*args, **kwargs)
page.update_translations(
self._language,
path=data['path'],
redirect=(data.get('redirect') or None),
publisher_state=PUBLISHER_STATE_DIRTY,
has_url_overwrite=bool(data.get('overwrite_url')),
)
is_draft_and_has_public = page.publisher_is_draft and page.publisher_public_id
if is_draft_and_has_public and self.has_changed_apphooks():
self.update_apphooks()
page.clear_cache(menu=True)
return page
class PagePermissionForm(forms.ModelForm):
class Meta:
model = Page
fields = ['login_required', 'limit_visibility_in_menu']
def save(self, *args, **kwargs):
page = super().save(*args, **kwargs)
page.clear_cache(menu=True)
clear_permission_cache()
return page
class PageTreeForm(forms.Form):
position = forms.IntegerField(initial=0, required=True)
target = forms.ModelChoiceField(queryset=Page.objects.none(), required=False)
def __init__(self, *args, **kwargs):
self.page = kwargs.pop('page')
self._site = kwargs.pop('site', Site.objects.get_current())
super().__init__(*args, **kwargs)
self.fields['target'].queryset = Page.objects.drafts().filter(
node__site=self._site,
is_page_type=self.page.is_page_type,
)
def get_root_nodes(self):
# TODO: this needs to avoid using the pages accessor directly
nodes = TreeNode.get_root_nodes()
is_page_type = self.page.is_page_type
return nodes.exclude(cms_pages__is_page_type=not is_page_type)
def get_tree_options(self):
position = self.cleaned_data['position']
target_page = self.cleaned_data.get('target')
parent_node = target_page.node if target_page else None
if parent_node:
return self._get_tree_options_for_parent(parent_node, position)
return self._get_tree_options_for_root(position)
def _get_tree_options_for_root(self, position):
siblings = self.get_root_nodes().filter(site=self._site)
try:
target_node = siblings[position]
except IndexError:
# The position requested is not occupied.
# Add the node as the last root node,
# relative to the current site.
return (siblings.reverse()[0], 'right')
return (target_node, 'left')
def _get_tree_options_for_parent(self, parent_node, position):
if position == 0:
return (parent_node, 'first-child')
siblings = parent_node.get_children().filter(site=self._site)
try:
target_node = siblings[position]
except IndexError:
# The position requested is not occupied.
            # Add the node as the parent's last child
return (parent_node, 'last-child')
return (target_node, 'left')
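# Position semantics sketch (illustrative): get_tree_options() returns a
# (node, position) pair consumed by the treebeard-style move/copy calls:
#
#     (parent_node, 'first-child')   # insert as the target's first child
#     (target_node, 'left')          # insert directly before an occupied slot
#     (parent_node, 'last-child')    # requested slot past the end -> append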
class MovePageForm(PageTreeForm):
def clean(self):
cleaned_data = super().clean()
if self.page.is_home and cleaned_data.get('target'):
self.add_error('target', force_str(_('You can\'t move the home page inside another page')))
return cleaned_data
def get_tree_options(self):
options = super().get_tree_options()
target_node, target_node_position = options
if target_node_position != 'left':
return (target_node, target_node_position)
node = self.page.node
node_is_first = node.path < target_node.path
if node_is_first and node.is_sibling_of(target_node):
# The node being moved appears before the target node
# and is a sibling of the target node.
# The user is moving from left to right.
target_node_position = 'right'
elif node_is_first:
# The node being moved appears before the target node
# but is not a sibling of the target node.
# The user is moving from right to left.
target_node_position = 'left'
else:
# The node being moved appears after the target node.
# The user is moving from right to left.
target_node_position = 'left'
return (target_node, target_node_position)
def move_page(self):
self.page.move_page(*self.get_tree_options())
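# Minimal usage sketch for MovePageForm (illustrative; assumes a draft
# `page`, a `target` page and the current `site` already exist):
#
#     form = MovePageForm(data={'position': 0, 'target': target.pk},
#                         page=page, site=site)
#     if form.is_valid():
#         form.move_page()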
class CopyPageForm(PageTreeForm):
source_site = forms.ModelChoiceField(queryset=Site.objects.all(), required=True)
copy_permissions = forms.BooleanField(initial=False, required=False)
def copy_page(self):
target, position = self.get_tree_options()
copy_permissions = self.cleaned_data.get('copy_permissions', False)
new_page = self.page.copy_with_descendants(
target_node=target,
position=position,
copy_permissions=copy_permissions,
target_site=self._site,
)
new_page.clear_cache(menu=True)
return new_page
def _get_tree_options_for_root(self, position):
try:
return super()._get_tree_options_for_root(position)
except IndexError:
# The user is copying a page to a site with no pages
# Add the node as the last root node.
siblings = self.get_root_nodes().reverse()
return (siblings[0], 'right')
class ChangeListForm(forms.Form):
BOOLEAN_CHOICES = (
('', _('All')),
('1', _('Yes')),
('0', _('No')),
)
q = forms.CharField(required=False, widget=forms.HiddenInput())
in_navigation = forms.ChoiceField(required=False, choices=BOOLEAN_CHOICES)
template = forms.ChoiceField(required=False)
changed_by = forms.ChoiceField(required=False)
soft_root = forms.ChoiceField(required=False, choices=BOOLEAN_CHOICES)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['changed_by'].choices = get_page_changed_by_filter_choices()
self.fields['template'].choices = get_page_template_filter_choices()
def is_filtered(self):
data = self.cleaned_data
if self.cleaned_data.get('q'):
return True
return any(bool(data.get(field.name)) for field in self.visible_fields())
def get_filter_items(self):
for field in self.visible_fields():
value = self.cleaned_data.get(field.name)
if value:
yield (field.name, value)
def run_filters(self, queryset):
for field, value in self.get_filter_items():
query = {f'{field}__exact': value}
queryset = queryset.filter(**query)
return queryset
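# Filtering sketch (illustrative): with GET data like
# {'q': 'home', 'in_navigation': '1'}, is_filtered() returns True and
# run_filters() narrows the queryset to
#
#     queryset.filter(in_navigation__exact='1')
#
# The 'q' field uses a hidden widget, so it is not a visible field and
# never appears in get_filter_items().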
class BasePermissionAdminForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
permission_fields = self._meta.model.get_all_permissions()
for field in permission_fields:
if field not in self.base_fields:
setattr(self.instance, field, False)
class PagePermissionInlineAdminForm(BasePermissionAdminForm):
"""
Page permission inline admin form used in inline admin. Required, because
user and group queryset must be changed. User can see only users on the same
level or under him in chosen page tree, and users which were created by him,
but aren't assigned to higher page level than current user.
"""
page = forms.ModelChoiceField(
queryset=Page.objects.all(),
label=_('user'),
widget=HiddenInput(),
required=True,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
user = get_current_user() # current user from threadlocals
site = Site.objects.get_current()
sub_users = get_subordinate_users(user, site)
limit_choices = True
use_raw_id = False
        # Unfortunately, if there are > 500 users in the system, non-superusers
        # get no benefit here: asking Django to put all the user PKs into
        # limit_choices_to in the popup's query string risks 414 (Request-URI
        # Too Large) errors, so we fall back to the normal input widget.
if get_cms_setting('RAW_ID_USERS'):
if sub_users.count() < 500:
# If there aren't too many users, proceed as normal and use a
# raw id field with limit_choices_to
limit_choices = True
use_raw_id = True
elif get_user_permission_level(user, site) == ROOT_USER_LEVEL:
# If there are enough choices to possibly cause a 414 request
# URI too large error, we only proceed with the raw id field if
# the user is a superuser & thus can legitimately circumvent
# the limit_choices_to condition.
limit_choices = False
use_raw_id = True
# We don't use the fancy custom widget if the admin form wants to use a
# raw id field for the user
if use_raw_id:
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
# This check will be False if the number of users in the system
# is less than the threshold set by the RAW_ID_USERS setting.
if isinstance(self.fields['user'].widget, ForeignKeyRawIdWidget):
# We can't set a queryset on a raw id lookup, but we can use
# the fact that it respects the limit_choices_to parameter.
if limit_choices:
self.fields['user'].widget.rel.limit_choices_to = dict(
id__in=list(sub_users.values_list('pk', flat=True))
)
else:
self.fields['user'].widget = UserSelectAdminWidget()
self.fields['user'].queryset = sub_users
self.fields['user'].widget.user = user # assign current user
self.fields['group'].queryset = get_subordinate_groups(user, site)
class Meta:
fields = [
'user',
'group',
'can_add',
'can_change',
'can_delete',
'can_publish',
'can_change_advanced_settings',
'can_change_permissions',
'can_move_page',
'grant_on',
]
model = PagePermission
class ViewRestrictionInlineAdminForm(BasePermissionAdminForm):
page = forms.ModelChoiceField(
queryset=Page.objects.all(),
label=_('user'),
widget=HiddenInput(),
required=True,
)
can_view = forms.BooleanField(
label=_('can_view'),
widget=HiddenInput(),
initial=True,
)
class Meta:
fields = [
'user',
'group',
'grant_on',
'can_view',
]
model = PagePermission
def clean_can_view(self):
return True
class GlobalPagePermissionAdminForm(BasePermissionAdminForm):
class Meta:
fields = [
'user',
'group',
'can_add',
'can_change',
'can_delete',
'can_publish',
'can_change_advanced_settings',
'can_change_permissions',
'can_move_page',
'can_view',
'sites',
]
model = GlobalPagePermission
class GenericCmsPermissionForm(forms.ModelForm):
"""Generic form for User & Group permissions in cms
"""
_current_user = None
can_add_page = forms.BooleanField(label=_('Add'), required=False, initial=True)
can_change_page = forms.BooleanField(label=_('Change'), required=False, initial=True)
can_delete_page = forms.BooleanField(label=_('Delete'), required=False)
    # pageuser covers both PageUser and PageUserGroup - the two are combined
    # and read out from the PageUser model
can_add_pageuser = forms.BooleanField(label=_('Add'), required=False)
can_change_pageuser = forms.BooleanField(label=_('Change'), required=False)
can_delete_pageuser = forms.BooleanField(label=_('Delete'), required=False)
can_add_pagepermission = forms.BooleanField(label=_('Add'), required=False)
can_change_pagepermission = forms.BooleanField(label=_('Change'), required=False)
can_delete_pagepermission = forms.BooleanField(label=_('Delete'), required=False)
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance')
initial = kwargs.get('initial') or {}
if instance:
initial = initial or {}
initial.update(self.populate_initials(instance))
kwargs['initial'] = initial
super().__init__(*args, **kwargs)
def clean(self):
data = super().clean()
# Validate Page options
if not data.get('can_change_page'):
if data.get('can_add_page'):
message = _("Users can't create a page without permissions "
"to change the created page. Edit permissions required.")
raise ValidationError(message)
if data.get('can_delete_page'):
message = _("Users can't delete a page without permissions "
"to change the page. Edit permissions required.")
raise ValidationError(message)
if data.get('can_add_pagepermission'):
message = _("Users can't set page permissions without permissions "
"to change a page. Edit permissions required.")
raise ValidationError(message)
if data.get('can_delete_pagepermission'):
message = _("Users can't delete page permissions without permissions "
"to change a page. Edit permissions required.")
raise ValidationError(message)
# Validate PagePermission options
if not data.get('can_change_pagepermission'):
if data.get('can_add_pagepermission'):
message = _("Users can't create page permissions without permissions "
"to change the created permission. Edit permissions required.")
raise ValidationError(message)
if data.get('can_delete_pagepermission'):
message = _("Users can't delete page permissions without permissions "
"to change permissions. Edit permissions required.")
raise ValidationError(message)
def populate_initials(self, obj):
"""Read out permissions from permission system.
"""
initials = {}
permission_accessor = get_permission_accessor(obj)
for model in (Page, PageUser, PagePermission):
name = model.__name__.lower()
content_type = ContentType.objects.get_for_model(model)
permissions = permission_accessor.filter(content_type=content_type).values_list('codename', flat=True)
for key in ('add', 'change', 'delete'):
codename = get_permission_codename(key, model._meta)
initials[f'can_{key}_{name}'] = codename in permissions
return initials
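    # Shape of the initials returned above (illustrative): one boolean per
    # add/change/delete codename and model, e.g.
    #
    #     {'can_add_page': True, 'can_change_page': True,
    #      'can_delete_page': False, 'can_add_pageuser': False, ...}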
def save(self, commit=True):
instance = super().save(commit=False)
instance.save()
save_permissions(self.cleaned_data, instance)
return instance
class PageUserAddForm(forms.ModelForm):
_current_user = None
user = forms.ModelChoiceField(queryset=User.objects.none())
class Meta:
fields = ['user']
model = PageUser
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['user'].queryset = self.get_subordinates()
def get_subordinates(self):
subordinates = get_subordinate_users(self._current_user, self._current_site)
return subordinates.filter(pageuser__isnull=True)
def save(self, commit=True):
user = self.cleaned_data['user']
instance = super().save(commit=False)
instance.created_by = self._current_user
for field in user._meta.fields:
            # assign all the fields - we can do this because the object
            # subclasses User (one-to-one relation)
value = getattr(user, field.name)
setattr(instance, field.name, value)
if commit:
instance.save()
return instance
class PageUserChangeForm(UserChangeForm):
_current_user = None
class Meta:
fields = '__all__'
model = PageUser
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self._current_user.is_superuser:
# Limit permissions to include only
# the permissions available to the manager.
permissions = self.get_available_permissions()
self.fields['user_permissions'].queryset = permissions
# Limit groups to include only those where
# the manager is a member.
self.fields['groups'].queryset = self.get_available_groups()
def get_available_permissions(self):
permissions = self._current_user.get_all_permissions()
permission_codes = (perm.rpartition('.')[-1] for perm in permissions)
return Permission.objects.filter(codename__in=permission_codes)
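    # Permission-code sketch (illustrative): get_all_permissions() yields
    # strings like 'cms.change_page'; rpartition('.') keeps only the codename:
    #
    #     'cms.change_page'.rpartition('.')[-1]  # -> 'change_page'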
def get_available_groups(self):
return self._current_user.groups.all()
class PageUserGroupForm(GenericCmsPermissionForm):
class Meta:
model = PageUserGroup
fields = ('name', )
def save(self, commit=True):
if not self.instance.pk:
self.instance.created_by = self._current_user
return super().save(commit=commit)
class PluginAddValidationForm(forms.Form):
placeholder_id = forms.ModelChoiceField(
queryset=Placeholder.objects.all(),
required=True,
)
plugin_language = forms.CharField(required=True)
plugin_parent = forms.ModelChoiceField(
CMSPlugin.objects.all(),
required=False,
)
plugin_type = forms.CharField(required=True)
def clean_plugin_type(self):
plugin_type = self.cleaned_data['plugin_type']
try:
plugin_pool.get_plugin(plugin_type)
except KeyError:
message = gettext("Invalid plugin type '%s'") % plugin_type
raise ValidationError(message)
return plugin_type
def clean(self):
from cms.utils.plugins import has_reached_plugin_limit
data = self.cleaned_data
if self.errors:
return data
language = data['plugin_language']
placeholder = data['placeholder_id']
parent_plugin = data.get('plugin_parent')
if language not in get_language_list():
message = gettext("Language must be set to a supported language!")
self.add_error('plugin_language', message)
return self.cleaned_data
if parent_plugin:
if parent_plugin.language != language:
message = gettext("Parent plugin language must be same as language!")
self.add_error('plugin_language', message)
return self.cleaned_data
if parent_plugin.placeholder_id != placeholder.pk:
message = gettext("Parent plugin placeholder must be same as placeholder!")
self.add_error('placeholder_id', message)
return self.cleaned_data
page = placeholder.page
template = page.get_template() if page else None
try:
has_reached_plugin_limit(
placeholder,
data['plugin_type'],
language,
template=template,
parent_plugin=parent_plugin
)
except PluginLimitReached as error:
self.add_error(None, force_str(error))
return self.cleaned_data
class RequestToolbarForm(forms.Form):
obj_id = forms.CharField(required=False)
obj_type = forms.CharField(required=False)
cms_path = forms.CharField(required=False)
def clean(self):
data = self.cleaned_data
obj_id = data.get('obj_id')
obj_type = data.get('obj_type')
if not bool(obj_id or obj_type):
return data
if (obj_id and not obj_type) or (obj_type and not obj_id):
message = 'Invalid object lookup. Both obj_id and obj_type are required'
raise forms.ValidationError(message)
app, sep, model = obj_type.rpartition('.')
try:
model_class = apps.get_model(app_label=app, model_name=model)
except LookupError:
message = 'Invalid object lookup. Both obj_id and obj_type are required'
raise forms.ValidationError(message)
try:
generic_obj = model_class.objects.get(pk=obj_id)
except model_class.DoesNotExist:
message = 'Invalid object lookup. Both obj_id and obj_type are required'
raise forms.ValidationError(message)
else:
data['attached_obj'] = generic_obj
return data
def clean_cms_path(self):
path = self.cleaned_data.get('cms_path')
if path:
validate_relative_url(path)
return path
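# Lookup sketch for RequestToolbarForm.clean() (illustrative): obj_type is
# an 'app_label.ModelName' string, split from the right so the app label may
# contain dots while the model name does not:
#
#     'cms.Page'.rpartition('.')   # -> ('cms', '.', 'Page')
#     apps.get_model(app_label='cms', model_name='Page')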
# --- end of cms/admin/forms.py (repo: rsalmaso/django-cms, license: bsd-3-clause) ---
from rpc import *
class RpcApiNotFound(Exception): pass
class RpcBadParams(Exception): pass
class Client:
    def __init__(self, url, keyFile, apis='*', value='60', mode='minu', caller='anonymous'):
        self._auth = AutoAuth(caller, url, keyFile, apis, value, mode)
        self._cube = AutoAuthApiCube(url, self._auth)
        self._apis = {}
        self.url = url
        self.setHttpOpt = self._cube.setHttpOpt
        self.stub = self._auth.stub
        self.newStub = self._auth.newStub
        self.help = lambda: str(self.__getattr__('apiList')())
    def __str__(self):
        # defined on the class: in Python 3, assigning __str__ on the
        # instance would be ignored by str()
        return self.help()
    def __repr__(self):
        return self.help()
    def __getattr__(self, _api):
        if _api.startswith('_'):
            raise AttributeError(_api)
        api = self._apis.get(_api)
        if not api:
            api = Api(self._cube, _api)
            self._apis[_api] = api
        return api
class Api:
def __init__(self, caller, api):
self._caller = caller
self._api = api
self._params = None
self._doc = None
    def __call__(self, *v, **kv):
        params = self._getParams()
        # map positional args onto the api's declared parameter names
        for i, value in enumerate(v):
            kv[params[i]] = str(value)
        for param in params:
            if param not in kv:
                raise RpcBadParams(self._api + ": missed '%s' in params" % param)
        if '_post' in kv:
            _post = kv.pop('_post')
            return self._caller.call(self._api, _post=_post, **kv)
        return self._caller.call(self._api, **kv)
def help(self):
if self._doc is None:
result = self._caller.call('apiInfo', api=self._api)
if not isinstance(result, list) or result[0] != 'ok':
raise RpcFailed(result)
self._doc = str(result[1])
if self._doc == 'apiNotFound':
raise RpcApiNotFound(self._api)
return self._doc
def __str__(self):
return self.help()
def __repr__(self):
return self.help()
def _getParams(self):
if self._params is None:
info = self._caller.call('apiInfo', api=self._api)
if not info or not isinstance(info, list) or len(info) < 2 or info[0] != 'ok':
raise RpcFailed(self._api, info)
if info[1] == 'apiNotFound':
raise RpcApiNotFound(self._api)
self._params = info[1]['params']
return self._params
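# Usage sketch (illustrative; the endpoint URL, key file and `someApi` are
# placeholders, and AutoAuth/AutoAuthApiCube come from the star import):
#
#     client = Client('http://example.com/rpc', 'key.pem')
#     print(client.help())                  # list the remote apis
#     result = client.someApi('a', b='2')   # positional args map onto the
#                                           # api's declared params in order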
# --- end of core/client.py (repo: innerr/stars, license: mit) ---
import mock
import pytest
import datetime
from django.utils import timezone
from rest_framework import exceptions
from waffle.testutils import (
override_switch,
)
from osf import features
from osf.utils.permissions import READ
from api.base.settings.defaults import API_BASE
from api_tests import utils as test_utils
from api_tests.subjects.mixins import UpdateSubjectsMixin
from framework.auth.core import Auth
from osf.models import (
NodeLicense,
PreprintContributor,
PreprintLog
)
from osf.utils.permissions import WRITE
from osf.utils.workflows import DefaultStates
from osf_tests.factories import (
PreprintFactory,
AuthUserFactory,
ProjectFactory,
SubjectFactory,
PreprintProviderFactory,
)
from website.settings import DOI_FORMAT
from website.language import SWITCH_VALIDATOR_ERROR
def build_preprint_update_payload(
node_id, attributes=None, relationships=None,
jsonapi_type='preprints'):
payload = {
'data': {
'id': node_id,
'type': jsonapi_type,
'attributes': attributes,
'relationships': relationships
}
}
return payload
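# Payload shape sketch (illustrative id): the helper wraps attributes and
# relationships in a JSON:API document:
#
#     build_preprint_update_payload('abc12', attributes={'title': 'New'})
#     # -> {'data': {'id': 'abc12', 'type': 'preprints',
#     #              'attributes': {'title': 'New'}, 'relationships': None}}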
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestPreprintDetail:
@pytest.fixture()
def preprint(self, user):
return PreprintFactory(creator=user)
@pytest.fixture()
def preprint_pre_mod(self, user):
return PreprintFactory(provider__reviews_workflow='pre-moderation', is_published=False, creator=user)
@pytest.fixture()
def moderator(self, preprint_pre_mod):
mod = AuthUserFactory()
preprint_pre_mod.provider.get_group('moderator').user_set.add(mod)
return mod
@pytest.fixture()
def unpublished_preprint(self, user):
return PreprintFactory(creator=user, is_published=False)
@pytest.fixture()
def url(self, preprint):
return '/{}preprints/{}/'.format(API_BASE, preprint._id)
@pytest.fixture()
def unpublished_url(self, unpublished_preprint):
return '/{}preprints/{}/'.format(API_BASE, unpublished_preprint._id)
@pytest.fixture()
def res(self, app, url):
return app.get(url)
@pytest.fixture()
def data(self, res):
return res.json['data']
def test_preprint_detail(self, app, user, preprint, url, res, data):
# test_preprint_detail_success
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
# test_preprint_top_level
assert data['type'] == 'preprints'
assert data['id'] == preprint._id
# test title in preprint data
assert data['attributes']['title'] == preprint.title
# test contributors in preprint data
assert data['relationships'].get('contributors', None)
assert data['relationships']['contributors'].get('data', None) is None
# test no node attached to preprint
assert data['relationships']['node'].get('data', None) is None
# test_preprint_node_deleted doesn't affect preprint
deleted_node = ProjectFactory(creator=user, is_deleted=True)
deleted_preprint = PreprintFactory(project=deleted_node, creator=user)
deleted_preprint_url = '/{}preprints/{}/'.format(
API_BASE, deleted_preprint._id)
deleted_preprint_res = app.get(
deleted_preprint_url, expect_errors=True)
assert deleted_preprint_res.status_code == 200
        assert deleted_preprint_res.content_type == 'application/vnd.api+json'
# test node relationship exists when attached to preprint
node = ProjectFactory(creator=user)
preprint_with_node = PreprintFactory(project=node, creator=user)
preprint_with_node_url = '/{}preprints/{}/'.format(
API_BASE, preprint_with_node._id)
preprint_with_node_res = app.get(
preprint_with_node_url)
node_data = preprint_with_node_res.json['data']['relationships']['node']['data']
assert node_data.get('id', None) == preprint_with_node.node._id
assert node_data.get('type', None) == 'nodes'
def test_withdrawn_preprint(self, app, user, moderator, preprint_pre_mod):
# test_retracted_fields
url = '/{}preprints/{}/'.format(API_BASE, preprint_pre_mod._id)
res = app.get(url, auth=user.auth)
data = res.json['data']
assert not data['attributes']['date_withdrawn']
assert 'withdrawal_justification' not in data['attributes']
assert 'ever_public' not in data['attributes']
## retracted and not ever_public
assert not preprint_pre_mod.ever_public
preprint_pre_mod.date_withdrawn = timezone.now()
preprint_pre_mod.withdrawal_justification = 'assumptions no longer apply'
preprint_pre_mod.save()
assert preprint_pre_mod.is_retracted
res = app.get(url, expect_errors=True)
assert res.status_code == 404
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
res = app.get(url, auth=moderator.auth)
assert res.status_code == 200
## retracted and ever_public (True)
preprint_pre_mod.ever_public = True
preprint_pre_mod.save()
res = app.get(url, auth=user.auth)
data = res.json['data']
assert data['attributes']['date_withdrawn']
assert 'withdrawal_justification' in data['attributes']
assert 'assumptions no longer apply' == data['attributes']['withdrawal_justification']
assert 'date_withdrawn' in data['attributes']
def test_embed_contributors(self, app, user, preprint):
url = '/{}preprints/{}/?embed=contributors'.format(
API_BASE, preprint._id)
res = app.get(url, auth=user.auth)
embeds = res.json['data']['embeds']
ids = preprint.contributors.all().values_list('guids___id', flat=True)
ids = ['{}-{}'.format(preprint._id, id_) for id_ in ids]
for contrib in embeds['contributors']['data']:
assert contrib['id'] in ids
def test_preprint_doi_link_absent_in_unpublished_preprints(
self, app, user, unpublished_preprint, unpublished_url):
res = app.get(unpublished_url, auth=user.auth)
assert res.json['data']['id'] == unpublished_preprint._id
assert res.json['data']['attributes']['is_published'] is False
assert 'preprint_doi' not in res.json['data']['links'].keys()
assert res.json['data']['attributes']['preprint_doi_created'] is None
def test_published_preprint_doi_link_not_returned_before_doi_request(
self, app, user, unpublished_preprint, unpublished_url):
unpublished_preprint.is_published = True
unpublished_preprint.date_published = timezone.now()
unpublished_preprint.save()
res = app.get(unpublished_url, auth=user.auth)
assert res.json['data']['id'] == unpublished_preprint._id
assert res.json['data']['attributes']['is_published'] is True
assert 'preprint_doi' not in res.json['data']['links'].keys()
def test_published_preprint_doi_link_returned_after_doi_request(
self, app, user, preprint, url):
expected_doi = DOI_FORMAT.format(
prefix=preprint.provider.doi_prefix,
guid=preprint._id
)
preprint.set_identifier_values(doi=expected_doi)
res = app.get(url, auth=user.auth)
assert res.json['data']['id'] == preprint._id
assert res.json['data']['attributes']['is_published'] is True
assert 'preprint_doi' in res.json['data']['links'].keys()
assert res.json['data']['links']['preprint_doi'] == 'https://doi.org/{}'.format(
expected_doi)
assert res.json['data']['attributes']['preprint_doi_created']
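    # DOI formatting sketch (illustrative values; the exact DOI_FORMAT
    # template lives in website.settings):
    #
    #     DOI_FORMAT.format(prefix='10.31219', guid='abc12')
    #     # e.g. -> '10.31219/osf.io/abc12'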
def test_preprint_embed_identifiers(self, app, user, preprint, url):
embed_url = url + '?embed=identifiers'
res = app.get(embed_url)
assert res.status_code == 200
link = res.json['data']['relationships']['identifiers']['links']['related']['href']
assert '{}identifiers/'.format(url) in link
@pytest.mark.django_db
class TestPreprintDelete:
@pytest.fixture()
def unpublished_preprint(self, user):
return PreprintFactory(creator=user, is_published=False)
@pytest.fixture()
def published_preprint(self, user):
return PreprintFactory(creator=user)
@pytest.fixture()
def url(self, user):
return '/{}preprints/{{}}/'.format(API_BASE)
def test_cannot_delete_preprints(
self, app, user, url, unpublished_preprint, published_preprint):
res = app.delete(url.format(unpublished_preprint._id), auth=user.auth, expect_errors=True)
assert res.status_code == 405
assert unpublished_preprint.deleted is None
res = app.delete(url.format(published_preprint._id), auth=user.auth, expect_errors=True)
assert res.status_code == 405
assert published_preprint.deleted is None
@pytest.mark.django_db
@pytest.mark.enable_enqueue_task
class TestPreprintUpdate:
@pytest.fixture()
def preprint(self, user):
return PreprintFactory(creator=user)
@pytest.fixture()
def url(self, preprint):
return '/{}preprints/{}/'.format(API_BASE, preprint._id)
@pytest.fixture()
def subject(self):
return SubjectFactory()
def test_update_preprint_permission_denied(self, app, preprint, url):
update_doi_payload = build_preprint_update_payload(
preprint._id, attributes={'article_doi': '10.123/456/789'})
noncontrib = AuthUserFactory()
res = app.patch_json_api(
url,
update_doi_payload,
auth=noncontrib.auth,
expect_errors=True)
assert res.status_code == 403
res = app.patch_json_api(url, update_doi_payload, expect_errors=True)
assert res.status_code == 401
read_contrib = AuthUserFactory()
preprint.add_contributor(read_contrib, READ, save=True)
res = app.patch_json_api(
url,
update_doi_payload,
auth=read_contrib.auth,
expect_errors=True
)
assert res.status_code == 403
def test_update_original_publication_date_to_none(self, app, preprint, url):
# Original pub date accidentally set, need to remove
write_contrib = AuthUserFactory()
preprint.add_contributor(write_contrib, WRITE, save=True)
preprint.original_publication_date = '2013-12-11 10:09:08.070605+00:00'
preprint.save()
update_payload = build_preprint_update_payload(
preprint._id, attributes={
'original_publication_date': None,
}
)
res = app.patch_json_api(
url,
update_payload,
auth=write_contrib.auth,
)
assert res.status_code == 200
preprint.reload()
assert preprint.original_publication_date is None
def test_update_preprint_permission_write_contrib(self, app, preprint, url):
write_contrib = AuthUserFactory()
preprint.add_contributor(write_contrib, WRITE, save=True)
doi = '10.123/456/789'
original_publication_date = '2013-12-11 10:09:08.070605+00:00'
license_record = {
'year': '2015',
'copyright_holders': ['Tonya Bateman']
}
license = NodeLicense.objects.filter(name='No license').first()
title = 'My Preprint Title'
description = 'My Preprint Description'
tags = ['test tag']
node = ProjectFactory(creator=write_contrib)
new_file = test_utils.create_test_preprint_file(
preprint, write_contrib, filename='shook_that_mans_hand.pdf')
update_payload = build_preprint_update_payload(
preprint._id, attributes={
'original_publication_date': original_publication_date,
'doi': doi,
'license_record': license_record,
'title': title,
'description': description,
'tags': tags,
}, relationships={'node': {'data': {'type': 'nodes', 'id': node._id}},
'primary_file': {'data': {'type': 'file', 'id': new_file._id}},
'license': {'data': {'type': 'licenses', 'id': license._id}}}
)
res = app.patch_json_api(
url,
update_payload,
auth=write_contrib.auth,
)
assert res.status_code == 200
preprint.reload()
assert preprint.article_doi == doi
assert str(preprint.original_publication_date) == original_publication_date
assert preprint.license.node_license == license
assert preprint.license.year == license_record['year']
assert preprint.license.copyright_holders == license_record['copyright_holders']
assert preprint.title == title
assert preprint.description == description
assert preprint.tags.first().name == tags[0]
assert preprint.node == node
assert preprint.primary_file == new_file
def test_update_published_write_contrib(self, app, preprint, url):
preprint.is_published = False
preprint.save()
write_contrib = AuthUserFactory()
preprint.add_contributor(write_contrib, WRITE, save=True)
update_payload = build_preprint_update_payload(
preprint._id, attributes={
'is_published': 'true'
}
)
res = app.patch_json_api(
url,
update_payload,
auth=write_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert preprint.is_published is False
def test_update_node(self, app, user, preprint, url):
assert preprint.node is None
node = ProjectFactory(creator=user)
update_node_payload = build_preprint_update_payload(
preprint._id, relationships={'node': {'data': {'type': 'nodes', 'id': node._id}}}
)
res = app.patch_json_api(url, update_node_payload, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['relationships']['node']['data']['id'] == node._id
preprint.reload()
assert preprint.node == node
def test_update_node_permissions(self, app, user, preprint, url):
assert preprint.node is None
node = ProjectFactory()
update_node_payload = build_preprint_update_payload(
preprint._id, relationships={'node': {'data': {'type': 'nodes', 'id': node._id}}}
)
res = app.patch_json_api(url, update_node_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 403
preprint.reload()
assert preprint.node is None
def test_update_node_existing_preprint(self, app, user, preprint, url):
assert preprint.node is None
node = ProjectFactory(creator=user)
# Create preprint with same provider on node
PreprintFactory(creator=user, project=node, provider=preprint.provider)
update_node_payload = build_preprint_update_payload(
preprint._id, relationships={'node': {'data': {'type': 'nodes', 'id': node._id}}}
)
res = app.patch_json_api(url, update_node_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 200
preprint.reload()
assert preprint.node == node
def test_update_deleted_node(self, app, user, preprint, url):
assert preprint.node is None
node = ProjectFactory(creator=user)
node.is_deleted = True
node.save()
update_node_payload = build_preprint_update_payload(
preprint._id, relationships={'node': {'data': {'type': 'nodes', 'id': node._id}}}
)
res = app.patch_json_api(url, update_node_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Cannot attach a deleted project to a preprint.'
preprint.reload()
assert preprint.node is None
def test_update_primary_file(self, app, user, preprint, url):
new_file = test_utils.create_test_preprint_file(
preprint, user, filename='shook_that_mans_hand.pdf')
relationships = {
'primary_file': {
'data': {
'type': 'file',
'id': new_file._id
}
}
}
assert preprint.primary_file != new_file
update_file_payload = build_preprint_update_payload(
preprint._id, relationships=relationships)
res = app.patch_json_api(url, update_file_payload, auth=user.auth)
assert res.status_code == 200
preprint.reload()
assert preprint.primary_file == new_file
log = preprint.logs.latest()
assert log.action == 'file_updated'
assert log.params.get('preprint') == preprint._id
def test_update_preprints_with_none_type(self, app, user, preprint, url):
payload = {
'data': {
'id': preprint._id,
'type': None,
'attributes': None,
'relationship': None
}
}
res = app.patch_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
def test_update_preprints_with_no_type(self, app, user, preprint, url):
payload = {
'data': {
'id': preprint._id,
'attributes': None,
'relationship': None
}
}
res = app.patch_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
def test_update_preprints_with_wrong_type(self, app, user, preprint, url):
update_file_payload = build_preprint_update_payload(preprint._id, jsonapi_type='Nonsense')
res = app.patch_json_api(url, update_file_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 409
def test_new_primary_not_in_node(self, app, user, preprint, url):
project = ProjectFactory()
file_for_project = test_utils.create_test_file(
project, user, filename='six_pack_novak.pdf')
relationships = {
'primary_file': {
'data': {
'type': 'file',
'id': file_for_project._id
}
}
}
update_file_payload = build_preprint_update_payload(
preprint._id, relationships=relationships)
res = app.patch_json_api(
url, update_file_payload,
auth=user.auth, expect_errors=True)
assert res.status_code == 400
preprint.reload()
assert preprint.primary_file != file_for_project
def test_update_original_publication_date(self, app, user, preprint, url):
date = timezone.now() - datetime.timedelta(days=365)
update_payload = build_preprint_update_payload(
preprint._id, attributes={'original_publication_date': str(date)}
)
res = app.patch_json_api(url, update_payload, auth=user.auth)
assert res.status_code == 200
preprint.reload()
assert preprint.original_publication_date == date
def test_update_article_doi(self, app, user, preprint, url):
new_doi = '10.1234/ASDFASDF'
assert preprint.article_doi != new_doi
update_payload = build_preprint_update_payload(
preprint._id, attributes={'doi': new_doi})
res = app.patch_json_api(url, update_payload, auth=user.auth)
assert res.status_code == 200
preprint.reload()
assert preprint.article_doi == new_doi
preprint_detail = app.get(url, auth=user.auth).json['data']
assert preprint_detail['links']['doi'] == 'https://doi.org/{}'.format(
new_doi)
def test_title_has_a_512_char_limit(self, app, user, preprint, url):
new_title = 'a' * 513
update_title_payload = build_preprint_update_payload(
preprint._id,
attributes={
'title': new_title,
}
)
res = app.patch_json_api(
url,
update_title_payload,
auth=user.auth,
expect_errors=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Ensure this field has no more than 512 characters.'
preprint.reload()
assert preprint.title != new_title
@mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
def test_update_description_and_title(
self, mock_preprint_updated, app, user, preprint, url):
new_title = 'Brother Nero'
new_description = 'I knew you\'d come!'
assert preprint.description != new_description
assert preprint.title != new_title
update_title_description_payload = build_preprint_update_payload(
preprint._id,
attributes={
'title': new_title,
'description': new_description,
}
)
res = app.patch_json_api(
url,
update_title_description_payload,
auth=user.auth)
assert res.status_code == 200
preprint.reload()
assert preprint.description == new_description
assert preprint.title == new_title
assert mock_preprint_updated.called
@mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
def test_update_tags(self, mock_update_doi_metadata, app, user, preprint, url):
new_tags = ['hey', 'sup']
for tag in new_tags:
assert tag not in preprint.tags.all().values_list('name', flat=True)
update_tags_payload = build_preprint_update_payload(
preprint._id,
attributes={
'tags': new_tags
}
)
res = app.patch_json_api(url, update_tags_payload, auth=user.auth)
assert res.status_code == 200
preprint.reload()
assert sorted(
list(
preprint.tags.all().values_list(
'name',
flat=True))
) == new_tags
assert mock_update_doi_metadata.called
# No tags
update_tags_payload = build_preprint_update_payload(
preprint._id,
attributes={
'tags': []
}
)
res = app.patch_json_api(url, update_tags_payload, auth=user.auth)
assert res.status_code == 200
preprint.reload()
assert preprint.tags.count() == 0
@mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
def test_update_contributors(
self, mock_update_doi_metadata, app, user, preprint, url):
new_user = AuthUserFactory()
contributor_payload = {
'data': {
'attributes': {
'bibliographic': True,
'permission': WRITE,
'send_email': False
},
'type': 'contributors',
'relationships': {
'users': {
'data': {
'id': new_user._id,
'type': 'users'
}
}
}
}
}
contributor_url = url + 'contributors/'
res = app.post_json_api(
contributor_url,
contributor_payload,
auth=user.auth)
assert res.status_code == 201
assert new_user in preprint.contributors
assert preprint.has_permission(new_user, WRITE)
assert PreprintContributor.objects.get(preprint=preprint, user=new_user).visible is True
assert mock_update_doi_metadata.called
def test_cannot_set_primary_file(self, app, user, preprint, url):
preprint.node = None
preprint.save()
# test_write_contrib_can_attempt_to_set_primary_file
read_write_contrib = AuthUserFactory()
preprint.add_contributor(
read_write_contrib,
permissions=WRITE,
auth=Auth(user), save=True)
new_file = test_utils.create_test_preprint_file(
preprint, user, filename='lovechild_reason.pdf')
data = {
'data': {
'type': 'preprints',
'id': preprint._id,
'attributes': {},
'relationships': {
'primary_file': {
'data': {
'type': 'file',
'id': new_file._id
}
}
}
}
}
res = app.patch_json_api(
url, data,
auth=read_write_contrib.auth,
expect_errors=True)
assert res.status_code == 200
# test_noncontrib_cannot_set_primary_file
non_contrib = AuthUserFactory()
new_file = test_utils.create_test_preprint_file(
preprint, user, filename='flowerchild_nik.pdf')
data = {
'data': {
'type': 'preprints',
'id': preprint._id,
'attributes': {},
'relationships': {
'primary_file': {
'data': {
'type': 'file',
'id': new_file._id
}
}
}
}
}
res = app.patch_json_api(
url, data,
auth=non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
def test_update_published(self, app, user):
unpublished = PreprintFactory(creator=user, is_published=False)
url = '/{}preprints/{}/'.format(API_BASE, unpublished._id)
payload = build_preprint_update_payload(
unpublished._id, attributes={'is_published': True})
app.patch_json_api(url, payload, auth=user.auth)
unpublished.reload()
assert unpublished.is_published
def test_update_published_does_not_make_node_public(
self, app, user):
project = ProjectFactory(creator=user)
unpublished = PreprintFactory(creator=user, is_published=False, project=project)
assert not unpublished.node.is_public
url = '/{}preprints/{}/'.format(API_BASE, unpublished._id)
payload = build_preprint_update_payload(
unpublished._id, attributes={'is_published': True})
app.patch_json_api(url, payload, auth=user.auth)
unpublished.node.reload()
unpublished.reload()
assert unpublished.node.is_public is False
assert unpublished.is_public
@mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
def test_update_preprint_task_called_on_api_update(
self, mock_on_preprint_updated, app, user, preprint, url):
update_doi_payload = build_preprint_update_payload(
preprint._id, attributes={'doi': '10.1234/ASDFASDF'})
app.patch_json_api(url, update_doi_payload, auth=user.auth)
assert mock_on_preprint_updated.called
def test_update_has_coi(self, app, user, preprint, url):
update_payload = build_preprint_update_payload(
preprint._id,
attributes={'has_coi': True}
)
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == SWITCH_VALIDATOR_ERROR
contrib = AuthUserFactory()
preprint.add_contributor(contrib, READ)
res = app.patch_json_api(url, update_payload, auth=contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to perform this action.'
with override_switch(features.SLOAN_COI_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['has_coi']
preprint.reload()
assert preprint.has_coi
log = preprint.logs.first()
assert log.action == PreprintLog.UPDATE_HAS_COI
assert log.params == {'preprint': preprint._id, 'user': user._id, 'value': True}
def test_update_conflict_of_interest_statement(self, app, user, preprint, url):
update_payload = build_preprint_update_payload(
preprint._id,
attributes={'conflict_of_interest_statement': 'Owns shares in Closed Science Corporation.'}
)
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == SWITCH_VALIDATOR_ERROR
contrib = AuthUserFactory()
preprint.add_contributor(contrib, READ)
res = app.patch_json_api(url, update_payload, auth=contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to perform this action.'
preprint.has_coi = False
preprint.save()
with override_switch(features.SLOAN_COI_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'You do not have the ability to edit a conflict of interest while the ' \
'has_coi field is set to false or unanswered'
preprint.has_coi = True
preprint.save()
with override_switch(features.SLOAN_COI_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['conflict_of_interest_statement'] ==\
'Owns shares in Closed Science Corporation.'
preprint.reload()
assert preprint.conflict_of_interest_statement == 'Owns shares in Closed Science Corporation.'
log = preprint.logs.first()
assert log.action == PreprintLog.UPDATE_COI_STATEMENT
assert log.params == {'preprint': preprint._id, 'user': user._id}
def test_update_has_data_links(self, app, user, preprint, url):
update_payload = build_preprint_update_payload(preprint._id, attributes={'has_data_links': 'available'})
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == SWITCH_VALIDATOR_ERROR
contrib = AuthUserFactory()
preprint.add_contributor(contrib, READ)
res = app.patch_json_api(url, update_payload, auth=contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to perform this action.'
with override_switch(features.SLOAN_DATA_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['has_data_links'] == 'available'
preprint.reload()
assert preprint.has_data_links
log = preprint.logs.first()
assert log.action == PreprintLog.UPDATE_HAS_DATA_LINKS
assert log.params == {'value': 'available', 'user': user._id, 'preprint': preprint._id}
def test_update_why_no_data(self, app, user, preprint, url):
update_payload = build_preprint_update_payload(preprint._id, attributes={'why_no_data': 'My dog ate it.'})
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == SWITCH_VALIDATOR_ERROR
contrib = AuthUserFactory()
preprint.add_contributor(contrib, READ)
res = app.patch_json_api(url, update_payload, auth=contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to perform this action.'
with override_switch(features.SLOAN_DATA_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'You cannot edit this statement while your data links availability' \
' is set to true or is unanswered.'
preprint.has_data_links = 'no'
preprint.save()
with override_switch(features.SLOAN_DATA_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['why_no_data'] == 'My dog ate it.'
preprint.reload()
assert preprint.why_no_data
log = preprint.logs.first()
assert log.action == PreprintLog.UPDATE_WHY_NO_DATA
assert log.params == {'user': user._id, 'preprint': preprint._id}
def test_update_data_links(self, app, user, preprint, url):
data_links = ['http://www.JasonKelce.com', 'http://www.ItsTheWholeTeam.com/']
update_payload = build_preprint_update_payload(preprint._id, attributes={'data_links': data_links})
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == SWITCH_VALIDATOR_ERROR
contrib = AuthUserFactory()
preprint.add_contributor(contrib, READ)
res = app.patch_json_api(url, update_payload, auth=contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to perform this action.'
preprint.has_data_links = 'no'
preprint.save()
with override_switch(features.SLOAN_DATA_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'You cannot edit this statement while your data links availability' \
' is set to false or is unanswered.'
preprint.has_data_links = 'available'
preprint.save()
with override_switch(features.SLOAN_DATA_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['data_links'] == data_links
preprint.reload()
assert preprint.data_links == data_links
log = preprint.logs.first()
assert log.action == PreprintLog.UPDATE_DATA_LINKS
assert log.params == {'user': user._id, 'preprint': preprint._id}
        update_payload = build_preprint_update_payload(preprint._id, attributes={'data_links': 'malformed payload'})
with override_switch(features.SLOAN_DATA_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "str".'
@override_switch(features.SLOAN_DATA_INPUT, active=True)
def test_invalid_data_links(self, app, user, preprint, url):
preprint.has_data_links = 'available'
preprint.save()
update_payload = build_preprint_update_payload(preprint._id, attributes={'data_links': ['thisaintright']})
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Enter a valid URL.'
def test_update_has_prereg_links(self, app, user, preprint, url):
update_payload = build_preprint_update_payload(preprint._id, attributes={'has_prereg_links': 'available'})
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == SWITCH_VALIDATOR_ERROR
contrib = AuthUserFactory()
preprint.add_contributor(contrib, READ)
res = app.patch_json_api(url, update_payload, auth=contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to perform this action.'
with override_switch(features.SLOAN_PREREG_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['has_prereg_links'] == 'available'
preprint.reload()
assert preprint.has_prereg_links
log = preprint.logs.first()
assert log.action == PreprintLog.UPDATE_HAS_PREREG_LINKS
assert log.params == {'value': 'available', 'user': user._id, 'preprint': preprint._id}
@override_switch(features.SLOAN_PREREG_INPUT, active=True)
def test_invalid_prereg_links(self, app, user, preprint, url):
preprint.has_prereg_links = 'available'
preprint.save()
update_payload = build_preprint_update_payload(preprint._id, attributes={'prereg_links': ['thisaintright']})
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Enter a valid URL.'
@override_switch(features.SLOAN_DATA_INPUT, active=True)
def test_no_data_links_clears_links(self, app, user, preprint, url):
preprint.has_data_links = 'available'
preprint.data_links = ['http://www.apple.com']
preprint.save()
update_payload = build_preprint_update_payload(preprint._id, attributes={'has_data_links': 'no'})
res = app.patch_json_api(url, update_payload, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['has_data_links'] == 'no'
assert res.json['data']['attributes']['data_links'] == []
@override_switch(features.SLOAN_PREREG_INPUT, active=True)
def test_no_prereg_links_clears_links(self, app, user, preprint, url):
preprint.has_prereg_links = 'available'
preprint.prereg_links = ['http://example.com']
preprint.prereg_link_info = 'prereg_analysis'
preprint.save()
update_payload = build_preprint_update_payload(preprint._id, attributes={'has_prereg_links': 'no'})
res = app.patch_json_api(url, update_payload, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['has_prereg_links'] == 'no'
assert res.json['data']['attributes']['prereg_links'] == []
assert not res.json['data']['attributes']['prereg_link_info']
def test_update_why_no_prereg(self, app, user, preprint, url):
update_payload = build_preprint_update_payload(preprint._id, attributes={'why_no_prereg': 'My dog ate it.'})
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == SWITCH_VALIDATOR_ERROR
contrib = AuthUserFactory()
preprint.add_contributor(contrib, READ)
res = app.patch_json_api(url, update_payload, auth=contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to perform this action.'
with override_switch(features.SLOAN_PREREG_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'You cannot edit this statement while your prereg links availability' \
' is set to true or is unanswered.'
preprint.has_prereg_links = False
preprint.save()
with override_switch(features.SLOAN_PREREG_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['why_no_prereg'] == 'My dog ate it.'
preprint.reload()
assert preprint.why_no_prereg
log = preprint.logs.first()
assert log.action == PreprintLog.UPDATE_WHY_NO_PREREG
assert log.params == {'user': user._id, 'preprint': preprint._id}
def test_update_prereg_links(self, app, user, preprint, url):
prereg_links = ['http://www.JasonKelce.com', 'http://www.ItsTheWholeTeam.com/']
update_payload = build_preprint_update_payload(preprint._id, attributes={'prereg_links': prereg_links})
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == SWITCH_VALIDATOR_ERROR
contrib = AuthUserFactory()
preprint.add_contributor(contrib, READ)
res = app.patch_json_api(url, update_payload, auth=contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to perform this action.'
preprint.has_prereg_links = 'no'
preprint.save()
with override_switch(features.SLOAN_PREREG_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'You cannot edit this field while your prereg links availability' \
' is set to false or is unanswered.'
preprint.has_prereg_links = 'available'
preprint.save()
with override_switch(features.SLOAN_PREREG_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['prereg_links'] == prereg_links
preprint.reload()
assert preprint.prereg_links == prereg_links
log = preprint.logs.first()
assert log.action == PreprintLog.UPDATE_PREREG_LINKS
assert log.params == {'user': user._id, 'preprint': preprint._id}
        update_payload = build_preprint_update_payload(preprint._id, attributes={'prereg_links': 'malformed payload'})
        with override_switch(features.SLOAN_PREREG_INPUT, active=True):
            res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "str".'
def test_update_prereg_link_info(self, app, user, preprint, url):
update_payload = build_preprint_update_payload(
preprint._id,
attributes={'prereg_link_info': 'prereg_designs'}
)
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == SWITCH_VALIDATOR_ERROR
preprint.has_prereg_links = 'no'
preprint.save()
with override_switch(features.SLOAN_PREREG_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'You cannot edit this field while your prereg links availability' \
' is set to false or is unanswered.'
preprint.has_prereg_links = 'available'
preprint.save()
with override_switch(features.SLOAN_PREREG_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['prereg_link_info'] == 'prereg_designs'
preprint.reload()
assert preprint.prereg_link_info == 'prereg_designs'
log = preprint.logs.first()
assert log.action == PreprintLog.UPDATE_PREREG_LINKS_INFO
assert log.params == {'user': user._id, 'preprint': preprint._id}
        update_payload = build_preprint_update_payload(
            preprint._id,
            attributes={'prereg_link_info': 'malformed payload'}
        )
        with override_switch(features.SLOAN_PREREG_INPUT, active=True):
            res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == '"malformed payload" is not a valid choice.'
def test_sloan_updates(self, app, user, preprint, url):
"""
- Tests to ensure updating a preprint with unchanged data does not create superfluous log statements.
- Tests to ensure various dependent fields can be updated in a single request.
"""
preprint.has_prereg_links = 'available'
preprint.prereg_links = ['http://no-sf.io']
preprint.prereg_link_info = 'prereg_designs'
preprint.save()
update_payload = build_preprint_update_payload(
preprint._id,
attributes={
'has_prereg_links': 'available',
'prereg_link_info': 'prereg_designs',
                'prereg_links': ['http://osf.io'],  # changing this should be the only non-factory-created log.
}
)
with override_switch(features.SLOAN_PREREG_INPUT, active=True):
app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
# Any superfluous log statements?
logs = preprint.logs.all().values_list('action', 'params')
assert logs.count() == 3 # actions should be: 'subjects_updated', 'published', 'prereg_links_updated'
assert logs.latest() == ('prereg_links_updated', {'user': user._id, 'preprint': preprint._id})
# Can we set `has_prereg_links` to false and update `why_no_prereg` in a single request?
update_payload = build_preprint_update_payload(
preprint._id,
attributes={
'has_prereg_links': 'no',
'why_no_prereg': 'My dog ate it.'
}
)
with override_switch(features.SLOAN_PREREG_INPUT, active=True):
res = app.patch_json_api(url, update_payload, auth=user.auth, expect_errors=True)
assert res.status_code == 200
assert res.json['data']['attributes']['has_prereg_links'] == 'no'
assert res.json['data']['attributes']['why_no_prereg'] == 'My dog ate it.'
preprint.refresh_from_db()
assert preprint.has_prereg_links == 'no'
assert preprint.why_no_prereg == 'My dog ate it.'
@pytest.mark.django_db
class TestPreprintUpdateSubjects(UpdateSubjectsMixin):
@pytest.fixture()
def resource(self, user_admin_contrib, user_write_contrib, user_read_contrib):
preprint = PreprintFactory(creator=user_admin_contrib, is_published=True)
preprint.add_contributor(user_write_contrib, auth=Auth(user_admin_contrib))
preprint.add_contributor(
user_read_contrib,
auth=Auth(user_admin_contrib),
permissions=READ)
preprint.save()
return preprint
@pytest.mark.django_db
class TestPreprintUpdateLicense:
@pytest.fixture()
def admin_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def read_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def cc0_license(self):
return NodeLicense.objects.filter(name='CC0 1.0 Universal').first()
@pytest.fixture()
def mit_license(self):
return NodeLicense.objects.filter(name='MIT License').first()
@pytest.fixture()
def no_license(self):
return NodeLicense.objects.filter(name='No license').first()
@pytest.fixture()
def preprint_provider(self, cc0_license, no_license):
preprint_provider = PreprintProviderFactory()
preprint_provider.licenses_acceptable = [cc0_license, no_license]
preprint_provider.save()
return preprint_provider
@pytest.fixture()
def preprint(
self, admin_contrib, write_contrib, read_contrib,
preprint_provider):
preprint = PreprintFactory(
creator=admin_contrib,
provider=preprint_provider)
preprint.add_contributor(write_contrib, permissions=WRITE, auth=Auth(admin_contrib))
preprint.add_contributor(
read_contrib,
auth=Auth(admin_contrib),
permissions=READ)
preprint.save()
return preprint
@pytest.fixture()
def url(self, preprint):
return '/{}preprints/{}/'.format(API_BASE, preprint._id)
@pytest.fixture()
def make_payload(self):
def payload(
node_id, license_id=None, license_year=None,
copyright_holders=None, jsonapi_type='preprints'
):
attributes = {}
if license_year and copyright_holders:
attributes = {
'license_record': {
'year': license_year,
'copyright_holders': copyright_holders
}
}
elif license_year:
attributes = {
'license_record': {
'year': license_year
}
}
elif copyright_holders:
attributes = {
'license_record': {
'copyright_holders': copyright_holders
}
}
return {
'data': {
'id': node_id,
'type': jsonapi_type,
'attributes': attributes,
'relationships': {
'license': {
'data': {
'type': 'licenses',
'id': license_id
}
}
}
}
} if license_id else {
'data': {
'id': node_id,
'type': jsonapi_type,
'attributes': attributes
}
}
return payload
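
    # For reference, a sketch of the document this fixture builds (the
    # identifiers below are hypothetical, not real records):
    #
    #     make_payload(node_id='abc12', license_id='def34',
    #                  license_year='2015', copyright_holders=['A. Author'])
    #     # => {'data': {'id': 'abc12', 'type': 'preprints',
    #     #              'attributes': {'license_record': {
    #     #                  'year': '2015',
    #     #                  'copyright_holders': ['A. Author']}},
    #     #              'relationships': {'license': {'data': {
    #     #                  'type': 'licenses', 'id': 'def34'}}}}}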
@pytest.fixture()
def make_request(self, app):
def request(url, data, auth=None, expect_errors=False):
return app.patch_json_api(
url, data, auth=auth, expect_errors=expect_errors)
return request
def test_admin_update_license_with_invalid_id(
self, admin_contrib, preprint, url, make_payload, make_request):
data = make_payload(
node_id=preprint._id,
license_id='thisisafakelicenseid'
)
assert preprint.license is None
res = make_request(
url, data,
auth=admin_contrib.auth,
expect_errors=True)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'Unable to find specified license.'
preprint.reload()
assert preprint.license is None
def test_admin_can_update_license(
self, admin_contrib, preprint, cc0_license,
url, make_payload, make_request):
data = make_payload(
node_id=preprint._id,
license_id=cc0_license._id
)
assert preprint.license is None
res = make_request(url, data, auth=admin_contrib.auth)
assert res.status_code == 200
preprint.reload()
res_data = res.json['data']
pp_license_id = preprint.license.node_license._id
assert res_data['relationships']['license']['data'].get(
'id', None) == pp_license_id
assert res_data['relationships']['license']['data'].get(
'type', None) == 'licenses'
assert preprint.license.node_license == cc0_license
assert preprint.license.year is None
assert preprint.license.copyright_holders == []
# check logs
log = preprint.logs.latest()
assert log.action == 'license_changed'
assert log.params.get('preprint') == preprint._id
def test_admin_can_update_license_record(
self, admin_contrib, preprint, no_license,
url, make_payload, make_request):
data = make_payload(
node_id=preprint._id,
license_id=no_license._id,
license_year='2015',
copyright_holders=['Tonya Shepoly, Lucas Pucas']
)
assert preprint.license is None
res = make_request(url, data, auth=admin_contrib.auth)
assert res.status_code == 200
preprint.reload()
assert preprint.license.node_license == no_license
assert preprint.license.year == '2015'
assert preprint.license.copyright_holders == [
'Tonya Shepoly, Lucas Pucas']
def test_cannot_update_license(
self, write_contrib, read_contrib, non_contrib,
preprint, cc0_license, url, make_payload, make_request):
# test_write_contrib_can_update_license
data = make_payload(
node_id=preprint._id,
license_id=cc0_license._id
)
res = make_request(
url, data,
auth=write_contrib.auth,
expect_errors=True)
assert res.status_code == 200
preprint.reload()
assert preprint.license.node_license == cc0_license
# test_read_contrib_cannot_update_license
data = make_payload(
node_id=preprint._id,
license_id=cc0_license._id
)
res = make_request(
url, data,
auth=read_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_non_contrib_cannot_update_license
data = make_payload(
node_id=preprint._id,
license_id=cc0_license._id
)
res = make_request(
url, data,
auth=non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_unauthenticated_user_cannot_update_license
data = make_payload(
node_id=preprint._id,
license_id=cc0_license._id
)
res = make_request(url, data, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
def test_update_error(
self, admin_contrib, preprint, preprint_provider,
mit_license, no_license, url, make_payload, make_request):
# test_update_preprint_with_invalid_license_for_provider
data = make_payload(
node_id=preprint._id,
license_id=mit_license._id
)
assert preprint.license is None
res = make_request(
url, data,
auth=admin_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'Invalid license chosen for {}'.format(
preprint_provider.name)
# test_update_preprint_license_without_required_year_in_payload
data = make_payload(
node_id=preprint._id,
license_id=no_license._id,
copyright_holders=['Rachel', 'Rheisen']
)
res = make_request(
url, data,
auth=admin_contrib.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'year must be specified for this license'
# test_update_preprint_license_without_required_copyright_holders_in_payload
data = make_payload(
node_id=preprint._id,
license_id=no_license._id,
license_year='1994'
)
res = make_request(
url, data,
auth=admin_contrib.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'copyrightHolders must be specified for this license'
def test_update_preprint_with_existing_license_year_attribute_only(
self, admin_contrib, preprint, no_license, url, make_payload, make_request):
preprint.set_preprint_license(
{
'id': no_license.license_id,
'year': '2014',
'copyrightHolders': ['Daniel FromBrazil', 'Queen Jaedyn']
},
Auth(admin_contrib),
)
preprint.save()
assert preprint.license.node_license == no_license
assert preprint.license.year == '2014'
assert preprint.license.copyright_holders == [
'Daniel FromBrazil', 'Queen Jaedyn']
data = make_payload(
node_id=preprint._id,
license_year='2015'
)
res = make_request(url, data, auth=admin_contrib.auth)
assert res.status_code == 200
preprint.license.reload()
assert preprint.license.node_license == no_license
assert preprint.license.year == '2015'
assert preprint.license.copyright_holders == [
'Daniel FromBrazil', 'Queen Jaedyn']
def test_update_preprint_with_existing_license_copyright_holders_attribute_only(
self, admin_contrib, preprint, no_license, url, make_payload, make_request):
preprint.set_preprint_license(
{
'id': no_license.license_id,
'year': '2014',
'copyrightHolders': ['Captain Haley', 'Keegor Cannoli']
},
Auth(admin_contrib),
)
preprint.save()
assert preprint.license.node_license == no_license
assert preprint.license.year == '2014'
assert preprint.license.copyright_holders == [
'Captain Haley', 'Keegor Cannoli']
data = make_payload(
node_id=preprint._id,
copyright_holders=['Reason Danish', 'Ben the NJB']
)
res = make_request(url, data, auth=admin_contrib.auth)
assert res.status_code == 200
preprint.license.reload()
assert preprint.license.node_license == no_license
assert preprint.license.year == '2014'
assert preprint.license.copyright_holders == [
'Reason Danish', 'Ben the NJB']
def test_update_preprint_with_existing_license_relationship_only(
self, admin_contrib, preprint, cc0_license,
no_license, url, make_payload, make_request):
preprint.set_preprint_license(
{
'id': no_license.license_id,
'year': '2014',
'copyrightHolders': ['Reason', 'Mr. Lulu']
},
Auth(admin_contrib),
)
preprint.save()
assert preprint.license.node_license == no_license
assert preprint.license.year == '2014'
assert preprint.license.copyright_holders == ['Reason', 'Mr. Lulu']
data = make_payload(
node_id=preprint._id,
license_id=cc0_license._id
)
res = make_request(url, data, auth=admin_contrib.auth)
assert res.status_code == 200
preprint.license.reload()
assert preprint.license.node_license == cc0_license
assert preprint.license.year == '2014'
assert preprint.license.copyright_holders == ['Reason', 'Mr. Lulu']
def test_update_preprint_with_existing_license_relationship_and_attributes(
self, admin_contrib, preprint, cc0_license,
no_license, url, make_payload, make_request):
preprint.set_preprint_license(
{
'id': no_license.license_id,
'year': '2014',
'copyrightHolders': ['Reason', 'Mr. Cosgrove']
},
Auth(admin_contrib),
save=True
)
assert preprint.license.node_license == no_license
assert preprint.license.year == '2014'
assert preprint.license.copyright_holders == ['Reason', 'Mr. Cosgrove']
data = make_payload(
node_id=preprint._id,
license_id=cc0_license._id,
license_year='2015',
copyright_holders=['Rheisen', 'Princess Tyler']
)
res = make_request(url, data, auth=admin_contrib.auth)
assert res.status_code == 200
preprint.license.reload()
assert preprint.license.node_license == cc0_license
assert preprint.license.year == '2015'
assert preprint.license.copyright_holders == [
'Rheisen', 'Princess Tyler']
def test_update_preprint_license_does_not_change_project_license(
self, admin_contrib, preprint, cc0_license,
no_license, url, make_payload, make_request):
project = ProjectFactory(creator=admin_contrib)
preprint.node = project
preprint.save()
preprint.node.set_node_license(
{
'id': no_license.license_id,
'year': '2015',
'copyrightHolders': ['Simba', 'Mufasa']
},
auth=Auth(admin_contrib)
)
preprint.node.save()
assert preprint.node.node_license.node_license == no_license
data = make_payload(
node_id=preprint._id,
license_id=cc0_license._id
)
res = make_request(url, data, auth=admin_contrib.auth)
assert res.status_code == 200
preprint.reload()
assert preprint.license.node_license == cc0_license
assert preprint.node.node_license.node_license == no_license
def test_update_preprint_license_without_change_does_not_add_log(
self, admin_contrib, preprint, no_license, url, make_payload, make_request):
preprint.set_preprint_license(
{
'id': no_license.license_id,
'year': '2015',
'copyrightHolders': ['Kim', 'Kanye']
},
auth=Auth(admin_contrib),
save=True
)
before_num_logs = preprint.logs.count()
before_update_log = preprint.logs.latest()
data = make_payload(
node_id=preprint._id,
license_id=no_license._id,
license_year='2015',
copyright_holders=['Kanye', 'Kim']
)
res = make_request(url, data, auth=admin_contrib.auth)
preprint.reload()
after_num_logs = preprint.logs.count()
after_update_log = preprint.logs.latest()
assert res.status_code == 200
assert before_num_logs == after_num_logs
assert before_update_log._id == after_update_log._id
@pytest.mark.django_db
class TestPreprintDetailPermissions:
@pytest.fixture()
def admin(self):
return AuthUserFactory()
@pytest.fixture()
def write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def public_project(self, admin):
return ProjectFactory(creator=admin, is_public=True)
@pytest.fixture()
def private_project(self, admin):
return ProjectFactory(creator=admin, is_public=False)
@pytest.fixture()
def subject(self):
return SubjectFactory()
@pytest.fixture()
def provider(self):
return PreprintProviderFactory()
@pytest.fixture()
def unpublished_preprint(self, admin, provider, subject, public_project):
fact = PreprintFactory(
creator=admin,
filename='toe_socks_and_sunrises.pdf',
provider=provider,
subjects=[[subject._id]],
is_published=False,
machine_state='initial')
assert fact.is_published is False
return fact
@pytest.fixture()
def private_preprint(self, admin, provider, subject, private_project, write_contrib):
fact = PreprintFactory(
creator=admin,
filename='toe_socks_and_sunrises.pdf',
provider=provider,
subjects=[[subject._id]],
is_published=True,
is_public=False,
machine_state='accepted')
fact.add_contributor(write_contrib, permissions=WRITE)
fact.is_public = False
fact.save()
return fact
@pytest.fixture()
def published_preprint(self, admin, provider, subject, write_contrib):
fact = PreprintFactory(
creator=admin,
filename='toe_socks_and_sunrises.pdf',
provider=provider,
subjects=[[subject._id]],
is_published=True,
is_public=True,
machine_state='accepted')
fact.add_contributor(write_contrib, permissions=WRITE)
return fact
@pytest.fixture()
def abandoned_private_preprint(
self, admin, provider, subject, private_project):
return PreprintFactory(
creator=admin,
filename='toe_socks_and_sunrises.pdf',
provider=provider,
subjects=[[subject._id]],
project=private_project,
is_published=False,
is_public=False,
machine_state='initial')
@pytest.fixture()
def abandoned_public_preprint(
self, admin, provider, subject, public_project):
fact = PreprintFactory(
creator=admin,
filename='toe_socks_and_sunrises.pdf',
provider=provider,
subjects=[[subject._id]],
project=public_project,
is_published=False,
is_public=True,
machine_state='initial')
assert fact.is_public is True
return fact
@pytest.fixture()
def abandoned_private_url(self, abandoned_private_preprint):
return '/{}preprints/{}/'.format(
API_BASE, abandoned_private_preprint._id)
@pytest.fixture()
def abandoned_public_url(self, abandoned_public_preprint):
return '/{}preprints/{}/'.format(
API_BASE, abandoned_public_preprint._id)
@pytest.fixture()
def unpublished_url(self, unpublished_preprint):
return '/{}preprints/{}/'.format(API_BASE, unpublished_preprint._id)
@pytest.fixture()
def private_url(self, private_preprint):
return '/{}preprints/{}/'.format(API_BASE, private_preprint._id)
def test_preprint_is_published_detail(
self, app, admin, write_contrib, non_contrib,
unpublished_preprint, unpublished_url):
# test_unpublished_visible_to_admins
res = app.get(unpublished_url, auth=admin.auth)
assert res.json['data']['id'] == unpublished_preprint._id
# test_unpublished_invisible_to_write_contribs
res = app.get(
unpublished_url,
auth=write_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_unpublished_invisible_to_non_contribs
res = app.get(
unpublished_url,
auth=non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_unpublished_invisible_to_public
res = app.get(unpublished_url, expect_errors=True)
assert res.status_code == 401
def test_preprint_is_public_detail(
self, app, admin, write_contrib, non_contrib,
private_preprint, private_url):
# test_private_visible_to_admins
res = app.get(private_url, auth=admin.auth)
assert res.json['data']['id'] == private_preprint._id
# test_private_visible_to_write_contribs
res = app.get(private_url, auth=write_contrib.auth)
assert res.status_code == 200
# test_private_invisible_to_non_contribs
res = app.get(private_url, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_private_invisible_to_public
res = app.get(private_url, expect_errors=True)
assert res.status_code == 401
def test_preprint_is_abandoned_detail(
self, app, admin, write_contrib,
non_contrib, abandoned_private_preprint,
abandoned_public_preprint,
abandoned_private_url,
abandoned_public_url):
# test_abandoned_private_visible_to_admins
res = app.get(abandoned_private_url, auth=admin.auth)
assert res.json['data']['id'] == abandoned_private_preprint._id
# test_abandoned_private_invisible_to_write_contribs
res = app.get(
abandoned_private_url,
auth=write_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_abandoned_private_invisible_to_non_contribs
res = app.get(
abandoned_private_url,
auth=non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_abandoned_private_invisible_to_public
res = app.get(abandoned_private_url, expect_errors=True)
assert res.status_code == 401
# test_abandoned_public_visible_to_admins
res = app.get(abandoned_public_url, auth=admin.auth)
assert res.json['data']['id'] == abandoned_public_preprint._id
# test_abandoned_public_invisible_to_write_contribs
res = app.get(
abandoned_public_url,
auth=write_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_abandoned_public_invisible_to_non_contribs
res = app.get(
abandoned_public_url,
auth=non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_abandoned_public_invisible_to_public
res = app.get(abandoned_public_url, expect_errors=True)
assert res.status_code == 401
def test_access_primary_file_on_unpublished_preprint(
self, app, user, write_contrib):
unpublished = PreprintFactory(creator=user, is_public=True, is_published=False)
preprint_file_id = unpublished.primary_file._id
url = '/{}files/{}/'.format(API_BASE, preprint_file_id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert unpublished.is_published is False
res = app.get(url, expect_errors=True)
assert res.status_code == 401
unpublished.add_contributor(write_contrib, permissions=WRITE, save=True)
res = app.get(url, auth=write_contrib.auth, expect_errors=True)
assert res.status_code == 403
@pytest.mark.django_db
class TestReviewsPreprintDetailPermissions:
@pytest.fixture()
def admin(self):
return AuthUserFactory()
@pytest.fixture()
def write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def public_project(self, admin):
return ProjectFactory(creator=admin, is_public=True)
@pytest.fixture()
def private_project(self, admin):
return ProjectFactory(creator=admin, is_public=False)
@pytest.fixture()
def subject(self):
return SubjectFactory()
@pytest.fixture()
def reviews_provider(self):
return PreprintProviderFactory(reviews_workflow='pre-moderation')
@pytest.fixture()
def unpublished_reviews_preprint(
self, admin, reviews_provider, subject, public_project, write_contrib):
preprint = PreprintFactory(
creator=admin,
filename='toe_socks_and_sunrises.pdf',
provider=reviews_provider,
subjects=[[subject._id]],
is_published=False,
machine_state=DefaultStates.PENDING.value)
preprint.add_contributor(write_contrib, permissions=WRITE)
preprint.save()
return preprint
@pytest.fixture()
def unpublished_reviews_initial_preprint(
self, admin, reviews_provider, subject, public_project):
return PreprintFactory(
creator=admin,
filename='toe_socks_and_sunrises.pdf',
provider=reviews_provider,
subjects=[[subject._id]],
is_published=False,
machine_state=DefaultStates.INITIAL.value)
@pytest.fixture()
def private_reviews_preprint(
self, admin, reviews_provider, subject, private_project, write_contrib):
preprint = PreprintFactory(
creator=admin,
filename='toe_socks_and_sunsets.pdf',
provider=reviews_provider,
subjects=[[subject._id]],
is_published=False,
is_public=False,
machine_state=DefaultStates.PENDING.value)
preprint.add_contributor(write_contrib, permissions=WRITE)
return preprint
@pytest.fixture()
def unpublished_url(self, unpublished_reviews_preprint):
return '/{}preprints/{}/'.format(
API_BASE, unpublished_reviews_preprint._id)
@pytest.fixture()
def unpublished_initial_url(self, unpublished_reviews_initial_preprint):
return '/{}preprints/{}/'.format(
API_BASE, unpublished_reviews_initial_preprint._id)
@pytest.fixture()
def private_url(self, private_reviews_preprint):
return '/{}preprints/{}/'.format(
API_BASE, private_reviews_preprint._id)
def test_reviews_preprint_is_published_detail(
self, app, admin, write_contrib, non_contrib,
unpublished_reviews_preprint, unpublished_url):
# test_unpublished_visible_to_admins
res = app.get(unpublished_url, auth=admin.auth)
assert res.json['data']['id'] == unpublished_reviews_preprint._id
# test_unpublished_visible_to_write_contribs
res = app.get(
unpublished_url,
auth=write_contrib.auth,
expect_errors=True)
assert res.status_code == 200
# test_unpublished_invisible_to_non_contribs
res = app.get(
unpublished_url,
auth=non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_unpublished_invisible_to_public
res = app.get(unpublished_url, expect_errors=True)
assert res.status_code == 401
def test_reviews_preprint_initial_detail(
self, app, admin, write_contrib, non_contrib,
unpublished_reviews_initial_preprint,
unpublished_initial_url):
# test_unpublished_visible_to_admins
res = app.get(unpublished_initial_url, auth=admin.auth)
assert res.json['data']['id'] == unpublished_reviews_initial_preprint._id
# test_unpublished_invisible_to_write_contribs
res = app.get(
unpublished_initial_url,
auth=write_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_unpublished_invisible_to_non_contribs
res = app.get(
unpublished_initial_url,
auth=non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# test_unpublished_invisible_to_public
res = app.get(unpublished_initial_url, expect_errors=True)
assert res.status_code == 401
def test_reviews_preprint_is_public_detail(
self, app, admin, write_contrib, non_contrib,
private_reviews_preprint, private_url):
# test_private_visible_to_admins
res = app.get(private_url, auth=admin.auth)
assert res.json['data']['id'] == private_reviews_preprint._id
# test_private_visible_to_write_contribs
res = app.get(private_url, auth=write_contrib.auth, expect_errors=True)
assert res.status_code == 200
# test_private_invisible_to_non_contribs
res = app.get(private_url, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_private_invisible_to_public
res = app.get(private_url, expect_errors=True)
assert res.status_code == 401
@pytest.mark.django_db
class TestPreprintDetailWithMetrics:
# enable the ELASTICSEARCH_METRICS switch for all tests
@pytest.fixture(autouse=True)
def enable_elasticsearch_metrics(self):
with override_switch(features.ELASTICSEARCH_METRICS, active=True):
yield
@pytest.mark.parametrize(('metric_name', 'metric_class_name'),
[
('downloads', 'PreprintDownload'),
('views', 'PreprintView'),
])
    def test_preprint_detail_with_metrics(self, app, settings, metric_name, metric_class_name):
preprint = PreprintFactory()
url = '/{}preprints/{}/?metrics[{}]=total'.format(API_BASE, preprint._id, metric_name)
with mock.patch('api.preprints.views.{}.get_count_for_preprint'.format(metric_class_name)) as mock_get_count_for_preprint:
mock_get_count_for_preprint.return_value = 42
res = app.get(url)
assert res.status_code == 200
data = res.json
assert 'metrics' in data['meta']
assert metric_name in data['meta']['metrics']
assert data['meta']['metrics'][metric_name] == 42
|
from bittrex import Bittrex
class Market:
    def __init__(self):
        self.bittrex = Bittrex()
        # Start with an empty cache so the fallback in get_crypto_currencies()
        # has something to return even if the very first API call fails.
        self.currencies_raw = []
        self.update_currencies_raw()

    def update_currencies_raw(self):
        self.currencies_raw = self.get_crypto_currencies()
        self.currencies_list = sorted(c["MarketName"] for c in self.currencies_raw)

    def get_crypto_currencies(self):
        try:
            return self.bittrex.get_markets()["result"]
        except Exception:
            # Network or API failure: fall back to the last fetched data.
            return self.currencies_raw
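
# Minimal usage sketch (an assumption, not part of the original module):
# requires network access to the Bittrex public API.
if __name__ == '__main__':
    market = Market()
    print(market.currencies_list[:5])   # first few market names
    market.update_currencies_raw()      # refresh; keeps old data on failure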
|
"""
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
https://tools.ietf.org/html/rfc7231#section-7.1.4
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import time
from collections import defaultdict
from django.conf import settings
from django.core.cache import caches
from django.http import HttpResponse, HttpResponseNotModified
from django.utils.crypto import md5
from django.utils.http import (
http_date, parse_etags, parse_http_date_safe, quote_etag,
)
from django.utils.log import log_response
from django.utils.regex_helper import _lazy_re_compile
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
cc_delim_re = _lazy_re_compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
Patch the Cache-Control header by adding all keyword arguments to it.
The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(*t):
if t[1] is True:
return t[0]
else:
return '%s=%s' % (t[0], t[1])
cc = defaultdict(set)
if response.get('Cache-Control'):
for field in cc_delim_re.split(response.headers['Cache-Control']):
directive, value = dictitem(field)
if directive == 'no-cache':
# no-cache supports multiple field names.
cc[directive].add(value)
else:
cc[directive] = value
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])
# Allow overriding private caching and vice versa
if 'private' in cc and 'public' in kwargs:
del cc['private']
elif 'public' in cc and 'private' in kwargs:
del cc['public']
for (k, v) in kwargs.items():
directive = k.replace('_', '-')
if directive == 'no-cache':
# no-cache supports multiple field names.
cc[directive].add(v)
else:
cc[directive] = v
directives = []
for directive, values in cc.items():
if isinstance(values, set):
if True in values:
# True takes precedence.
values = {True}
directives.extend([dictvalue(directive, value) for value in values])
else:
directives.append(dictvalue(directive, values))
cc = ', '.join(directives)
response.headers['Cache-Control'] = cc
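
# Example (a sketch, not from Django's docs): on a fresh response the call
# below yields "Cache-Control: max-age=3600, public". Directive order follows
# insertion order, and a later call with a smaller max_age wins.
#
#     from django.http import HttpResponse
#     response = HttpResponse()
#     patch_cache_control(response, max_age=3600, public=True)
#     response.headers['Cache-Control']   # 'max-age=3600, public'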
def get_max_age(response):
"""
Return the max-age from the response Cache-Control header as an integer,
or None if it wasn't found or wasn't an integer.
"""
if not response.has_header('Cache-Control'):
return
cc = dict(_to_tuple(el) for el in cc_delim_re.split(response.headers['Cache-Control']))
try:
return int(cc['max-age'])
except (ValueError, TypeError, KeyError):
pass
def set_response_etag(response):
if not response.streaming and response.content:
response.headers['ETag'] = quote_etag(
md5(response.content, usedforsecurity=False).hexdigest(),
)
return response
def _precondition_failed(request):
response = HttpResponse(status=412)
log_response(
'Precondition Failed: %s', request.path,
response=response,
request=request,
)
return response
def _not_modified(request, response=None):
new_response = HttpResponseNotModified()
if response:
# Preserve the headers required by Section 4.1 of RFC 7232, as well as
# Last-Modified.
for header in ('Cache-Control', 'Content-Location', 'Date', 'ETag', 'Expires', 'Last-Modified', 'Vary'):
if header in response:
new_response.headers[header] = response.headers[header]
# Preserve cookies as per the cookie specification: "If a proxy server
# receives a response which contains a Set-cookie header, it should
# propagate the Set-cookie header to the client, regardless of whether
    # the response was 304 (Not Modified) or 200 (OK)."
# https://curl.haxx.se/rfc/cookie_spec.html
new_response.cookies = response.cookies
return new_response
def get_conditional_response(request, etag=None, last_modified=None, response=None):
# Only return conditional responses on successful requests.
if response and not (200 <= response.status_code < 300):
return response
# Get HTTP request headers.
if_match_etags = parse_etags(request.META.get('HTTP_IF_MATCH', ''))
if_unmodified_since = request.META.get('HTTP_IF_UNMODIFIED_SINCE')
if_unmodified_since = if_unmodified_since and parse_http_date_safe(if_unmodified_since)
if_none_match_etags = parse_etags(request.META.get('HTTP_IF_NONE_MATCH', ''))
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
if_modified_since = if_modified_since and parse_http_date_safe(if_modified_since)
# Step 1 of section 6 of RFC 7232: Test the If-Match precondition.
if if_match_etags and not _if_match_passes(etag, if_match_etags):
return _precondition_failed(request)
# Step 2: Test the If-Unmodified-Since precondition.
if (not if_match_etags and if_unmodified_since and
not _if_unmodified_since_passes(last_modified, if_unmodified_since)):
return _precondition_failed(request)
# Step 3: Test the If-None-Match precondition.
if if_none_match_etags and not _if_none_match_passes(etag, if_none_match_etags):
if request.method in ('GET', 'HEAD'):
return _not_modified(request, response)
else:
return _precondition_failed(request)
# Step 4: Test the If-Modified-Since precondition.
if (
not if_none_match_etags and
if_modified_since and
not _if_modified_since_passes(last_modified, if_modified_since) and
request.method in ('GET', 'HEAD')
):
return _not_modified(request, response)
# Step 5: Test the If-Range precondition (not supported).
# Step 6: Return original response since there isn't a conditional response.
return response
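
# Example (sketch): a view can delegate conditional-GET handling to the helper
# above. The view name and ETag value here are illustrative only.
#
#     def article_view(request):
#         etag = '"article-v1"'
#         response = get_conditional_response(request, etag=etag)
#         if response is None:                  # no precondition matched
#             response = HttpResponse('full body')
#             response.headers['ETag'] = etag
#         return response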
def _if_match_passes(target_etag, etags):
"""
Test the If-Match comparison as defined in section 3.1 of RFC 7232.
"""
if not target_etag:
# If there isn't an ETag, then there can't be a match.
return False
elif etags == ['*']:
# The existence of an ETag means that there is "a current
# representation for the target resource", even if the ETag is weak,
# so there is a match to '*'.
return True
elif target_etag.startswith('W/'):
# A weak ETag can never strongly match another ETag.
return False
else:
# Since the ETag is strong, this will only return True if there's a
# strong match.
return target_etag in etags
def _if_unmodified_since_passes(last_modified, if_unmodified_since):
"""
Test the If-Unmodified-Since comparison as defined in section 3.4 of
RFC 7232.
"""
return last_modified and last_modified <= if_unmodified_since
def _if_none_match_passes(target_etag, etags):
"""
Test the If-None-Match comparison as defined in section 3.2 of RFC 7232.
"""
if not target_etag:
# If there isn't an ETag, then there isn't a match.
return True
elif etags == ['*']:
# The existence of an ETag means that there is "a current
# representation for the target resource", so there is a match to '*'.
return False
else:
# The comparison should be weak, so look for a match after stripping
# off any weak indicators.
target_etag = target_etag.strip('W/')
etags = (etag.strip('W/') for etag in etags)
return target_etag not in etags
def _if_modified_since_passes(last_modified, if_modified_since):
"""
Test the If-Modified-Since comparison as defined in section 3.3 of RFC 7232.
"""
return not last_modified or last_modified > if_modified_since
def patch_response_headers(response, cache_timeout=None):
"""
Add HTTP caching headers to the given HttpResponse: Expires and
Cache-Control.
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if not response.has_header('Expires'):
response.headers['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Add headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True, private=True)
def patch_vary_headers(response, newheaders):
"""
Add (or update) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". If headers
contains an asterisk, then "Vary" header will consist of a single asterisk
'*'. Otherwise, existing headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response.headers['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = {header.lower() for header in vary_headers}
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
vary_headers += additional_headers
if '*' in vary_headers:
response.headers['Vary'] = '*'
else:
response.headers['Vary'] = ', '.join(vary_headers)
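
# Example (sketch): starting from "Vary: Cookie", the call below produces
# "Vary: Cookie, Accept-Language". Duplicates are skipped case-insensitively,
# and '*' anywhere collapses the header to a lone asterisk.
#
#     response.headers['Vary'] = 'Cookie'
#     patch_vary_headers(response, ['Accept-Language', 'cookie'])
#     response.headers['Vary']   # 'Cookie, Accept-Language'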
def has_vary_header(response, header_query):
"""
Check to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response.headers['Vary'])
existing_headers = {header.lower() for header in vary_headers}
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If necessary, add the current locale or time zone to the cache key."""
if settings.USE_I18N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
if settings.USE_TZ:
cache_key += '.%s' % get_current_timezone_name()
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Return a cache key from the headers given in the header list."""
ctx = md5(usedforsecurity=False)
for header in headerlist:
value = request.META.get(header)
if value is not None:
ctx.update(value.encode())
url = md5(request.build_absolute_uri().encode('ascii'), usedforsecurity=False)
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, method, url.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Return a cache key for the header cache."""
url = md5(request.build_absolute_uri().encode('ascii'), usedforsecurity=False)
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, url.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Return a cache key based on the request URL and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global URL registry and uses those to build a cache key
to check against.
If there isn't a headerlist stored, return None, indicating that the page
needs to be rebuilt.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
headerlist = cache.get(cache_key)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learn what headers to take into account for some request URL from the
response object. Store those headers in a global URL registry so that
later access to that URL will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
if response.has_header('Vary'):
is_accept_language_redundant = settings.USE_I18N
# If i18n is used, the generated cache key will be suffixed with the
# current locale. Adding the raw value of Accept-Language is redundant
# in that case and would result in storing the same content under
# multiple keys in the cache. See #18191 for details.
headerlist = []
for header in cc_delim_re.split(response.headers['Vary']):
header = header.upper().replace('-', '_')
if header != 'ACCEPT_LANGUAGE' or not is_accept_language_redundant:
headerlist.append('HTTP_' + header)
headerlist.sort()
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.build_absolute_uri()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
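
# Sketch of how the two halves cooperate (mirroring what Django's cache
# middleware does; the cache alias and timeout are illustrative):
#
#     # response phase: record which headers matter, then store the page
#     key = learn_cache_key(request, response, cache_timeout=600)
#     caches['default'].set(key, response, 600)
#
#     # request phase: rebuild the same key without generating the response
#     key = get_cache_key(request)                  # None until learned
#     cached = caches['default'].get(key) if key else None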
def _to_tuple(s):
t = s.split('=', 1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
|
import grpc
from google.cloud.monitoring_v3.proto import notification_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2
from google.cloud.monitoring_v3.proto import notification_service_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class NotificationChannelServiceStub(object):
"""The Notification Channel API provides access to configuration that
controls how messages related to incidents are sent.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListNotificationChannelDescriptors = channel.unary_unary(
'/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors',
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelDescriptorsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelDescriptorsResponse.FromString,
)
self.GetNotificationChannelDescriptor = channel.unary_unary(
'/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor',
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelDescriptorRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannelDescriptor.FromString,
)
self.ListNotificationChannels = channel.unary_unary(
'/google.monitoring.v3.NotificationChannelService/ListNotificationChannels',
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelsResponse.FromString,
)
self.GetNotificationChannel = channel.unary_unary(
'/google.monitoring.v3.NotificationChannelService/GetNotificationChannel',
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString,
)
self.CreateNotificationChannel = channel.unary_unary(
'/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel',
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.CreateNotificationChannelRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString,
)
self.UpdateNotificationChannel = channel.unary_unary(
'/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel',
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.UpdateNotificationChannelRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString,
)
self.DeleteNotificationChannel = channel.unary_unary(
'/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel',
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.DeleteNotificationChannelRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.SendNotificationChannelVerificationCode = channel.unary_unary(
'/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode',
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.SendNotificationChannelVerificationCodeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetNotificationChannelVerificationCode = channel.unary_unary(
'/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode',
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelVerificationCodeRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelVerificationCodeResponse.FromString,
)
self.VerifyNotificationChannel = channel.unary_unary(
'/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel',
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.VerifyNotificationChannelRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString,
)
class NotificationChannelServiceServicer(object):
"""The Notification Channel API provides access to configuration that
controls how messages related to incidents are sent.
"""
def ListNotificationChannelDescriptors(self, request, context):
"""Lists the descriptors for supported channel types. The use of descriptors
makes it possible for new channel types to be dynamically added.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetNotificationChannelDescriptor(self, request, context):
"""Gets a single channel descriptor. The descriptor indicates which fields
are expected / permitted for a notification channel of the given type.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListNotificationChannels(self, request, context):
"""Lists the notification channels that have been created for the project.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetNotificationChannel(self, request, context):
"""Gets a single notification channel. The channel includes the relevant
configuration details with which the channel was created. However, the
response may truncate or omit passwords, API keys, or other private key
matter and thus the response may not be 100% identical to the information
that was supplied in the call to the create method.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateNotificationChannel(self, request, context):
"""Creates a new notification channel, representing a single notification
endpoint such as an email address, SMS number, or pagerduty service.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateNotificationChannel(self, request, context):
"""Updates a notification channel. Fields not specified in the field mask
remain unchanged.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteNotificationChannel(self, request, context):
"""Deletes a notification channel.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendNotificationChannelVerificationCode(self, request, context):
"""Causes a verification code to be delivered to the channel. The code
can then be supplied in `VerifyNotificationChannel` to verify the channel.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetNotificationChannelVerificationCode(self, request, context):
"""Requests a verification code for an already verified channel that can then
be used in a call to VerifyNotificationChannel() on a different channel
with an equivalent identity in the same or in a different project. This
makes it possible to copy a channel between projects without requiring
manual reverification of the channel. If the channel is not in the
verified state, this method will fail (in other words, this may only be
used if the SendNotificationChannelVerificationCode and
VerifyNotificationChannel paths have already been used to put the given
channel into the verified state).
There is no guarantee that the verification codes returned by this method
will be of a similar structure or form as the ones that are delivered
to the channel via SendNotificationChannelVerificationCode; while
VerifyNotificationChannel() will recognize both the codes delivered via
SendNotificationChannelVerificationCode() and returned from
GetNotificationChannelVerificationCode(), it is typically the case that
the verification codes delivered via
SendNotificationChannelVerificationCode() will be shorter and also
have a shorter expiration (e.g. codes such as "G-123456") whereas
GetVerificationCode() will typically return a much longer, websafe base
64 encoded string that has a longer expiration time.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def VerifyNotificationChannel(self, request, context):
"""Verifies a `NotificationChannel` by proving receipt of the code
delivered to the channel as a result of calling
`SendNotificationChannelVerificationCode`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_NotificationChannelServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'ListNotificationChannelDescriptors': grpc.unary_unary_rpc_method_handler(
servicer.ListNotificationChannelDescriptors,
request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelDescriptorsRequest.FromString,
response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelDescriptorsResponse.SerializeToString,
),
'GetNotificationChannelDescriptor': grpc.unary_unary_rpc_method_handler(
servicer.GetNotificationChannelDescriptor,
request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelDescriptorRequest.FromString,
response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannelDescriptor.SerializeToString,
),
'ListNotificationChannels': grpc.unary_unary_rpc_method_handler(
servicer.ListNotificationChannels,
request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelsRequest.FromString,
response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelsResponse.SerializeToString,
),
'GetNotificationChannel': grpc.unary_unary_rpc_method_handler(
servicer.GetNotificationChannel,
request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelRequest.FromString,
response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.SerializeToString,
),
'CreateNotificationChannel': grpc.unary_unary_rpc_method_handler(
servicer.CreateNotificationChannel,
request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.CreateNotificationChannelRequest.FromString,
response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.SerializeToString,
),
'UpdateNotificationChannel': grpc.unary_unary_rpc_method_handler(
servicer.UpdateNotificationChannel,
request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.UpdateNotificationChannelRequest.FromString,
response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.SerializeToString,
),
'DeleteNotificationChannel': grpc.unary_unary_rpc_method_handler(
servicer.DeleteNotificationChannel,
request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.DeleteNotificationChannelRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'SendNotificationChannelVerificationCode': grpc.unary_unary_rpc_method_handler(
servicer.SendNotificationChannelVerificationCode,
request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.SendNotificationChannelVerificationCodeRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'GetNotificationChannelVerificationCode': grpc.unary_unary_rpc_method_handler(
servicer.GetNotificationChannelVerificationCode,
request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelVerificationCodeRequest.FromString,
response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelVerificationCodeResponse.SerializeToString,
),
'VerifyNotificationChannel': grpc.unary_unary_rpc_method_handler(
servicer.VerifyNotificationChannel,
request_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.VerifyNotificationChannelRequest.FromString,
response_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.monitoring.v3.NotificationChannelService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
{
"content_hash": "8fda24f80bc21efa16459cb62121c021",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 172,
"avg_line_length": 64.34334763948497,
"alnum_prop": 0.776747598719317,
"repo_name": "tseaver/gcloud-python",
"id": "69747310a24c43c52b963c298ae7e00be97a3a31",
"size": "15062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitoring/google/cloud/monitoring_v3/proto/notification_service_pb2_grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "93642"
},
{
"name": "Python",
"bytes": "2874989"
},
{
"name": "Shell",
"bytes": "4436"
}
],
"symlink_target": ""
}
|
import inspect
import logging
import requests
import xmltodict
from xml.parsers.expat import ExpatError
from optionaldict import optionaldict
from wechatpy.crypto import WeChatRefundCrypto
from wechatpy.utils import random_string
from wechatpy.exceptions import WeChatPayException, InvalidSignatureException
from wechatpy.pay.utils import (
calculate_signature,
calculate_signature_hmac,
_check_signature,
dict_to_xml,
)
from wechatpy.pay.api.base import BaseWeChatPayAPI
from wechatpy.pay import api
logger = logging.getLogger(__name__)
def _is_api_endpoint(obj):
return isinstance(obj, BaseWeChatPayAPI)
class WeChatPay:
"""
微信支付接口
:param appid: 微信公众号 appid
:param sub_appid: 当前调起支付的小程序APPID
:param api_key: 商户 key,不要在这里使用小程序的密钥
:param mch_id: 商户号
:param sub_mch_id: 可选,子商户号,受理模式下必填
:param mch_cert: 必填,商户证书路径
:param mch_key: 必填,商户证书私钥路径
:param timeout: 可选,请求超时时间,单位秒,默认无超时设置
:param sandbox: 可选,是否使用测试环境,默认为 False
"""
redpack = api.WeChatRedpack()
"""Red packet API"""
transfer = api.WeChatTransfer()
"""Enterprise payment (transfer) API"""
coupon = api.WeChatCoupon()
"""Coupon API"""
order = api.WeChatOrder()
"""Order API"""
refund = api.WeChatRefund()
"""Refund API"""
micropay = api.WeChatMicroPay()
"""Micropay (card-present payment) API"""
tools = api.WeChatTools()
"""Utility API"""
jsapi = api.WeChatJSAPI()
"""JSAPI payment for official account web pages"""
withhold = api.WeChatWithhold()
"""Withholding (auto-debit) API"""
app_auth = api.WeChatAppAuth()
"""Real-name authentication API"""
API_BASE_URL = "https://api.mch.weixin.qq.com/"
def __new__(cls, *args, **kwargs):
self = super().__new__(cls)
api_endpoints = inspect.getmembers(self, _is_api_endpoint)
for name, _api in api_endpoints:
api_cls = type(_api)
_api = api_cls(self)
setattr(self, name, _api)
return self
def __init__(
self,
appid,
api_key,
mch_id,
sub_mch_id=None,
mch_cert=None,
mch_key=None,
timeout=None,
sandbox=False,
sub_appid=None,
):
self.appid = appid
self.sub_appid = sub_appid
self.api_key = api_key
self.mch_id = mch_id
self.sub_mch_id = sub_mch_id
self.mch_cert = mch_cert
self.mch_key = mch_key
self._using_pkcs12_cert = False
self.timeout = timeout
self.sandbox = sandbox
self._sandbox_api_key = None
self._http = requests.Session()
if mch_cert and mch_cert.endswith(".p12"):
from requests_pkcs12 import Pkcs12Adapter
# Merchant certificate in .p12 format; the certificate password defaults to the merchant ID
self._http.mount(
self.API_BASE_URL, Pkcs12Adapter(pkcs12_filename=self.mch_cert, pkcs12_password=self.mch_id)
)
self._using_pkcs12_cert = True
def _fetch_sandbox_api_key(self):
nonce_str = random_string(32)
sign = calculate_signature({"mch_id": self.mch_id, "nonce_str": nonce_str}, self.api_key)
payload = dict_to_xml(
{
"mch_id": self.mch_id,
"nonce_str": nonce_str,
},
sign=sign,
)
headers = {"Content-Type": "text/xml"}
api_url = f"{self.API_BASE_URL}sandboxnew/pay/getsignkey"
response = self._http.post(api_url, data=payload, headers=headers)
return xmltodict.parse(response.text)["xml"].get("sandbox_signkey")
def _request(self, method, url_or_endpoint, **kwargs):
if not url_or_endpoint.startswith(("http://", "https://")):
api_base_url = kwargs.pop("api_base_url", self.API_BASE_URL)
if self.sandbox:
api_base_url = f"{api_base_url}sandboxnew/"
url = f"{api_base_url}{url_or_endpoint}"
else:
url = url_or_endpoint
if isinstance(kwargs.get("data", ""), dict):
data = kwargs["data"]
if "mchid" not in data:
# Some newer endpoints use 'mchid' without an underscore; only fill in
# the classic underscored defaults when it is absent.
data.setdefault("mch_id", self.mch_id)
data.setdefault("sub_mch_id", self.sub_mch_id)
data.setdefault("nonce_str", random_string(32))
data = optionaldict(data)
if data.get("sign_type", "MD5") == "HMAC-SHA256":
sign = calculate_signature_hmac(data, self.sandbox_api_key if self.sandbox else self.api_key)
else:
sign = calculate_signature(data, self.sandbox_api_key if self.sandbox else self.api_key)
body = dict_to_xml(data, sign)
body = body.encode("utf-8")
kwargs["data"] = body
# Merchant certificate in PEM format
if not self._using_pkcs12_cert and self.mch_cert and self.mch_key:
kwargs["cert"] = (self.mch_cert, self.mch_key)
kwargs["timeout"] = kwargs.get("timeout", self.timeout)
logger.debug("Request to WeChat API: %s %s\n%s", method, url, kwargs)
res = self._http.request(method=method, url=url, **kwargs)
try:
res.raise_for_status()
except requests.RequestException as reqe:
raise WeChatPayException(
return_code=None,
client=self,
request=reqe.request,
response=reqe.response,
)
return self._handle_result(res)
def _handle_result(self, res):
res.encoding = "utf-8-sig"
xml = res.text
logger.debug("Response from WeChat API \n %s", xml)
try:
data = xmltodict.parse(xml)["xml"]
except (xmltodict.ParsingInterrupted, ExpatError):
# Failed to parse the XML response
logger.debug("WeChat payment result xml parsing error", exc_info=True)
return xml
return_code = data["return_code"]
return_msg = data.get("return_msg", data.get("retmsg"))
result_code = data.get("result_code", data.get("retcode"))
errcode = data.get("err_code")
errmsg = data.get("err_code_des")
if return_code != "SUCCESS" or result_code != "SUCCESS":
# The returned status code is not SUCCESS
raise WeChatPayException(
return_code,
result_code,
return_msg,
errcode,
errmsg,
client=self,
request=res.request,
response=res,
)
return data
def get(self, url, **kwargs):
return self._request(method="get", url_or_endpoint=url, **kwargs)
def post(self, url, **kwargs):
return self._request(method="post", url_or_endpoint=url, **kwargs)
def check_signature(self, params):
return _check_signature(params, self.api_key if not self.sandbox else self.sandbox_api_key)
@classmethod
def get_payment_data(cls, xml):
"""
解析微信支付结果通知,获得appid, mch_id, out_trade_no, transaction_id
如果你需要进一步判断,请先用appid, mch_id来生成WeChatPay,
然后用`wechatpay.parse_payment_result(xml)`来校验支付结果
使用示例::
from wechatpy.pay import WeChatPay
# 假设你已经获取了微信服务器推送的请求中的xml数据并存入xml变量
data = WeChatPay.get_payment_appid(xml)
{
"appid": "公众号或者小程序的id",
"mch_id": "商户id",
}
"""
try:
data = xmltodict.parse(xml)
except (xmltodict.ParsingInterrupted, ExpatError):
raise ValueError("invalid xml")
if not data or "xml" not in data:
raise ValueError("invalid xml")
data = data["xml"]
return {
"appid": data["appid"],
"mch_id": data["mch_id"],
"out_trade_no": data["out_trade_no"],
"transaction_id": data["transaction_id"],
}
def parse_payment_result(self, xml):
"""解析微信支付结果通知"""
try:
data = xmltodict.parse(xml)
except (xmltodict.ParsingInterrupted, ExpatError):
raise InvalidSignatureException()
if not data or "xml" not in data:
raise InvalidSignatureException()
data = data["xml"]
sign = data.pop("sign", None)
real_sign = calculate_signature(data, self.api_key if not self.sandbox else self.sandbox_api_key)
if sign != real_sign:
raise InvalidSignatureException()
for key in (
"total_fee",
"settlement_total_fee",
"cash_fee",
"coupon_fee",
"coupon_count",
):
if key in data:
data[key] = int(data[key])
data["sign"] = sign
return data
def parse_refund_notify_result(self, xml):
"""解析微信退款结果通知"""
refund_crypto = WeChatRefundCrypto(self.api_key if not self.sandbox else self.sandbox_api_key)
data = refund_crypto.decrypt_message(xml, self.appid, self.mch_id)
for key in (
"total_fee",
"settlement_total_fee",
"refund_fee",
"settlement_refund_fee",
):
if key in data:
data[key] = int(data[key])
return data
@property
def sandbox_api_key(self):
if self.sandbox and self._sandbox_api_key is None:
self._sandbox_api_key = self._fetch_sandbox_api_key()
return self._sandbox_api_key
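# Minimal usage sketch (illustrative only: the credential values are
# placeholders, and the order-creation keyword arguments are assumptions about
# api.WeChatOrder.create rather than a verbatim API reference):
#
#   pay = WeChatPay(appid='wx1234567890abcdef', api_key='merchant-key', mch_id='1900000000')
#   result = pay.order.create(trade_type='JSAPI', body='test order', total_fee=1,
#                             notify_url='https://example.com/wechat/notify')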
|
{
"content_hash": "7167cf5bef9190dfb931dee822696c85",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 109,
"avg_line_length": 32.39510489510489,
"alnum_prop": 0.5626551538046412,
"repo_name": "jxtech/wechatpy",
"id": "f7d2c66fb8a8a18d78e8b33f34f8ce3db25ff1c8",
"size": "9886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wechatpy/pay/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "651807"
}
],
"symlink_target": ""
}
|
import os
import pkg_resources
from typeguard import check_argument_types
from typing import Any, Dict, Iterable, List, Set, cast
from pkg_resources import Distribution
from logging import getLogger as _logger
from .canonical import name as _name
from .cache import PluginCache
from .loader import traverse
from .tarjan import robust_topological_sort
log = _logger(__name__)
Plugin = Any
Flags = Set[str]
class PluginManager:
namespace:str
folders:Iterable[str]
plugins:List[Plugin]
named:PluginCache
__wrapped__ = None # Python decorator protocol bypass.
def __init__(self, namespace:str, folders:Iterable[str]=None):
assert check_argument_types()
self.namespace = namespace
self.folders = folders if folders else []
self.plugins = []
self.named = PluginCache(namespace)
self.ws = ws = pkg_resources.working_set
for container in self.folders: # pragma: no cover - TODO: Figure out how to test this.
path = os.path.abspath(os.path.expanduser(container))
log.info("Adding " + path + " to plugin search path.", extra=dict(path=path, namespace=self.namespace))
ws.add_entry(path)
env = pkg_resources.Environment([path])
ws.require(*env)
ws.subscribe(self._register)
super(PluginManager, self).__init__()
def register(self, name:str, plugin:object) -> None:
assert check_argument_types()
log.info("Registering plugin" + name + " in namespace " + self.namespace + ".",
extra = dict(plugin_name=name, namespace=self.namespace, plugin=_name(plugin)))
self.named[name] = plugin
self.plugins.append(plugin)
def _register(self, dist:Distribution) -> None:
assert check_argument_types()
entries = dist.get_entry_map(self.namespace)
if not entries:
return
try:
for name in entries:
plugin = entries[name].load()
self.register(name, plugin)
except pkg_resources.UnknownExtra: # pragma: no cover - TODO: Figure out how to test this.
log.warning("Skipping registration of '{!r}' due to missing dependencies.".format(dist), exc_info=True)
except ImportError: # pragma: no cover - TODO: Figure out how to test this.
log.error("Skipping registration of '{!r}' due to uncaught error on import.".format(dist), exc_info=True)
def __iter__(self):
for plugin in self.plugins:
yield plugin
def __getattr__(self, name:str):
if name.startswith('_'): raise AttributeError()
try:
return self.named[name]
except IndexError:
pass
raise AttributeError()
def __getitem__(self, name:str):
if name.startswith('_'): raise KeyError()
return self.named[name]
class ExtensionManager(PluginManager):
"""More advanced plugin architecture using structured "extensions".
Extensions describe their dependencies using an expressive syntax:
* ``provides`` — declare tags describing the features offered by the plugin
* ``needs`` — declare the tags that must be present for this extension to function
* ``uses`` — declare the tags that must be evaluated prior to this extension, but aren't hard requirements
* ``first`` — declare that this extension is a dependency of all other non-first extensions
* ``last`` — declare that this extension depends on all other non-last extensions
"""
def order(self, config=None, prefix=''):
extensions = traverse(config if config else self.plugins, prefix)
# First, we check that everything absolutely required is configured.
provided: Flags = cast(Flags, set().union(*(traverse(ext, 'provides', ()) for ext in extensions)))
needed: Flags = cast(Flags, set().union(*(traverse(ext, 'needs', ()) for ext in extensions)))
if not provided.issuperset(needed):
raise LookupError("Extensions providing the following features must be configured:\n" + \
', '.join(needed.difference(provided)))
# Now we spider the configured extensions and graph them. This is a multi-step process.
# First, create a mapping of feature names to extensions. We only want extension objects in our initial graph.
universal: List[Plugin] = list()
inverse: List[Plugin] = list()
provides: Dict[str, Plugin] = dict()
excludes: Dict[str, Plugin] = dict()
for ext in extensions:
for feature in traverse(ext, 'provides', ()):
provides[feature] = ext
for feature in traverse(ext, 'excludes', ()):
excludes.setdefault(feature, []).append(ext)
if traverse(ext, 'first', False):
universal.append(ext)
elif traverse(ext, 'last', False):
inverse.append(ext)
# We bail early if there are known conflicts up-front.
for conflict in set(provides) & set(excludes):
raise RuntimeError("{!r} precludes use of '{!s}', which is defined by {!r}".format(
excludes[conflict], conflict, provides[conflict]))
# Now we build the initial graph.
dependencies: Dict[Plugin, Flags] = dict()
for ext in extensions:
# We build a set of requirements from needs + uses that have been fulfilled.
requirements = set(traverse(ext, 'needs', ()))
requirements.update(set(traverse(ext, 'uses', ())).intersection(provided))
dependencies[ext] = set(provides[req] for req in requirements)
if universal and ext not in universal:
dependencies[ext].update(universal)
if inverse and ext in inverse:
dependencies[ext].update(set(extensions).difference(inverse))
# Build the final "unidirected acyclic graph"; a list of extensions in dependency-resolved order.
dependencies = robust_topological_sort(dependencies)
# If there are any tuple elements, we've got a circular reference!
extensions = []
for ext in dependencies:
if len(ext) > 1:
raise LookupError("Circular dependency found: " + repr(ext))
extensions.append(ext[0])
extensions.reverse()
return extensions
|
{
"content_hash": "7366bbe4ed92d64a5182b528944a19ef",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 113,
"avg_line_length": 32.78409090909091,
"alnum_prop": 0.7020797227036395,
"repo_name": "marrow/package",
"id": "cf444084abb183ad2b54e110de531aba787bb07c",
"size": "5780",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "marrow/package/host.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "765"
},
{
"name": "Python",
"bytes": "33983"
}
],
"symlink_target": ""
}
|
"""
This file is part of the PROPheT tool.
Copyright (C) 2016: MKLab <pmitzias@iti.gr; mriga@iti.gr; skontopo@iti.gr>
http://mklab.iti.gr/project/prophet-ontology-populator
https://github.com/MKLab-ITI/prophet
Licensed under the Apache License, Version 2.0 (the "License").
You may use this file in compliance with the License.
For more details, see LICENCE file.
"""
# Form implementation generated from reading ui file 'ui\AddNamespace.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_AddNamespace(object):
def setupUi(self, AddNamespace):
AddNamespace.setObjectName(_fromUtf8("AddNamespace"))
AddNamespace.resize(417, 139)
self.gridLayout = QtGui.QGridLayout(AddNamespace)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.lblNamespacePrefix = QtGui.QLabel(AddNamespace)
font = QtGui.QFont()
font.setPointSize(10)
self.lblNamespacePrefix.setFont(font)
self.lblNamespacePrefix.setObjectName(_fromUtf8("lblNamespacePrefix"))
self.horizontalLayout.addWidget(self.lblNamespacePrefix)
self.lineEditNamespacePrefix = QtGui.QLineEdit(AddNamespace)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditNamespacePrefix.sizePolicy().hasHeightForWidth())
self.lineEditNamespacePrefix.setSizePolicy(sizePolicy)
self.lineEditNamespacePrefix.setMinimumSize(QtCore.QSize(300, 0))
font = QtGui.QFont()
font.setPointSize(10)
self.lineEditNamespacePrefix.setFont(font)
self.lineEditNamespacePrefix.setObjectName(_fromUtf8("lineEditNamespacePrefix"))
self.horizontalLayout.addWidget(self.lineEditNamespacePrefix)
self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 3)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.lblNamespaceURL = QtGui.QLabel(AddNamespace)
self.lblNamespaceURL.setMinimumSize(QtCore.QSize(37, 0))
font = QtGui.QFont()
font.setPointSize(10)
self.lblNamespaceURL.setFont(font)
self.lblNamespaceURL.setObjectName(_fromUtf8("lblNamespaceURL"))
self.horizontalLayout_2.addWidget(self.lblNamespaceURL)
self.lineEditNamespaceURL = QtGui.QLineEdit(AddNamespace)
self.lineEditNamespaceURL.setMinimumSize(QtCore.QSize(300, 0))
font = QtGui.QFont()
font.setPointSize(10)
self.lineEditNamespaceURL.setFont(font)
self.lineEditNamespaceURL.setFocusPolicy(QtCore.Qt.StrongFocus)
self.lineEditNamespaceURL.setObjectName(_fromUtf8("lineEditNamespaceURL"))
self.horizontalLayout_2.addWidget(self.lineEditNamespaceURL)
self.gridLayout.addLayout(self.horizontalLayout_2, 1, 0, 1, 3)
self.btnCancel = QtGui.QPushButton(AddNamespace)
self.btnCancel.setMinimumSize(QtCore.QSize(75, 0))
self.btnCancel.setMaximumSize(QtCore.QSize(75, 16777215))
self.btnCancel.setObjectName(_fromUtf8("btnCancel"))
self.gridLayout.addWidget(self.btnCancel, 2, 2, 1, 1)
self.btnOK = QtGui.QPushButton(AddNamespace)
self.btnOK.setEnabled(True)
self.btnOK.setMinimumSize(QtCore.QSize(75, 0))
self.btnOK.setMaximumSize(QtCore.QSize(75, 16777215))
self.btnOK.setDefault(True)
self.btnOK.setObjectName(_fromUtf8("btnOK"))
self.gridLayout.addWidget(self.btnOK, 2, 1, 1, 1)
self.retranslateUi(AddNamespace)
QtCore.QMetaObject.connectSlotsByName(AddNamespace)
def retranslateUi(self, AddNamespace):
AddNamespace.setWindowTitle(_translate("AddNamespace", "Add a new namespace", None))
self.lblNamespacePrefix.setToolTip(_translate("AddNamespace", "Type a prefix for the new namespace", None))
self.lblNamespacePrefix.setText(_translate("AddNamespace", "Prefix:", None))
self.lineEditNamespacePrefix.setToolTip(_translate("AddNamespace", "Type a prefix for the new namespace", None))
self.lblNamespaceURL.setToolTip(_translate("AddNamespace", "Type a URL for the new namespace", None))
self.lblNamespaceURL.setText(_translate("AddNamespace", "URI:", None))
self.lineEditNamespaceURL.setToolTip(_translate("AddNamespace", "Type a URL for the new namespace", None))
self.lineEditNamespaceURL.setText(_translate("AddNamespace", "http://", None))
self.btnCancel.setText(_translate("AddNamespace", "Cancel", None))
self.btnOK.setText(_translate("AddNamespace", "OK", None))
|
{
"content_hash": "9a7d66b77f950cb1c04fd8a2a17c697a",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 120,
"avg_line_length": 49.027522935779814,
"alnum_prop": 0.7215568862275449,
"repo_name": "MKLab-ITI/prophet",
"id": "117547a0c56b36300707970cd680620f98f03eed",
"size": "5369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/AddNamespace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4308894"
}
],
"symlink_target": ""
}
|
"""
This module instantiates the Flask application and declares the main error
handling function ``make_json_error``.
"""
# IMPORTANT: updates to the following must also be done in setup.py.
__title__ = "My Smart Home"
__version__ = "0.1.0"
__author__ = "Hugues Demers"
__email__ = "hdemers@gmail.com"
__copyright__ = "Copyright 2013 Hugues Demers"
__license__ = "MIT"
import os
import traceback
from flask import Flask, jsonify
from flask_sockets import Sockets
from werkzeug.exceptions import default_exceptions
from werkzeug.exceptions import HTTPException
from cloudly import logger
from cloudly.notify import notify as cloudly_notify
FORMAT = "%(asctime)s] %(levelname)s %(module)s %(funcName)s: %(message)s"
# The application
app = Flask(__name__)
sockets = Sockets(app)
# Debugging
app.debug = True
app.debug_log_format = FORMAT
log = logger.init(__name__)
# Set a 'SECRET_KEY' to enable the Flask session cookies
app.config['SECRET_KEY'] = os.environ.get("WEBAPP_SESSION_SECRET_KEY",
'oftg09jW2FtbXfcud9OS')
# Make this app a JSON app.
# Inspired from cf. http://flask.pocoo.org/snippets/83/
def make_json_error(ex):
log.error(ex)
log.error(traceback.format_exc())
message = ex.description if isinstance(ex, HTTPException) else str(ex)
message = message.replace("<p>", "").replace("</p>", "") if message else ""
code = ex.code if isinstance(ex, HTTPException) else 500
response = jsonify(message=message, status_code=code)
response.status_code = code
if code in [500]:
notify(ex, code)
return response
for code in default_exceptions:
app.error_handler_spec[None][code] = make_json_error
def notify(exception, code=None):
if not code:
code = exception.code if isinstance(exception, HTTPException) else 500
cloudly_notify("Exception: {}".format(code), "{}\n\n{}".format(
exception, traceback.format_exc(exception)))
import smrthome.views # noqa
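# Shape of the JSON error body produced by make_json_error (illustrative; the
# message text comes from the underlying HTTPException description):
#   {"message": "The requested URL was not found on the server. ...",
#    "status_code": 404}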
|
{
"content_hash": "9e491eba2c32595fde6673c69fa69550",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 29.323529411764707,
"alnum_prop": 0.691073219658977,
"repo_name": "hdemers/smrthome",
"id": "254ef1d09449c49ff60fdf0e701c86bdfd3dc2da",
"size": "1994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smrthome/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "337"
},
{
"name": "JavaScript",
"bytes": "300541"
},
{
"name": "Python",
"bytes": "5312"
},
{
"name": "Shell",
"bytes": "219"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('collect', '0009_auto_20211102_1100'),
]
operations = [
migrations.CreateModel(
name='CollectContribution',
fields=[
('contribution_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='activities.Contribution')),
('value', models.DecimalField(blank=True, decimal_places=5, max_digits=12, null=True)),
],
options={
'verbose_name': 'Collect contribution',
'verbose_name_plural': 'Collect contributions',
},
bases=('activities.contribution',),
),
migrations.AddField(
model_name='collectcontributor',
name='value',
field=models.DecimalField(blank=True, decimal_places=5, max_digits=12, null=True),
),
]
|
{
"content_hash": "647f5b27eea9c4d6273ec1e0d5961075",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 206,
"avg_line_length": 36.48275862068966,
"alnum_prop": 0.5964083175803403,
"repo_name": "onepercentclub/bluebottle",
"id": "b330de3dff728ba3ad6ba93e65c75862547da673",
"size": "1108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/collect/migrations/0010_auto_20211102_1258.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
}
|
from game import Agent
from game import Directions
import random
class KeyboardAgent(Agent):
"""
An agent controlled by the keyboard.
"""
# NOTE: Arrow keys also work.
WEST_KEY = 'a'
EAST_KEY = 'd'
NORTH_KEY = 'w'
SOUTH_KEY = 's'
STOP_KEY = 'q'
def __init__( self, index = 0 ):
self.lastMove = Directions.STOP
self.index = index
self.keys = []
def getAction( self, state):
from graphicsUtils import keys_waiting
from graphicsUtils import keys_pressed
keys = keys_waiting() + keys_pressed()
if keys != []:
self.keys = keys
legal = state.getLegalActions(self.index)
move = self.getMove(legal)
if move == Directions.STOP:
# Try to move in the same direction as before
if self.lastMove in legal:
move = self.lastMove
if (self.STOP_KEY in self.keys) and Directions.STOP in legal: move = Directions.STOP
if move not in legal:
move = random.choice(legal)
self.lastMove = move
return move
def getMove(self, legal):
move = Directions.STOP
if (self.WEST_KEY in self.keys or 'Left' in self.keys) and Directions.WEST in legal: move = Directions.WEST
if (self.EAST_KEY in self.keys or 'Right' in self.keys) and Directions.EAST in legal: move = Directions.EAST
if (self.NORTH_KEY in self.keys or 'Up' in self.keys) and Directions.NORTH in legal: move = Directions.NORTH
if (self.SOUTH_KEY in self.keys or 'Down' in self.keys) and Directions.SOUTH in legal: move = Directions.SOUTH
return move
class KeyboardAgent2(KeyboardAgent):
"""
A second agent controlled by the keyboard.
"""
# NOTE: Arrow keys also work.
WEST_KEY = 'j'
EAST_KEY = "l"
NORTH_KEY = 'i'
SOUTH_KEY = 'k'
STOP_KEY = 'u'
def getMove(self, legal):
move = Directions.STOP
if (self.WEST_KEY in self.keys) and Directions.WEST in legal: move = Directions.WEST
if (self.EAST_KEY in self.keys) and Directions.EAST in legal: move = Directions.EAST
if (self.NORTH_KEY in self.keys) and Directions.NORTH in legal: move = Directions.NORTH
if (self.SOUTH_KEY in self.keys) and Directions.SOUTH in legal: move = Directions.SOUTH
return move
|
{
"content_hash": "d205f3cf58088860d32387f421421218",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 120,
"avg_line_length": 33.98571428571429,
"alnum_prop": 0.6145439260193358,
"repo_name": "startupjing/Artificial-Intelligence",
"id": "a3e96ef185586213069e4f33cc8b212ccb1f87b9",
"size": "2804",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "artificial_intelligence/search/keyboardAgents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "59850"
},
{
"name": "Python",
"bytes": "895190"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.locale import l_
from sphinx.roles import XRefRole
from sphinx.util.docfields import Field, GroupedField, TypedField
from sphinx.util.nodes import make_refnode
import sphinx.ext.autodoc
class CtonObject(ObjectDescription):
"""
Any kind of Cretonne IL object.
This is a shared base class for the different kinds of indexable objects
in the Cretonne IL reference.
"""
option_spec = {
'noindex': directives.flag,
'module': directives.unchanged,
'annotation': directives.unchanged,
}
def add_target_and_index(self, name, sig, signode):
"""
Add ``name`` to the index.
:param name: The object name returned by :func:`handle_signature`.
:param sig: The signature text.
:param signode: The output node.
"""
targetname = self.objtype + '-' + name
if targetname not in self.state.document.ids:
signode['names'].append(targetname)
signode['ids'].append(targetname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
inv = self.env.domaindata['cton']['objects']
if name in inv:
self.state_machine.reporter.warning(
'duplicate Cretonne object description of %s, ' % name +
'other instance in ' + self.env.doc2path(inv[name][0]),
line=self.lineno)
inv[name] = (self.env.docname, self.objtype)
indextext = self.get_index_text(name)
if indextext:
self.indexnode['entries'].append(('single', indextext,
targetname, '', None))
# Type variables are indicated as %T.
typevar = re.compile(r'(%[A-Z])')
def parse_type(name, signode):
"""
Parse a type with embedded type vars and append to signode.
Return a string that can be compiled into a regular expression matching
the type.
"""
re_str = ''
for part in typevar.split(name):
if part == '':
continue
if len(part) == 2 and part[0] == '%':
# This is a type parameter. Don't display the %, use emphasis
# instead.
part = part[1]
signode += nodes.emphasis(part, part)
re_str += r'\w+'
else:
signode += addnodes.desc_name(part, part)
re_str += re.escape(part)
return re_str
class CtonType(CtonObject):
"""A Cretonne IL type description."""
def handle_signature(self, sig, signode):
"""
Parse type signature in ``sig`` and append description to signode.
Return a global object name for ``add_target_and_index``.
"""
name = sig.strip()
parse_type(name, signode)
return name
def get_index_text(self, name):
return name + ' (IL type)'
sep_equal = re.compile(r'\s*=\s*')
sep_comma = re.compile(r'\s*,\s*')
def parse_params(s, signode):
for i, p in enumerate(sep_comma.split(s)):
if i != 0:
signode += nodes.Text(', ')
signode += nodes.emphasis(p, p)
class CtonInst(CtonObject):
"""A Cretonne IL instruction."""
doc_field_types = [
TypedField('argument', label=l_('Arguments'),
names=('in', 'arg'),
typerolename='type', typenames=('type',)),
TypedField('result', label=l_('Results'),
names=('out', 'result'),
typerolename='type', typenames=('type',)),
GroupedField(
'typevar', names=('typevar',), label=l_('Type Variables')),
GroupedField('flag', names=('flag',), label=l_('Flags')),
Field('resulttype', label=l_('Result type'), has_arg=False,
names=('rtype',)),
]
def handle_signature(self, sig, signode):
# Look for signatures like
#
# v1, v2 = foo op1, op2
# v1 = foo
# foo op1
parts = re.split(sep_equal, sig, 1)
if len(parts) == 2:
# Outgoing parameters.
parse_params(parts[0], signode)
signode += nodes.Text(' = ')
name = parts[1]
else:
name = parts[0]
# Parse 'name arg, arg'
parts = name.split(None, 1)
name = parts[0]
signode += addnodes.desc_name(name, name)
if len(parts) == 2:
# Incoming parameters.
signode += nodes.Text(' ')
parse_params(parts[1], signode)
return name
def get_index_text(self, name):
return name
class CtonInstGroup(CtonObject):
"""A Cretonne IL instruction group."""
class CretonneDomain(Domain):
"""Cretonne domain for intermediate language objects."""
name = 'cton'
label = 'Cretonne'
object_types = {
'type': ObjType(l_('type'), 'type'),
'inst': ObjType(l_('instruction'), 'inst')
}
directives = {
'type': CtonType,
'inst': CtonInst,
'instgroup': CtonInstGroup,
}
roles = {
'type': XRefRole(),
'inst': XRefRole(),
'instgroup': XRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
}
def clear_doc(self, docname):
for fullname, (fn, _l) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
def merge_domaindata(self, docnames, otherdata):
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
self.data['objects'][fullname] = (fn, objtype)
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
contnode):
objects = self.data['objects']
if target not in objects:
return None
obj = objects[target]
return make_refnode(builder, fromdocname, obj[0],
obj[1] + '-' + target, contnode, target)
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
objects = self.data['objects']
if target not in objects:
return []
obj = objects[target]
return [('cton:' + self.role_for_objtype(obj[1]),
make_refnode(builder, fromdocname, obj[0],
obj[1] + '-' + target, contnode, target))]
class TypeDocumenter(sphinx.ext.autodoc.Documenter):
# Invoke with .. autoctontype::
objtype = 'ctontype'
# Convert into cton:type directives
domain = 'cton'
directivetype = 'type'
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return False
def resolve_name(self, modname, parents, path, base):
return 'base.types', [base]
def add_content(self, more_content, no_docstring=False):
super(TypeDocumenter, self).add_content(more_content, no_docstring)
sourcename = self.get_sourcename()
membytes = self.object.membytes
if membytes:
self.add_line(u':bytes: {}'.format(membytes), sourcename)
else:
self.add_line(u':bytes: Can\'t be stored in memory', sourcename)
class InstDocumenter(sphinx.ext.autodoc.Documenter):
# Invoke with .. autoinst::
objtype = 'inst'
# Convert into cton:inst directives
domain = 'cton'
directivetype = 'inst'
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return False
def resolve_name(self, modname, parents, path, base):
if path:
return path.rstrip('.'), [base]
else:
return 'base.instructions', [base]
def format_signature(self):
inst = self.object
sig = inst.name
if len(inst.outs) > 0:
sig = ', '.join([op.name for op in inst.outs]) + ' = ' + sig
if len(inst.ins) > 0:
op = inst.ins[0]
sig += ' ' + op.name
# If the first input is variable-args, this is 'return'. No parens.
if op.kind.name == 'variable_args':
sig += '...'
for op in inst.ins[1:]:
# This is a call or branch with args in (...).
if op.kind.name == 'variable_args':
sig += '({}...)'.format(op.name)
else:
sig += ', ' + op.name
return sig
def add_directive_header(self, sig):
"""Add the directive header and options to the generated content."""
domain = getattr(self, 'domain', 'cton')
directive = getattr(self, 'directivetype', self.objtype)
sourcename = self.get_sourcename()
self.add_line(u'.. %s:%s:: %s' % (domain, directive, sig), sourcename)
if self.options.noindex:
self.add_line(u' :noindex:', sourcename)
def add_content(self, more_content, no_docstring=False):
super(InstDocumenter, self).add_content(more_content, no_docstring)
sourcename = self.get_sourcename()
inst = self.object
# Add inputs and outputs.
for op in inst.ins:
if op.is_value():
typ = op.typevar
else:
typ = op.kind
self.add_line(u':in {} {}: {}'.format(
typ, op.name, op.get_doc()), sourcename)
for op in inst.outs:
if op.is_value():
typ = op.typevar
else:
typ = op.kind
self.add_line(u':out {} {}: {}'.format(
typ, op.name, op.get_doc()), sourcename)
# Document type inference for polymorphic instructions.
if inst.is_polymorphic:
if inst.ctrl_typevar is not None:
if inst.use_typevar_operand:
tvopnum = inst.value_opnums[inst.format.typevar_operand]
self.add_line(
u':typevar {}: inferred from {}'
.format(
inst.ctrl_typevar.name,
inst.ins[tvopnum]),
sourcename)
else:
self.add_line(
u':typevar {}: explicitly provided'
.format(inst.ctrl_typevar.name),
sourcename)
for tv in inst.other_typevars:
self.add_line(
u':typevar {}: from input operand'.format(tv.name),
sourcename)
class InstGroupDocumenter(sphinx.ext.autodoc.ModuleLevelDocumenter):
# Invoke with .. autoinstgroup::
objtype = 'instgroup'
# Convert into cton:instgroup directives
domain = 'cton'
directivetype = 'instgroup'
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return False
def format_name(self):
return "{}.{}".format(self.modname, ".".join(self.objpath))
def add_content(self, more_content, no_docstring=False):
super(InstGroupDocumenter, self).add_content(
more_content, no_docstring)
sourcename = self.get_sourcename()
indexed = self.env.domaindata['cton']['objects']
names = [inst.name for inst in self.object.instructions]
names.sort()
for name in names:
if name in indexed:
self.add_line(u':cton:inst:`{}`'.format(name), sourcename)
else:
self.add_line(u'``{}``'.format(name), sourcename)
def setup(app):
app.add_domain(CretonneDomain)
app.add_autodocumenter(TypeDocumenter)
app.add_autodocumenter(InstDocumenter)
app.add_autodocumenter(InstGroupDocumenter)
return {'version': '0.1'}
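# Illustrative hookup from a Sphinx conf.py (assuming this module is
# importable during the documentation build, e.g. via sys.path manipulation):
#
#   extensions = ['cton_domain']
#
# after which directives such as `.. cton:type::` and `.. autoinst::` and
# roles such as :cton:inst:`...` become available in the docs.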
|
{
"content_hash": "cbef300275b910cc7f06c7bc67f4460d",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 79,
"avg_line_length": 32.3235294117647,
"alnum_prop": 0.5506658946149392,
"repo_name": "sunfishcode/cretonne",
"id": "a03acf11608222024cb94daa6152fb7a710f2138",
"size": "12389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/cton_domain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "575579"
},
{
"name": "Rust",
"bytes": "1675777"
},
{
"name": "Shell",
"bytes": "5642"
},
{
"name": "Vim script",
"bytes": "1505"
},
{
"name": "WebAssembly",
"bytes": "3548"
}
],
"symlink_target": ""
}
|
"""Netlink Module
netlink sockets are used to communicate with various kernel subsystems as
an RPC system. `man 7 netlink` for more information.
You can read all about netlink in rfc 3549, but in general the format for
requests is a netlink message header followed by a specific header
(corresponding to type) followed by an attribute list. This library was
primarily written for ipvs, which uses the generic netlink protocol.
This means that the type is not hardcoded based upon the message, but is
actually dynamic. The first part of the exchange is discovering which type
(or family id) you'll need for your application, and then using that going forward.
Here is a typical (IPVS) netlink message
0 1 2 3
+--------------+--------------+--------------+--------------+
| Total Message Length |
+--------------+--------------+--------------+--------------+
| Type | Flags |
+--------------+--------------+--------------+--------------+
| Sequence Number |
+--------------+--------------+--------------+--------------+
| PID |
+--------------+--------------+--------------+--------------+
| Command ID | Version | RESERVED |
+--------------+--------------+--------------+--------------+
| Attribute Length | Attribute Type |
+--------------+--------------+--------------+--------------+
| ... Attribute ... (Padded to 4 bytes) |
+--------------+--------------+--------------+--------------+
| Attribute Length | Attribute Type |
+--------------+--------------+--------------+--------------+
| ... Attribute ... (Padded to 4 bytes) |
+--------------+--------------+--------------+--------------+
The thing that makes implementing this interesting is that the attribute
types are very application specific and context sensitive. The type 1 in a
particular nested list might be a short while the type 1 in a different
nested list might be a single-letter nul-terminated string. In that case,
both Attribute Length and Attribute Type would be the same, but they'd be
semantically very different.
So, for this all to work, we must create attribute lists that know how to
pack and unpack their values, and messages that know what type of attribute
lists they have. This is accomplished with create_attr_list_type and
create_genl_message_type.
Some messages may not be supported when called from userland. In such
cases, provide an attribute list of None.
Usage:
ListA = netlink.create_attr_list_type(
'ListA',
('SOME_SHORT', netlink.U16Type),
('SOME_STRING', netlink.NulStringType),
)
ListB = netlink.create_attr_list_type(
'ListB',
('ANOTHER_STRING', netlink.NulStringType),
('ANOTHER_SHORT', netlink.U16Type),
('LIST_A', ListA),
)
Msg = netlink.create_genl_message_type(
'Msg', 'SPECIFIED_KERNEL_NAME',
('COMMAND_1', ListA),
('COMMAND_2', None), # COMMAND_2 is not available via userland.
('COMMAND_3', ListB),
)
And at this point, you can begin sending and receiving `Msg`es to a netlink
socket.
# assume that we send command_1 and get back a command_3
reply = sock.query(Msg('command_1', attr_list=ListA(
    some_string='foo', some_short=10)))[0]
reply.get_attr_list().get('another_short') # is a short!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import array
import errno
import logging
import os
import socket
import struct
import subprocess
import threading
def _unset(x): # pragma: no cover
'''
Dummy function used in the code to find out whether a default was set.
Using this function as a default makes it possible to differentiate
between a default value of None and a default that was not set.
'''
return x ** 2
class MessageFlags(object):
REQUEST = 1
MULTI = 2
ACK = 4
ECHO = 8
DUMP_INTR = 16
ROOT = 0x100
MATCH = 0x200
ATOMIC = 0x400
DUMP = (ROOT | MATCH)
REPLACE = 0x100
EXCL = 0x200
CREATE = 0x400
APPEND = 0x800
ACK_REQUEST = (REQUEST | ACK)
MATCH_ROOT_REQUEST = (MATCH | ROOT | REQUEST)
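# These flags compose bitwise, e.g. ACK_REQUEST == 0x5 and
# MATCH_ROOT_REQUEST == 0x301; the latter is the usual combination for
# dump-style queries that return multi-part replies.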
def create_struct_fmt_type(fmt):
class StructFmtType:
@staticmethod
def pack(val):
return array.array(str('B'), struct.pack(str(fmt), val))
@staticmethod
def unpack(data):
return struct.unpack(str(fmt), data)[0]
return StructFmtType
U8Type = create_struct_fmt_type('=B')
U16Type = create_struct_fmt_type('=H')
U32Type = create_struct_fmt_type('=I')
I32Type = create_struct_fmt_type('=i')
U64Type = create_struct_fmt_type('=Q')
Net16Type = create_struct_fmt_type('!H')
Net32Type = create_struct_fmt_type('!I')
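# Round-trip illustration (a sketch; '=' uses native byte order, so the first
# two lines assume a little-endian host, while '!' is always big-endian):
#   U16Type.pack(258)            # -> array('B', [2, 1])
#   U16Type.unpack(b'\x02\x01')  # -> 258
#   Net16Type.pack(258)          # -> array('B', [1, 2])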
class RecursiveSelf(object):
pass
class IgnoreType(object):
@staticmethod
def unpack(val):
return None
class BinaryType(object):
@staticmethod
def pack(val):
return val
@staticmethod
def unpack(val):
return val
class NulStringType(object):
'''
Ensure the string is null terminated when packing and remove the trailing
\0 when unpacking.
'''
@staticmethod
def pack(val):
return val + '\0'
@staticmethod
def unpack(val):
assert val[-1] == '\0'
return val[:-1]
class AttrListPacker(object):
pass
def create_attr_list_type(class_name, *fields):
"""Create a new attr_list_type which is a class offering get and set
methods which is capable of serializing and deserializing itself from
netlink message. The fields are a bunch of tuples of name and a class
which should provide pack and unpack (except for in the case where we
know it will be used exclusively for serialization or deserialization).
attr_list_types can be used as packers in other attr_list_types. The
names and packers of the field should be taken from the appropriate
linux kernel header and source files.
"""
name_to_key = {}
key_to_name = {}
key_to_packer = {}
for i, (name, packer) in enumerate(fields):
key = i + 1
name_to_key[name.upper()] = key
key_to_name[key] = name
key_to_packer[key] = packer
class AttrListType(AttrListPacker):
def __init__(self, **kwargs):
self.attrs = {}
for k, v in kwargs.items():
if v is not None:
self.set(k, v)
def set(self, key, value):
if not isinstance(key, int):
key = name_to_key[key.upper()]
self.attrs[key] = value
def get(self, key, default=_unset):
try:
if not isinstance(key, int):
key = name_to_key[key.upper()]
return self.attrs[key]
except KeyError:
if default is not _unset:
return default
raise
def __repr__(self):
attrs = ['%s=%s' % (key_to_name[k].lower(), repr(v))
for k, v in self.attrs.items()]
return '%s(%s)' % (class_name, ', '.join(attrs))
@staticmethod
def pack(attr_list):
packed = array.array(str('B'))
for k, v in attr_list.attrs.items():
if key_to_packer[k] == RecursiveSelf:
x = AttrListType.pack(v)
else:
x = key_to_packer[k].pack(v)
alen = len(x) + 4
# TODO(agartrell): This is scary. In theory, we should OR
# 1 << 15 into the length if it is an instance of
# AttrListPacker, but this didn't work for some reason, so
# we're not going to.
packed.fromstring(struct.pack(str('=HH'), alen, k))
packed.fromstring(x)
packed.fromstring('\0' * ((4 - (len(x) % 4)) & 0x3))
return packed
@staticmethod
def unpack(data):
attr_list = AttrListType()
while len(data) > 0:
alen, k = struct.unpack(str('=HH'), data[:4])
alen = alen & 0x7fff
if key_to_packer[k] == RecursiveSelf:
v = AttrListType.unpack(data[4:alen])
else:
v = key_to_packer[k].unpack(data[4:alen])
attr_list.set(k, v)
data = data[((alen + 3) & (~3)):]
return attr_list
return AttrListType
def create_genl_message_type(class_name, family_id_or_name, *fields,
**kwargs):
"""Create a new genl_message_type which is a class offering the appropriate
members and is capable of serializing and deserializing itself from
netlink protocol. The fields are a bunch of tuples of command name and
a class which should provide pack and unpack (except for in the case
where we know it will be used exclusively for serialization or
deserialization). AFAICT, the packer should always be an
attr_list_type. The names and attr_list packers of the field should be
taken from the appropriate linux kernel header and source files.
This method further registers the new message type using the
@message_class decorator, which allows us to serialize and deserialize
it from any appropriate netlink socket instance.
"""
name_to_key = {}
key_to_name = {}
key_to_attr_list_type = {}
for i, (name, attr_list_type) in enumerate(fields):
key = i + 1
name_to_key[name.upper()] = key
key_to_name[key] = name
key_to_attr_list_type[key] = attr_list_type
@message_class
class MessageType:
family = family_id_or_name
required_modules = kwargs.get('required_modules', [])
def __init__(self, cmd, attr_list=_unset, version=0x1,
flags=MessageFlags.ACK_REQUEST):
if not isinstance(cmd, int):
self.cmd = name_to_key[cmd.upper()]
else:
self.cmd = cmd
self.version = version
self.flags = flags
if attr_list is _unset:
kls = key_to_attr_list_type[self.cmd]
self.attr_list = kls() if kls is not None else None
else:
self.attr_list = attr_list
def get_attr_list(self):
return self.attr_list
def __repr__(self):
return '%s(cmd=%s, attr_list=%s, version=0x%x, flags=0x%x)' % (
class_name, repr(key_to_name[self.cmd]), repr(self.attr_list),
self.version, self.flags)
@staticmethod
def unpack(data):
cmd, version = struct.unpack(str('=BBxx'), data[:4])
attr_list = key_to_attr_list_type[cmd].unpack(data[4:])
return MessageType(cmd, attr_list)
@staticmethod
def pack(msg):
s = array.array(
str('B'), struct.pack(str('=BBxx'), msg.cmd, msg.version))
s.extend(key_to_attr_list_type[msg.cmd].pack(msg.attr_list))
return s
return MessageType
# This is a global map of unpackers. The @message_class decorator inserts
# new message classes into this map so it can be used for the purpose of
# deserializing netlink messages from NetlinkSocket::recv
__cmd_unpack_map = {
}
__to_lookup_on_init = set()
def message_class(msg_class):
assert msg_class.family not in __cmd_unpack_map, \
'Message class %d is already defined' % msg_class.family
assert msg_class.family not in [x.family for x in __to_lookup_on_init], \
'Message class %s is already defined' % msg_class.family
if not isinstance(msg_class.family, int):
__to_lookup_on_init.add(msg_class)
else:
__cmd_unpack_map[msg_class.family] = msg_class
return msg_class
def setup_message_classes(nlsock):
for msg_class in __to_lookup_on_init:
if not isinstance(msg_class.family, int):
msg_class.family = nlsock.resolve_family(msg_class.family)
__cmd_unpack_map[msg_class.family] = msg_class
__to_lookup_on_init.clear()
for family_id, msg_class in __cmd_unpack_map.items():
for mod in getattr(msg_class, 'required_modules', []):
subprocess.check_call(['modprobe', mod])
def deserialize_message(data):
(n, typ, flags, seq, pid) = struct.unpack(str('=IHHII'), data[:16])
if typ not in __cmd_unpack_map:
raise Exception("Unregistered netlink type: %d" % typ)
msg = __cmd_unpack_map[typ].unpack(data[16:n])
msg.flags = flags
return msg, data[n:]
def serialize_message(msg, port_id, seq):
family = msg.__class__.family
flags = msg.flags
s = msg.__class__.pack(msg)
t = struct.pack(str('=IHHII'), len(s) + 16, family, flags, seq, port_id)
p = array.array(str('B'), t)
p.extend(s)
return p
# In order to discover family IDs, we'll need to exchange some Ctrl
# messages with the kernel. We declare these message types and attribute
# list types below.
CtrlOpsAttrList = create_attr_list_type(
'CtrlOpsAttrList',
('ID', U32Type),
('FLAGS', U32Type),
)
CtrlMcastGroupAttrList = create_attr_list_type(
'CtrlMcastGroupAttrList',
('NAME', NulStringType),
('ID', U32Type),
)
CtrlAttrList = create_attr_list_type(
'CtrlAttrList',
('FAMILY_ID', U16Type),
('FAMILY_NAME', NulStringType),
('VERSION', U32Type),
('HDRSIZE', U32Type),
('MAXATTR', U32Type),
('OPS', IgnoreType), # TODO: CtrlOpsAttrList
('MCAST_GROUPS', CtrlMcastGroupAttrList),
)
CtrlMessage = create_genl_message_type(
'CtrlMessage',
16,
# NEWFAMILY message is returned in response to a GETFAMILY request.
('NEWFAMILY', CtrlAttrList),
('DELFAMILY', None),
('GETFAMILY', CtrlAttrList),
('NEWOPS', None),
('DELOPS', None),
('GETOPS', None),
('NEWMCAST_GRP', None),
('DELMCAST_GRP', None),
('GETMCAST_GRP', None),
)
@message_class
class ErrorMessage(object):
family = 2
def __init__(self, error, msg):
self.error = error
self.msg = msg
def __repr__(self):
return 'ErrorMessage(error=%s, msg=%s)' % (
repr(self.error), repr(self.msg))
def __str__(self):
try:
error_str = '%s: %s' % (errno.errorcode[-self.error],
os.strerror(-self.error))
except KeyError:
error_str = str(self.error)
return '%s. Extra info: %s' % (error_str, self.msg)
@staticmethod
def unpack(data):
error = struct.unpack(str('=i'), data[:4])[0]
try:
msg = deserialize_message(data[4:])
except Exception:
msg = None
return ErrorMessage(error=error, msg=msg)
@message_class
class DoneMessage(object):
family = 3
def __init__(self):
pass
@staticmethod
def unpack(data):
assert len(data) == 4
return DoneMessage()
@staticmethod
def pack():
return '\0\0\0\0'
class NetlinkSocket(object):
def __init__(self, verbose=False):
# NETLINK_GENERIC = 16
self.sock = socket.socket(socket.AF_NETLINK, socket.SOCK_DGRAM, 16)
self.sock.bind((0, 0))
self.port_id = self.sock.getsockname()[0]
self.seq = 0
self.lock = threading.Lock()
self.verbose = verbose
setup_message_classes(self)
def close(self):
self.sock.close()
self.sock = None
def resolve_family(self, family):
msg = CtrlMessage('getfamily', flags=MessageFlags.REQUEST)
msg.get_attr_list().set('family_name', family)
reply = self.query(msg)[0]
return reply.get_attr_list().get('family_id')
def _send(self, msg):
self.sock.send(serialize_message(msg, self.port_id, self.seq))
self.seq += 1
def _recv(self):
messages = []
while True:
# A big buffer to avoid truncating message.
# The size is borrowed from libnetlink.
data = self.sock.recv(16384)
while len(data) > 0:
msg, data = deserialize_message(data)
if len(messages) == 0 and msg.flags & 0x2 == 0:
return [msg]
elif isinstance(msg, DoneMessage):
return messages
messages.append(msg)
return messages
def query(self, request):
with self.lock:
try:
messages = None
self._send(request)
messages = self._recv()
for message in messages:
if isinstance(message, ErrorMessage):
raise RuntimeError(str(message))
return messages
except Exception as e:
if self.verbose:
logging.error("Netlink query failed: %s" % e)
logging.error("Sent Request: %s" % request)
logging.error("Recv Messages: %s" % messages)
raise
def execute(self, request):
with self.lock:
try:
messages = None
self._send(request)
messages = self._recv()
assert len(messages) == 1
assert isinstance(messages[0], ErrorMessage)
if messages[0].error != 0:
eno = -messages[0].error
raise OSError(eno, os.strerror(eno))
except Exception as e:
if self.verbose:
logging.error("Netlink execute failed: %s" % e)
logging.error("Sent Request: %s" % request)
logging.error("Recv Messages: %s" % messages)
raise
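# Typical exchange (a sketch reusing the Msg and ListA types from the module
# docstring):
#   sock = NetlinkSocket()
#   replies = sock.query(Msg('command_1', attr_list=ListA(
#       some_string='foo', some_short=10)))
#   sock.execute(...)  # for requests where only the ACK/error status matters
#   sock.close()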
|
{
"content_hash": "2da9cff5cce2d018d61cab9ced7e0fbc",
"timestamp": "",
"source": "github",
"line_count": 556,
"max_line_length": 79,
"avg_line_length": 32.726618705035975,
"alnum_prop": 0.5585843042426907,
"repo_name": "nuclon/gnlpy",
"id": "285cb8b88c0f1762767c81dc0e67bbe0fb48ae56",
"size": "18492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netlink.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "60143"
}
],
"symlink_target": ""
}
|
'''
Author: Hans Erik Heggem
Email: hans.erik.heggem@gmail.com
Project: Master's Thesis - Autonomous Inspection Of Wind Blades
Repository: Master's Thesis - CV (Computer Vision)
'''
################### UNIT TEST ########################
import unittest
from Settings.TestData import TestData
from TestUnits.Test_main import Test_main
'''
@brief Test unit for detectEdges
'''
class Test_detectEdges(unittest.TestCase, Test_main, TestData):
def setUp(self):
'''
@brief Give all setups to the unit test.
'''
self.SetAllKey()
self.InitTestData()
#### IMPORTS #####
from Settings import Settings
from src.DroneVision.DroneVision_src.imgProcessing.featureDetection.generalDetectors import detectEdges
self.Settings = Settings
self.detectEdges = detectEdges
##################
def tearDown(self):
'''
@brief Give all tear down steps.
Runs even if the test fails.
'''
pass
def test_detectEdges(self):
'''
@brief Main start test function.
Append functions to test for this unit.
'''
###### START TEST #####
print 'Edge detection is tested by Heading/EdgeHeading test'
###########################
|
{
"content_hash": "d4f6c99963420d77dd7ed93fc9f54109",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 105,
"avg_line_length": 25.822222222222223,
"alnum_prop": 0.6566265060240963,
"repo_name": "hansehe/Wind-Blade-Inspection",
"id": "517bf9e9e3be5b30b3cc5a9b3dd51f2f35a20213",
"size": "1162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TestUnits/Test_src/Test_DroneVision/Test_DroneVision_src/Test_imgProcessing/Test_featureDetection/Test_generalDetectors/Test_detectEdges.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2183232"
}
],
"symlink_target": ""
}
|
class Point():
def reset():
pass
# This fails: reset() was defined without the 'self' parameter.
p = Point()
p.reset()
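# In Python 3 the call above fails with:
#   TypeError: reset() takes 0 positional arguments but 1 was given
# because p.reset() implicitly passes the instance as the first argument.
# A corrected sketch:
#
#   class Point:
#       def reset(self):
#           pass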
|
{
"content_hash": "804d0e9549348f8266e42209709768d9",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 18,
"avg_line_length": 11.142857142857142,
"alnum_prop": 0.6153846153846154,
"repo_name": "wolfdale/Spaghetti-code",
"id": "09d5c8e60cd1e003a430b1338f7afb36277e63c7",
"size": "115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/forgot_self.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "117137"
},
{
"name": "C++",
"bytes": "21975"
},
{
"name": "CSS",
"bytes": "74"
},
{
"name": "Go",
"bytes": "12214"
},
{
"name": "HTML",
"bytes": "286"
},
{
"name": "Java",
"bytes": "95417"
},
{
"name": "JavaScript",
"bytes": "2041"
},
{
"name": "Python",
"bytes": "14477"
},
{
"name": "Ruby",
"bytes": "15"
},
{
"name": "Shell",
"bytes": "368"
}
],
"symlink_target": ""
}
|
from .context_dto import ContextDTO
class QnASearchResultContext(ContextDTO):
"""Context object of the QnA.
:param is_context_only: Marks whether a prompt is relevant only in the
context of a previous question.
true - do not include this QnA as a search result for queries without
context
false - ignore context and include this QnA in search results
:type is_context_only: bool
:param prompts: List of prompts associated with the answer.
:type prompts:
list[~azure.cognitiveservices.knowledge.qnamaker.models.PromptDTO]
"""
_validation = {
'prompts': {'max_items': 20},
}
_attribute_map = {
'is_context_only': {'key': 'isContextOnly', 'type': 'bool'},
'prompts': {'key': 'prompts', 'type': '[PromptDTO]'},
}
def __init__(self, **kwargs):
super(QnASearchResultContext, self).__init__(**kwargs)
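A minimal usage sketch (the keyword names follow the _attribute_map above;
the import path is an assumption based on this file's location in the SDK):

from azure.cognitiveservices.knowledge.qnamaker.models import QnASearchResultContext

# Mark this QnA as a follow-up prompt that is only meaningful with context.
context = QnASearchResultContext(is_context_only=True, prompts=[])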
|
{
"content_hash": "7ec1653b60a1da99f463ec186a3ed016",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 31.821428571428573,
"alnum_prop": 0.6453423120089786,
"repo_name": "Azure/azure-sdk-for-python",
"id": "a8389973ecb8eabfff724dd1d22a2f382991d40a",
"size": "1365",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/cognitiveservices/azure-cognitiveservices-knowledge-qnamaker/azure/cognitiveservices/knowledge/qnamaker/models/qn_asearch_result_context.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import boto3
import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
from moto import mock_kinesis
@mock_kinesis
def test_record_data_exceeds_1mb():
client = boto3.client("kinesis", region_name="us-east-1")
client.create_stream(StreamName="my_stream", ShardCount=1)
with pytest.raises(ClientError) as exc:
client.put_records(
Records=[{"Data": b"a" * (2**20 + 1), "PartitionKey": "key"}],
StreamName="my_stream",
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"1 validation error detected: Value at 'records.1.member.data' failed to satisfy constraint: Member must have length less than or equal to 1048576"
)
@mock_kinesis
def test_total_record_data_exceeds_5mb():
client = boto3.client("kinesis", region_name="us-east-1")
client.create_stream(StreamName="my_stream", ShardCount=1)
with pytest.raises(ClientError) as exc:
client.put_records(
Records=[{"Data": b"a" * 2**20, "PartitionKey": "key"}] * 5,
StreamName="my_stream",
)
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidArgumentException")
err["Message"].should.equal("Records size exceeds 5 MB limit")
@mock_kinesis
def test_too_many_records():
client = boto3.client("kinesis", region_name="us-east-1")
client.create_stream(StreamName="my_stream", ShardCount=1)
with pytest.raises(ClientError) as exc:
client.put_records(
Records=[{"Data": b"a", "PartitionKey": "key"}] * 501,
StreamName="my_stream",
)
err = exc.value.response["Error"]
err["Code"].should.equal("ValidationException")
err["Message"].should.equal(
"1 validation error detected: Value at 'records' failed to satisfy constraint: Member must have length less than or equal to 500"
)
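For contrast, a request inside all three limits succeeds. A minimal sketch
(it assumes moto reports FailedRecordCount the way the real service does):

@mock_kinesis
def test_within_limits():
    client = boto3.client("kinesis", region_name="us-east-1")
    client.create_stream(StreamName="my_stream", ShardCount=1)
    # 100 records of 1 KiB each: under the 500-record, 1 MiB-per-record,
    # and 5 MiB-per-request limits exercised above.
    resp = client.put_records(
        Records=[{"Data": b"a" * 1024, "PartitionKey": "key"}] * 100,
        StreamName="my_stream",
    )
    assert resp["FailedRecordCount"] == 0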
|
{
"content_hash": "4f1ebf36d841d052ca32b0223f9c01bf",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 155,
"avg_line_length": 37.82692307692308,
"alnum_prop": 0.6578546009150992,
"repo_name": "spulec/moto",
"id": "5c5fa3a4d120083390143ad13a25d7cbaa24a0fd",
"size": "1967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_kinesis/test_kinesis_stream_limits.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "255"
},
{
"name": "HTML",
"bytes": "5983"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "1424"
},
{
"name": "Jinja",
"bytes": "2502"
},
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Python",
"bytes": "14737868"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "5515"
}
],
"symlink_target": ""
}
|
"""
Django settings for c2asm project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0p-$_-bc79noapo*s_6q_m0%%my259_3c^x%fnl(09&(1%q_if'
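# A safer pattern is to read the key from the environment instead of
# hard-coding it; a minimal sketch (the variable name is an assumption):
# SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)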
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['c2asm.com', 'localhost', '127.0.0.1', '0.0.0.0']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'c2asm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR + '/assembly/templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'c2asm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# STATICFILES_DIRS = [
# BASE_DIR + "/static/",
# "/var/www/static/",
# ]
STATIC_ROOT = "/var/www/html/c2asm.com/static"
STATICFILES_DIRS = [BASE_DIR + "/static",
]
STATIC_URL = '/static/'
|
{
"content_hash": "fc706901a171e355da170bcd70f7e9b4",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 91,
"avg_line_length": 25.587786259541986,
"alnum_prop": 0.6781026252983293,
"repo_name": "vladimir-nazarenko/c2asm",
"id": "79b2f1ae865e3770bc2bd17c0174d0f4b4fedc2d",
"size": "3352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c2asm/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2094"
},
{
"name": "HTML",
"bytes": "3428"
},
{
"name": "Python",
"bytes": "10991"
},
{
"name": "Shell",
"bytes": "416"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from utils.serializers import resource_read_only
from ..models import Environment
class EnvironmentListSerializer(serializers.ModelSerializer):
class Meta:
model = Environment
        fields = '__all__'  # DRF expects 'fields', not 'field'
read_only_fields = resource_read_only
class EnvironmentDetailSerializer(serializers.ModelSerializer):
class Meta:
model = Environment
        fields = '__all__'
read_only_fields = resource_read_only + ('eid',)
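A quick usage sketch (it assumes an Environment row exists; resource_read_only
is defined in the project's utils.serializers, which is not shown here):

env = Environment.objects.first()
data = EnvironmentListSerializer(env).data  # dict of all model fields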
|
{
"content_hash": "3d6f4418a3431a84337e23b89cd29249",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 63,
"avg_line_length": 27.5,
"alnum_prop": 0.6989898989898989,
"repo_name": "GuoDuanLZ/sdustoj-judge-webserver",
"id": "a9709261d4a1971352760423d514d68c6af63d74",
"size": "495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdustoj_server/judge/api_server/global_serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "61365"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class FillcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="fillcolor", parent_name="layout.activeshape", **kwargs
):
super(FillcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
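The validator is registered automatically; from user code the property it
guards is reached through layout.activeshape. A minimal sketch, assuming a
Plotly version that ships layout.activeshape:

import plotly.graph_objects as go

fig = go.Figure()
fig.add_shape(type="rect", x0=0, y0=0, x1=1, y1=1, editable=True)
# Fill color applied to the shape currently being edited; the value passes
# through FillcolorValidator.
fig.update_layout(activeshape_fillcolor="lightblue")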
|
{
"content_hash": "69a668b64caf0d51c43c1f4bafb85bf0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 32.84615384615385,
"alnum_prop": 0.6135831381733021,
"repo_name": "plotly/plotly.py",
"id": "4f95513188bf5a6e2dd14c672c84825765b37b17",
"size": "427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/activeshape/_fillcolor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
version = '2.0a9'
LONG_DESCRIPTION = """
Using django-avatar
===================
Basics
------
To integrate ``django-avatar`` with your site, there are relatively few things
that are required. A minimal integration can work like this:
1. List this application in the ``INSTALLED_APPS`` portion of your settings
file. Your settings file will look something like::
INSTALLED_APPS = (
# ...
'avatar',
)
2. Add the avatar urls to the end of your root urlconf. Your urlconf
will look something like::
urlpatterns = patterns('',
# ...
(r'^admin/(.*)', admin.site.root),
(r'^avatar/', include('avatar.urls')),
)
3. Somewhere in your template navigation scheme, link to the change avatar
page::
<a href="{% url 'avatar_change' %}">Change your avatar</a>
4. Wherever you want to display an avatar for a user, first load the avatar
template tags::
{% load avatar_tags %}
Then, use the ``avatar`` tag to display an avatar of a default size::
{% avatar user %}
Or specify a size (in pixels) explicitly::
{% avatar user 65 %}
5. Optionally customize ``avatar/change.html`` and
``avatar/confirm_delete.html`` to conform to your site's look and feel.
Views
-----
There are only two views for this application: one for changing a user's avatar,
and another for deleting a user's avatar.
Changing an avatar
~~~~~~~~~~~~~~~~~~
The actual view function is located at ``avatar.views.change``, and this can
be referenced by the url name ``avatar_change``. It takes two keyword
arguments: ``extra_context`` and ``next_override``. If ``extra_context`` is
provided, that context will be placed into the template's context.
If ``next_override`` is provided, the user will be redirected to the specified
URL after form submission. Otherwise the user will be redirected to the URL
specified in the ``next`` parameter in ``request.POST``. If ``request.POST``
has no ``next`` parameter, ``request.GET`` will be searched. If ``request.GET``
has no ``next`` parameter, the ``HTTP_REFERER`` header will be inspected. If
that header does not exist, the user will be redirected back to the current URL.
Deleting an avatar
~~~~~~~~~~~~~~~~~~
The actual view function is located at ``avatar.views.delete``, and this can be
referenced by the url name ``avatar_delete``. It takes the same two keyword
arguments as ``avatar.views.change`` and follows the same redirection rules
as well.
Template Tags
-------------
To begin using these template tags, you must first load the tags into the
template rendering system:
{% load avatar_tags %}
``{% avatar_url user [size in pixels] %}``
Renders the URL of the avatar for the given user. User can be either a
``django.contrib.auth.models.User`` object instance or a username.
``{% avatar user [size in pixels] %}``
Renders an HTML ``img`` tag for the given user for the specified size. User
can be either a ``django.contrib.auth.models.User`` object instance or a
username.
``{% render_avatar avatar [size in pixels] %}``
Given an actual ``avatar.models.Avatar`` object instance, renders an HTML
``img`` tag to represent that avatar at the requested size.
Global Settings
---------------
There are a number of settings available to easily customize the avatars that
appear on the site. Listed below are those settings:
AVATAR_GRAVATAR_BASE_URL
    The base URL from which Gravatar avatars are fetched. Defaults to ``http://www.gravatar.com/avatar/``.
AUTO_GENERATE_AVATAR_SIZES
An iterable of integers representing the sizes of avatars to generate on
upload. This can save rendering time later on if you pre-generate the
    resized versions. Defaults to ``(80,)``.
AVATAR_RESIZE_METHOD
The method to use when resizing images, based on the options available in
PIL. Defaults to ``Image.ANTIALIAS``.
AVATAR_STORAGE_DIR
The directory under ``MEDIA_ROOT`` to store the images. If using a
    non-filesystem storage backend, this will simply be prepended to the
    file name.
AVATAR_GRAVATAR_BACKUP
A boolean determining whether to default to the Gravatar service if no
``Avatar`` instance is found in the system for the given user. Defaults to
True.
AVATAR_DEFAULT_URL
The default URL to default to if ``AVATAR_GRAVATAR_BACKUP`` is set to False
and there is no ``Avatar`` instance found in the system for the given user.
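For example, a minimal configuration overriding a few of these defaults
might look like (the values are illustrative)::

    AVATAR_GRAVATAR_BACKUP = True
    AVATAR_STORAGE_DIR = 'avatars'
    AUTO_GENERATE_AVATAR_SIZES = (80, 120)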
Management Commands
-------------------
This application does include one management command: ``rebuild_avatars``. It
takes no arguments and, when run, re-renders all of the thumbnails for all of
the avatars for the pixel sizes specified in the ``AUTO_GENERATE_AVATAR_SIZES``
setting.
"""
setup(
name='django-avatar',
version=version,
description="django-avatar",
long_description=LONG_DESCRIPTION,
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Framework :: Django",
"Environment :: Web Environment",
],
keywords='avatar,django',
author='Eric Florenzano',
author_email='floguy@gmail.com',
maintainer='Jannis Leidel',
maintainer_email='jannis@leidel.info',
url='http://github.com/jezdez/django-avatar/',
license='BSD',
packages=find_packages(),
package_data={
'avatar': [
'templates/notification/*/*.*',
'templates/avatar/*.html',
'locale/*/LC_MESSAGES/*',
'media/avatar/img/default.jpg',
'testdata/*',
],
},
include_package_data=True,
zip_safe=False,
)
|
{
"content_hash": "8763a9999e98bf712d7726ea86788db7",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 103,
"avg_line_length": 32.45810055865922,
"alnum_prop": 0.6690189328743545,
"repo_name": "TomLottermann/django-avatar",
"id": "04f9aa7a59a21732088f122b9f8292101b466243",
"size": "5810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "41192"
}
],
"symlink_target": ""
}
|
default_app_config = 'microservices.apps.MicroservicesConfig'
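default_app_config points Django at an AppConfig subclass; a hypothetical
sketch of what microservices/apps.py would contain (the verbose_name is an
assumption):

from django.apps import AppConfig

class MicroservicesConfig(AppConfig):
    name = 'microservices'
    verbose_name = 'Microservices'  # assumed display name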
|
{
"content_hash": "aa36081c117fc3c6a5b398678a864e34",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 61,
"avg_line_length": 62,
"alnum_prop": 0.8387096774193549,
"repo_name": "gabor-boros/django-microservices",
"id": "612c18a260ec50dc690ea45ff50a64e5d806eb01",
"size": "87",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "microservices/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18740"
},
{
"name": "Shell",
"bytes": "115"
}
],
"symlink_target": ""
}
|
"""Tests for SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import errors
from tensorflow.python.lib.io import file_io
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import utils
from tensorflow.python.util import compat
def tearDownModule():
file_io.delete_recursively(tf.test.get_temp_dir())
class SavedModelTest(tf.test.TestCase):
def testSequence(self):
export_dir = os.path.join(tf.test.get_temp_dir(), "sequence")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Expect an assertion error since add_meta_graph_and_variables() should be
# invoked before any add_meta_graph() calls.
with self.test_session(graph=tf.Graph()) as sess:
self.assertRaises(AssertionError, builder.add_meta_graph, ["foo"])
# Expect an assertion error for multiple calls of
# add_meta_graph_and_variables() since weights should be saved exactly once.
with self.test_session(graph=tf.Graph()) as sess:
v = tf.Variable(42, name="v")
sess.run(tf.initialize_all_variables())
self.assertEqual(42, v.eval())
builder.add_meta_graph_and_variables(sess, ["bar"])
self.assertRaises(AssertionError, builder.add_meta_graph_and_variables,
sess, ["baz"])
def testTags(self):
export_dir = os.path.join(
compat.as_bytes(tf.test.get_temp_dir()), compat.as_bytes("tags"))
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
# - a single tag (from predefined constants).
with self.test_session(graph=tf.Graph()) as sess:
v = tf.Variable(42, name="v")
sess.run(tf.initialize_all_variables())
self.assertEqual(42, v.eval())
builder.add_meta_graph_and_variables(sess, [constants.TAG_TRAINING])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - a single tag (from predefined constants).
with self.test_session(graph=tf.Graph()) as sess:
v = tf.Variable(43, name="v")
sess.run(tf.initialize_all_variables())
self.assertEqual(43, v.eval())
builder.add_meta_graph([constants.TAG_SERVING])
# Graph that updates the single variable. SavedModel is invoked:
# - to add the model (weights are not updated).
# - multiple custom tags.
with self.test_session(graph=tf.Graph()) as sess:
v = tf.Variable(44, name="v")
sess.run(tf.initialize_all_variables())
self.assertEqual(44, v.eval())
builder.add_meta_graph(["foo", "bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with a single predefined tag whose variables were saved.
with self.test_session(graph=tf.Graph()) as sess:
loader.load(sess, [constants.TAG_TRAINING], export_dir)
self.assertEqual(42, tf.get_collection(tf.GraphKeys.VARIABLES)[0].eval())
# Restore the graph with a single predefined tag whose variables were not
# saved.
with self.test_session(graph=tf.Graph()) as sess:
loader.load(sess, [constants.TAG_SERVING], export_dir)
self.assertEqual(42, tf.get_collection(tf.GraphKeys.VARIABLES)[0].eval())
# Restore the graph with multiple tags. Provide duplicate tags to test set
# semantics.
with self.test_session(graph=tf.Graph()) as sess:
loader.load(sess, ["foo", "bar", "foo"], export_dir)
self.assertEqual(42, tf.get_collection(tf.GraphKeys.VARIABLES)[0].eval())
# Try restoring a graph with a non-existent tag. This should yield a runtime
# error.
with self.test_session(graph=tf.Graph()) as sess:
self.assertRaises(RuntimeError, loader.load, sess, ["INVALID"],
export_dir)
# Try restoring a graph where a subset of the tags match. Since tag matching
# for meta graph defs follows "all" semantics, this should yield a runtime
# error.
with self.test_session(graph=tf.Graph()) as sess:
self.assertRaises(RuntimeError, loader.load, sess, ["foo", "baz"],
export_dir)
def testVariables(self):
export_dir = os.path.join(
compat.as_bytes(tf.test.get_temp_dir()), compat.as_bytes("variables"))
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Graph with two variables. SavedModel invoked to:
# - add with weights.
with self.test_session(graph=tf.Graph()) as sess:
v1 = tf.Variable(1, name="v1")
v2 = tf.Variable(2, name="v2")
sess.run(tf.initialize_all_variables())
self.assertEqual(1, v1.eval())
self.assertEqual(2, v2.eval())
builder.add_meta_graph_and_variables(sess, ["foo"])
# Graph with a single variable (subset of the variables from the previous
# graph whose weights were saved). SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.test_session(graph=tf.Graph()) as sess:
v2 = tf.Variable(3, name="v2")
sess.run(tf.initialize_all_variables())
self.assertEqual(3, v2.eval())
builder.add_meta_graph(["bar"])
# Graph with a single variable (disjoint set of variables from the previous
# graph whose weights were saved). SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.test_session(graph=tf.Graph()) as sess:
v3 = tf.Variable(4, name="v3")
sess.run(tf.initialize_all_variables())
self.assertEqual(4, v3.eval())
builder.add_meta_graph(["baz"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo", whose variables were saved.
with self.test_session(graph=tf.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
collection_vars = tf.get_collection(tf.GraphKeys.VARIABLES)
self.assertEqual(len(collection_vars), 2)
self.assertEqual(1, collection_vars[0].eval())
self.assertEqual(2, collection_vars[1].eval())
# Restore the graph with tag "bar", whose variables were not saved. Only the
# subset of the variables added to the graph will be restored with the
# checkpointed value.
with self.test_session(graph=tf.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
collection_vars = tf.get_collection(tf.GraphKeys.VARIABLES)
self.assertEqual(len(collection_vars), 1)
self.assertEqual(2, collection_vars[0].eval())
# Try restoring the graph with tag "baz", whose variables were not saved.
# Since this graph has a disjoint set of variables from the set that was
# saved, this should raise an error.
with self.test_session(graph=tf.Graph()) as sess:
self.assertRaises(errors.NotFoundError, loader.load, sess, ["baz"],
export_dir)
def testSaveAsText(self):
export_dir = os.path.join(
compat.as_bytes(tf.test.get_temp_dir()), compat.as_bytes("astext"))
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
with self.test_session(graph=tf.Graph()) as sess:
v = tf.Variable(42, name="v")
sess.run(tf.initialize_all_variables())
self.assertEqual(42, v.eval())
builder.add_meta_graph_and_variables(sess, ["foo"])
# Graph with the same single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.test_session(graph=tf.Graph()) as sess:
v = tf.Variable(43, name="v")
sess.run(tf.initialize_all_variables())
self.assertEqual(43, v.eval())
builder.add_meta_graph(["bar"])
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Restore the graph with tag "foo", whose variables were saved.
with self.test_session(graph=tf.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(42, tf.get_collection(tf.GraphKeys.VARIABLES)[0].eval())
# Restore the graph with tag "bar", whose variables were not saved.
with self.test_session(graph=tf.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
self.assertEqual(42, tf.get_collection(tf.GraphKeys.VARIABLES)[0].eval())
def testCollections(self):
export_dir = os.path.join(
compat.as_bytes(tf.test.get_temp_dir()), compat.as_bytes("collections"))
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Graph with a single variable added to a collection. SavedModel invoked to:
# - add with weights.
with self.test_session(graph=tf.Graph()) as sess:
v = tf.Variable(42, name="v")
tf.add_to_collection("foo_vars", v)
sess.run(tf.initialize_all_variables())
self.assertEqual(42, v.eval())
builder.add_meta_graph_and_variables(sess, ["foo"])
# Graph with the same single variable added to a different collection.
# SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.test_session(graph=tf.Graph()) as sess:
v = tf.Variable(43, name="v")
tf.add_to_collection("bar_vars", v)
sess.run(tf.initialize_all_variables())
self.assertEqual(43, v.eval())
builder.add_meta_graph(["bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo", whose variables were saved. The
# collection 'foo_vars' should contain a single element. The collection
# 'bar_vars' should not be found.
with self.test_session(graph=tf.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
collection_foo_vars = tf.get_collection("foo_vars")
self.assertEqual(len(collection_foo_vars), 1)
self.assertEqual(42, collection_foo_vars[0].eval())
self.assertEqual(len(tf.get_collection("bar_vars")), 0)
# Restore the graph with tag "bar", whose variables were not saved. The
# collection-def exported as part of the meta graph def is updated to
# reflect the new collection. The value of the variable in the
# collection-def corresponds to the saved value (from the previous graph
# with tag "foo").
with self.test_session(graph=tf.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
collection_bar_vars = tf.get_collection("bar_vars")
self.assertEqual(len(collection_bar_vars), 1)
self.assertEqual(42, collection_bar_vars[0].eval())
self.assertEqual(len(tf.get_collection("foo_vars")), 0)
def testSignatureDefs(self):
export_dir = os.path.join(
compat.as_bytes(tf.test.get_temp_dir()),
compat.as_bytes("signature_defs"))
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Graph with a single variable and a single entry in the signature def map.
# SavedModel is invoked to add with weights.
with self.test_session(graph=tf.Graph()) as sess:
v = tf.Variable(42, name="v")
sess.run(tf.initialize_all_variables())
self.assertEqual(42, v.eval())
# Build and populate an empty SignatureDef for testing.
foo_signature = utils.build_signature_def(dict(), dict(), "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"], signature_def_map={"foo_key": foo_signature})
# Graph with the same single variable and multiple entries in the signature
# def map. No weights are saved by SavedModel.
with self.test_session(graph=tf.Graph()) as sess:
v = tf.Variable(43, name="v")
sess.run(tf.initialize_all_variables())
self.assertEqual(43, v.eval())
# Build and populate a different SignatureDef for testing.
bar_signature = utils.build_signature_def(dict(), dict(), "bar")
# Also, build a different SignatureDef corresponding to "foo_key" defined
# in the previous graph.
foo_new_signature = utils.build_signature_def(dict(), dict(), "foo_new")
builder.add_meta_graph(
["bar"],
signature_def_map={"bar_key": bar_signature,
"foo_key": foo_new_signature})
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo". The single entry in the SignatureDef map
# corresponding to "foo_key" should exist.
with self.test_session(graph=tf.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self.assertEqual(42, tf.get_collection(tf.GraphKeys.VARIABLES)[0].eval())
foo_signature = foo_graph.signature_def
self.assertEqual(len(foo_signature), 1)
self.assertEqual("foo", foo_signature["foo_key"].method_name)
# Restore the graph with tag "bar". The SignatureDef map should have two
# entries. One corresponding to "bar_key" and another corresponding to the
# new value of "foo_key".
with self.test_session(graph=tf.Graph()) as sess:
bar_graph = loader.load(sess, ["bar"], export_dir)
self.assertEqual(42, tf.get_collection(tf.GraphKeys.VARIABLES)[0].eval())
bar_signature = bar_graph.signature_def
self.assertEqual(len(bar_signature), 2)
self.assertEqual("bar", bar_signature["bar_key"].method_name)
self.assertEqual("foo_new", bar_signature["foo_key"].method_name)
def testAssets(self):
export_dir = os.path.join(
compat.as_bytes(tf.test.get_temp_dir()), compat.as_bytes("with-assets"))
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=tf.Graph()) as sess:
v = tf.Variable(42, name="v")
sess.run(tf.initialize_all_variables())
self.assertEqual(42, v.eval())
# Build an asset collection.
asset_filepath = os.path.join(
compat.as_bytes(tf.test.get_temp_dir()),
compat.as_bytes("hello42.txt"))
file_io.write_string_to_file(asset_filepath, "foo bar baz")
asset_file_tensor = tf.constant(asset_filepath, name="asset_file_tensor")
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, asset_file_tensor)
ignored_filepath = os.path.join(
compat.as_bytes(tf.test.get_temp_dir()),
compat.as_bytes("ignored.txt"))
file_io.write_string_to_file(ignored_filepath, "will be ignored")
asset_collection = tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS)
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_collection=asset_collection)
# Save the SavedModel to disk.
builder.save()
with self.test_session(graph=tf.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
# Validate the assets.
collection_def = foo_graph.collection_def
assets_any = collection_def[constants.ASSETS_KEY].any_list.value
self.assertEqual(len(assets_any), 1)
asset = meta_graph_pb2.AssetFileDef()
assets_any[0].Unpack(asset)
assets_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("hello42.txt"))
asset_contents = file_io.read_file_to_string(assets_path)
self.assertEqual("foo bar baz", compat.as_text(asset_contents))
self.assertEqual("hello42.txt", asset.filename)
self.assertEqual("asset_file_tensor:0", asset.tensor_info.name)
ignored_asset_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("ignored.txt"))
self.assertFalse(file_io.file_exists(ignored_asset_path))
def testOp(self):
export_dir = os.path.join(
compat.as_bytes(tf.test.get_temp_dir()), compat.as_bytes("op"))
builder = saved_model_builder.SavedModelBuilder(export_dir)
with tf.Session(
graph=tf.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = tf.Variable(1, name="v1")
with sess.graph.device("/cpu:1"):
v2 = tf.Variable(2, name="v2")
# v3 is an unsaved variable derived from v1 and v2. It is used to
# exercise the ability to run an init op when restoring a graph.
v3 = tf.Variable(1, name="v3", trainable=False, collections=[])
assign_v3 = tf.assign(v3, tf.add(v1, v2))
init_op = tf.group(assign_v3, name="init_op")
tf.add_to_collection("v", v1)
tf.add_to_collection("v", v2)
tf.add_to_collection("v", v3)
tf.add_to_collection("init_op", init_op)
sess.run(tf.initialize_all_variables())
self.assertEqual(1, tf.get_collection("v")[0].eval())
self.assertEqual(2, tf.get_collection("v")[1].eval())
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk.
builder.save()
with tf.Session(
graph=tf.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
loader.load(sess, ["foo"], export_dir)
# Validate variables, run the init op and verify result.
self.assertEqual(1, tf.get_collection("v")[0].eval())
self.assertEqual(2, tf.get_collection("v")[1].eval())
tf.get_collection("init_op")[0].run()
self.assertEqual(3, tf.get_collection("v")[2].eval())
if __name__ == "__main__":
tf.test.main()
|
{
"content_hash": "6732b85e4ae060b656405267cba8755c",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 80,
"avg_line_length": 42.268115942028984,
"alnum_prop": 0.6650094291102349,
"repo_name": "naturali/tensorflow",
"id": "0e33250e28b4c508851f07a51814d18323d3129f",
"size": "18189",
"binary": false,
"copies": "2",
"ref": "refs/heads/r0.11",
"path": "tensorflow/python/saved_model/saved_model_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "159351"
},
{
"name": "C++",
"bytes": "9498060"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "GCC Machine Description",
"bytes": "1"
},
{
"name": "HTML",
"bytes": "787519"
},
{
"name": "Java",
"bytes": "39229"
},
{
"name": "JavaScript",
"bytes": "10875"
},
{
"name": "Jupyter Notebook",
"bytes": "1773504"
},
{
"name": "Makefile",
"bytes": "12318"
},
{
"name": "Objective-C",
"bytes": "5332"
},
{
"name": "Objective-C++",
"bytes": "45585"
},
{
"name": "Protocol Buffer",
"bytes": "114983"
},
{
"name": "Python",
"bytes": "7015287"
},
{
"name": "Shell",
"bytes": "201064"
},
{
"name": "TypeScript",
"bytes": "414414"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
import stuff.fields
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.RunSQL('CREATE EXTENSION IF NOT EXISTS citext'),
migrations.AlterField(
model_name='user',
name='email',
field=stuff.fields.CaseInsensitiveEmailField(max_length=254, unique=True, verbose_name='email address'),
),
migrations.AlterField(
model_name='user',
name='username',
field=stuff.fields.CaseInsensitiveCharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Username must match the name in the game IL-2 (including squad tag).', max_length=64, unique=True, verbose_name='username'),
),
]
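The CREATE EXTENSION citext statement is there so the custom fields can map
onto PostgreSQL's case-insensitive citext type. A hypothetical sketch of how
stuff.fields might define them (the real module is not shown here):

from django.db import models

class CaseInsensitiveEmailField(models.EmailField):
    def db_type(self, connection):
        return 'citext'  # case-insensitive text type from the extension

class CaseInsensitiveCharField(models.CharField):
    def db_type(self, connection):
        return 'citext'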
|
{
"content_hash": "6e0f2817d32c8b1e39629630fb6455a1",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 263,
"avg_line_length": 34.96,
"alnum_prop": 0.6430205949656751,
"repo_name": "Flyingfox646/flyingfox",
"id": "a92626c29b52d44cb85261e535c963c3b1830d83",
"size": "946",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/users/migrations/0002_ci_fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1515"
},
{
"name": "CSS",
"bytes": "103959"
},
{
"name": "HTML",
"bytes": "317380"
},
{
"name": "JavaScript",
"bytes": "17458"
},
{
"name": "Python",
"bytes": "415174"
},
{
"name": "Shell",
"bytes": "1759"
}
],
"symlink_target": ""
}
|