| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
import sub1.file1
# Alternative
#from sub1 import file1
def main():
print('/top')
sub1.file1.foo()
main()
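
# Assumed package layout for this demo (hypothetical; not shown in the file):
#   top.py            <- this file
#   sub1/__init__.py
#   sub1/file1.py     <- defines foo()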
|
{
"content_hash": "46a08a4bea727b01a52431d971f4fd31",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 23,
"avg_line_length": 11.7,
"alnum_prop": 0.6495726495726496,
"repo_name": "myriasofo/CLRS_exercises",
"id": "dcaa9a2e364dd082cc8769c636046ad7cf8ca13d",
"size": "117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "languages/python/relativeImports/top.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40044"
}
],
"symlink_target": ""
}
|
import os
""" IMPORTANT: Not sure how to use this lib? Check out the "example.py" file :) """
Version="0.2"
class Handler:
""" Alfred.Handler, this class is responsible for handling Alfred! """
def __init__(self, args=[], query="", use_no_query_string=True):
""" Create a new handler
Keyword arguments:
args -- This list should be *sys.argv* (default: [])
query -- This string should only be used if args is not set!
        use_no_query_string -- If there is no query, should the handler use "EMPTY_QUERY" instead? (default: True)
"""
if type(args) != list:
            raise TypeError("Alfred.Handler(args): args is not a list!")
if len(args) > 1:
self.query = args[1]
elif query != "":
self.query = query
else:
if use_no_query_string:
self.query = "EMPTY_QUERY"
else:
self.query = ""
self.items = []
def get_current_directory(self):
return os.getcwd()
def query_is_empty(self):
if self.query == "EMPTY_QUERY" or self.query == "":
return True
else:
return False
def add_item(self, item):
""" Adds a new Alfred.Item to this handler
Keyword arguments:
item -- The Alfred.Item you want to add ;)
"""
if not isinstance(item, Item):
            raise TypeError("Alfred.Handler.add_item(item): item is not an instance of Alfred.Item")
self.items.append(item)
def add_new_item(self, title="", subtitle="", uid=None, arg="", icon=None):
""" Adds a new Item to this handler without using the Alfred.Item class!
Keyword arguments:
title -- The title of this item
subtitle -- The subtitle of this item
uid -- The uid of this item (default: None)
arg -- The argument of this item
icon -- The icon of this item (Default: None)
"""
self.add_item(Item(title, subtitle, uid, arg, icon))
def __str__(self):
return self.to_xml()
def to_xml(self, max_results=None):
""" Generates a XML string
Keyword arguments:
max_results -- How many results should be in this string? (Default: None - No limitation)
"""
xml_string = '<?xml version="1.0" encoding="UTF-8" ?>'
xml_string += '<items>'
counter = 0
for item in self.items:
            xml_string += str(item)
counter += 1
if max_results is not None and counter >= max_results:
break
xml_string += '</items>'
return xml_string
def push(self, max_results=None):
""" Push the content to Alfred
Keyword arguments:
max_results -- How many results should be in this string? (Default: None - No limitation)
"""
print(self.to_xml(max_results))
def test_push(self, max_results=None):
""" Sometimes it's faster to just do stuff in your editor instead of opening Alfred ;)
Keyword arguments:
max_results -- How many results should be in this string? (Default: None - No limitation)
"""
counter = 1
for item in self.items:
print("Entry #%s:" % counter)
print("\tTitle: %s" % item.title)
print("\tSubtitle: %s" % item.subtitle)
print("\tArguments: %s, Icon: %s" % (item.arg, item.icon))
print("-"*30)
counter += 1
class Item:
def __init__(self, title="", subtitle="", uid=None, arg="", icon=None):
""" Creates a new Item for Alfred
Keyword arguments:
title -- The title of this item
subtitle -- The subtitle of this item
uid -- The uid of this item (default: None)
arg -- The argument of this item
icon -- The icon of this item (Default: None)
"""
self.title = title
self.subtitle = subtitle
self.uid = uid
self.arg = arg
self.icon = icon
def __str__(self):
title = '<title>%s</title>' % self.title
subtitle = '<subtitle>%s</subtitle>' % self.subtitle
icon = ''
args = ''
if self.icon is not None:
icon = '<icon>%s</icon>' % self.icon
if self.arg is not None:
args = '<arg>%s</arg>' % self.arg
item_content = "%s%s%s%s" % (title, subtitle, icon, args)
item_info = '<item uid="%s">%s</item>' % (self.uid, item_content)
return item_info
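
# Hypothetical usage sketch (not part of the original module; mirrors the
# "example.py" the top docstring points to, which is not included here):
if __name__ == '__main__':
    handler = Handler(args=["script.py", "hello"])
    handler.add_new_item(title="Greeting", subtitle="Says hello", arg="hello")
    handler.push()  # prints the <items> XML that Alfred consumes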
|
{
"content_hash": "1ad964e6b4df8566cd1bd33686c1eaed",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 96,
"avg_line_length": 24.20625,
"alnum_prop": 0.6395558998192615,
"repo_name": "kasoki/hue",
"id": "b384d440108d096e8d3b5de694f16cad1dff7997",
"size": "5056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Alfred.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34506"
}
],
"symlink_target": ""
}
|
import sys
import mock
from oslo_config import cfg
import testtools
from webob import exc
import webtest
from neutron.api import extensions
from neutron.common import config
from neutron.common import constants
from neutron.common import exceptions
from neutron import context
from neutron.db import quota_db
from neutron import quota
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit import testlib_api
TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
_get_path = test_base._get_path
class QuotaExtensionTestCase(testlib_api.WebTestCase):
def setUp(self):
super(QuotaExtensionTestCase, self).setUp()
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
self.useFixture(tools.AttributeMapMemento())
# Create the default configurations
self.config_parse()
# Update the plugin and extensions path
self.setup_coreplugin(TARGET_PLUGIN)
cfg.CONF.set_override(
'quota_items',
['network', 'subnet', 'port', 'extra1'],
group='QUOTAS')
quota.QUOTAS = quota.QuotaEngine()
quota.register_resources_from_config()
self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
self.plugin = self._plugin_patcher.start()
self.plugin.return_value.supported_extension_aliases = ['quotas']
        # QUOTAS registers the items from conf at startup;
        # 'extra1' is added later, so it has to be registered manually.
quota.QUOTAS.register_resource_by_name('extra1')
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
app = config.load_paste_app('extensions_test_app')
ext_middleware = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
self.api = webtest.TestApp(ext_middleware)
def tearDown(self):
self.api = None
self.plugin = None
super(QuotaExtensionTestCase, self).tearDown()
def _test_quota_default_values(self, expected_values):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
quota = self.deserialize(res)
for resource, expected_value in expected_values.items():
self.assertEqual(expected_value,
quota['quota'][resource])
class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
fmt = 'json'
def setUp(self):
cfg.CONF.set_override(
'quota_driver',
'neutron.db.quota_db.DbQuotaDriver',
group='QUOTAS')
super(QuotaExtensionDbTestCase, self).setUp()
def test_quotas_loaded_right(self):
res = self.api.get(_get_path('quotas', fmt=self.fmt))
quota = self.deserialize(res)
self.assertEqual([], quota['quotas'])
self.assertEqual(200, res.status_int)
def test_quotas_default_values(self):
self._test_quota_default_values(
{'network': 10,
'subnet': 10,
'port': 50,
'extra1': -1})
def test_quotas_negative_default_value(self):
cfg.CONF.set_override(
'quota_port', -666, group='QUOTAS')
cfg.CONF.set_override(
'quota_network', -10, group='QUOTAS')
cfg.CONF.set_override(
'quota_subnet', -50, group='QUOTAS')
self._test_quota_default_values(
{'network': -1,
'subnet': -1,
'port': -1,
'extra1': -1})
def test_show_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_show_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_show_quotas_with_owner_tenant(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_list_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual([], quota['quotas'])
def test_list_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_with_non_integer_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 'abc'}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_with_negative_integer_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': -2}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_with_out_of_range_integer_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': constants.DB_INTEGER_MAX_VALUE + 1}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_update_quotas_to_unlimited(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': -1}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
def test_update_quotas_exceeding_current_limit(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 120}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
def test_update_quotas_with_non_support_resource_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'abc': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
env2 = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
quota = self.deserialize(res)
self.assertEqual(100, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_update_attributes(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
quotas = {'quota': {'extra1': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
env2 = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
quota = self.deserialize(res)
self.assertEqual(100, quota['quota']['extra1'])
def test_delete_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(204, res.status_int)
def test_delete_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
    def test_quotas_loaded_bad_returns_404(self):
        # expect_errors=True keeps webtest from raising on the 404, so no
        # try/except is needed (a bare except here would swallow the assert).
        res = self.api.get(_get_path('quotas'), expect_errors=True)
        self.assertEqual(404, res.status_int)
def test_quotas_limit_check(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 5}}
res = self.api.put(_get_path('quotas', id=tenant_id,
fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
quota.QUOTAS.limit_check(context.Context('', tenant_id),
tenant_id,
network=4)
def test_quotas_limit_check_with_invalid_quota_value(self):
tenant_id = 'tenant_id1'
with testtools.ExpectedException(exceptions.InvalidQuotaValue):
quota.QUOTAS.limit_check(context.Context('', tenant_id),
tenant_id,
network=-2)
def test_quotas_limit_check_with_not_registered_resource_fails(self):
tenant_id = 'tenant_id1'
self.assertRaises(exceptions.QuotaResourceUnknown,
quota.QUOTAS.limit_check,
context.get_admin_context(load_admin_roles=False),
tenant_id,
foobar=1)
def test_quotas_get_tenant_from_request_context(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(quota['tenant']['tenant_id'], tenant_id)
def test_quotas_get_tenant_from_empty_request_context_returns_400(self):
env = {'neutron.context': context.Context('', '',
is_admin=True)}
res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(400, res.status_int)
class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
fmt = 'json'
def setUp(self):
cfg.CONF.set_override(
'quota_driver',
'neutron.quota.ConfDriver',
group='QUOTAS')
super(QuotaExtensionCfgTestCase, self).setUp()
def test_quotas_default_values(self):
self._test_quota_default_values(
{'network': 10,
'subnet': 10,
'port': 50,
'extra1': -1})
def test_quotas_negative_default_value(self):
cfg.CONF.set_override(
'quota_port', -666, group='QUOTAS')
self._test_quota_default_values(
{'network': 10,
'subnet': 10,
'port': -1,
'extra1': -1})
def test_show_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
def test_show_quotas_without_admin_forbidden(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_forbidden(self):
tenant_id = 'tenant_id1'
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas),
expect_errors=True)
self.assertEqual(403, res.status_int)
def test_delete_quotas_forbidden(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
class TestDbQuotaDriver(base.BaseTestCase):
"""Test for neutron.db.quota_db.DbQuotaDriver."""
def test_get_tenant_quotas_arg(self):
"""Call neutron.db.quota_db.DbQuotaDriver._get_quotas."""
driver = quota_db.DbQuotaDriver()
ctx = context.Context('', 'bar')
foo_quotas = {'network': 5}
default_quotas = {'network': 10}
target_tenant = 'foo'
with mock.patch.object(quota_db.DbQuotaDriver,
'get_tenant_quotas',
return_value=foo_quotas) as get_tenant_quotas:
quotas = driver._get_quotas(ctx,
target_tenant,
default_quotas)
self.assertEqual(quotas, foo_quotas)
get_tenant_quotas.assert_called_once_with(ctx,
default_quotas,
target_tenant)
class TestQuotaDriverLoad(base.BaseTestCase):
def setUp(self):
super(TestQuotaDriverLoad, self).setUp()
# Make sure QuotaEngine is reinitialized in each test.
quota.QUOTAS._driver = None
def _test_quota_driver(self, cfg_driver, loaded_driver,
with_quota_db_module=True):
cfg.CONF.set_override('quota_driver', cfg_driver, group='QUOTAS')
with mock.patch.dict(sys.modules, {}):
if (not with_quota_db_module and
'neutron.db.quota_db' in sys.modules):
del sys.modules['neutron.db.quota_db']
driver = quota.QUOTAS.get_driver()
self.assertEqual(loaded_driver, driver.__class__.__name__)
def test_quota_db_driver_with_quotas_table(self):
self._test_quota_driver('neutron.db.quota_db.DbQuotaDriver',
'DbQuotaDriver', True)
def test_quota_db_driver_fallback_conf_driver(self):
self._test_quota_driver('neutron.db.quota_db.DbQuotaDriver',
'ConfDriver', False)
def test_quota_conf_driver(self):
self._test_quota_driver('neutron.quota.ConfDriver',
'ConfDriver', True)
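
# Request/response shapes exercised above (illustrative summary derived from
# the tests; the URL prefix depends on the deployment and is omitted here):
#   GET    .../quotas/<tenant_id>   -> {"quota": {"network": 10, "subnet": 10, ...}}
#   PUT    .../quotas/<tenant_id>   body: {"quota": {"network": 100}}
#   DELETE .../quotas/<tenant_id>   -> 204 on success (admin only)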
|
{
"content_hash": "8e8d234b149a946c0348999d47ffc168",
"timestamp": "",
"source": "github",
"line_count": 440,
"max_line_length": 78,
"avg_line_length": 43.50454545454546,
"alnum_prop": 0.5452930728241563,
"repo_name": "antonioUnina/neutron",
"id": "af87c4a76992005957478c3e1ed383a2602b14d4",
"size": "19779",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/extensions/test_quotasv2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7250407"
},
{
"name": "Shell",
"bytes": "12332"
}
],
"symlink_target": ""
}
|
from django.views.generic import ListView
from django.db.models import Q
from ..professors.models import Professor
class SearchView(ListView):
queryset = Professor.objects.all().order_by('-first_name', 'last_name')
template_name = 'search.html'
def get_context_data(self):
context = super(SearchView, self).get_context_data()
context.update({
'search_term': self.request.GET.get('q', ''),
'navbarSearchShow': True
})
return context
def get_queryset(self):
queryset = super(SearchView, self).get_queryset()
search_term = self.request.GET.get('q')
if search_term:
for term in search_term.split():
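                # NOTE: returns on the first iteration, so only the first
                # whitespace-separated term is actually searched.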
qs = Professor.objects.search(term, raw=True)
return qs
return queryset[:10]
|
{
"content_hash": "d4bac24a4ef1ca7413c1a22da4162c5b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 75,
"avg_line_length": 28.413793103448278,
"alnum_prop": 0.6140776699029126,
"repo_name": "Jpadilla1/notaso",
"id": "be8058d17d52fcbef2dd9bd78ac1e9f967be9a83",
"size": "824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notaso/search/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16583"
},
{
"name": "HTML",
"bytes": "118520"
},
{
"name": "JavaScript",
"bytes": "4052"
},
{
"name": "Python",
"bytes": "80984"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mlvpn'
copyright = u'2012-2015, Laurent COUSTET'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = 'unknown'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mlvpndoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'mlvpn.tex', u'mlvpn Documentation',
u'Laurent COUSTET', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mlvpn', u'mlvpn Documentation',
[u'Laurent COUSTET'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mlvpn', u'mlvpn Documentation',
u'Laurent COUSTET', 'mlvpn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
def override_release_version():
basepath = os.path.dirname(os.path.abspath(__file__))
import subprocess
cwd = os.path.abspath(os.path.join(basepath, "..", ".."))
print("cwd: %s" % cwd)
    # decode() keeps this working on Python 3 (check_output returns bytes);
    # strip() drops the trailing newline that would otherwise end up in release.
    out = subprocess.check_output(
        ["./git-version-gen", ".tarball-version"],
        cwd=cwd, shell=False).decode("utf-8").strip()
    version = out.split(".")[0]
    release = out
    return (version, release)
version, release = override_release_version()
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
html_theme = "default"
|
{
"content_hash": "58e153e2d17307038220bf75af1b716e",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 80,
"avg_line_length": 31.444444444444443,
"alnum_prop": 0.6956082786471479,
"repo_name": "jedisct1/MLVPN",
"id": "d27e431c65f673f8f277dfab499c7e6a03dd6cb5",
"size": "8340",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "218405"
},
{
"name": "C++",
"bytes": "29"
},
{
"name": "M4",
"bytes": "8041"
},
{
"name": "Makefile",
"bytes": "2315"
},
{
"name": "Shell",
"bytes": "8011"
}
],
"symlink_target": ""
}
|
"""Controllers for interactive and non-interactive widgets."""
__author__ = 'sll@google.com (Sean Lip)'
import collections
from core.controllers import base
from core.domain import widget_registry
class WidgetRepositoryHandler(base.BaseHandler):
"""Populates the widget repository pages."""
def get(self, widget_type):
"""Handles GET requests."""
try:
widget_list = widget_registry.Registry.get_widgets_of_type(
widget_type)
except Exception:
raise self.PageNotFoundException
widgets = collections.defaultdict(list)
for widget in widget_list:
widgets[widget.category].append(
widget.get_widget_instance_dict({}, {}))
for category in widgets:
widgets[category].sort()
response = {'widgetRepository': widgets}
self.render_json(response)
class WidgetHandler(base.BaseHandler):
"""Returns instance dicts for individual widgets."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
def post(self, widget_type, widget_id):
"""Handles POST requests for parameterized widgets."""
customization_args = self.payload.get('customization_args', {})
widget = widget_registry.Registry.get_widget_by_id(
widget_type, widget_id)
result = {
'widget': widget.get_widget_instance_dict(
customization_args, {}, preview_mode=True),
}
self.render_json(result)
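
# Response shape sketch (illustrative, inferred from the handlers above):
#   GET  -> {"widgetRepository": {"<category>": [<widget instance dicts>]}}
#   POST -> {"widget": <widget instance dict, rendered in preview mode>}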
|
{
"content_hash": "bfba089e11e3b27212e77904f75215a4",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 71,
"avg_line_length": 28.150943396226417,
"alnum_prop": 0.6327077747989276,
"repo_name": "openhatch/oh-missions-oppia-beta",
"id": "8040d650c6f799a13e653a100d133a3c9b16a0f9",
"size": "2097",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "core/controllers/widgets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "882"
},
{
"name": "CSS",
"bytes": "18641"
},
{
"name": "JavaScript",
"bytes": "388650"
},
{
"name": "Python",
"bytes": "815127"
},
{
"name": "Shell",
"bytes": "24026"
}
],
"symlink_target": ""
}
|
from flask import Flask, make_response
app = Flask(__name__)
@app.route('/')
def index():
import datetime
import io
import random
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
    fig = Figure()
    ax = fig.add_subplot(111)
    x = []
    y = []
    now = datetime.datetime.now()
    delta = datetime.timedelta(days=1)
    for i in range(10):
        x.append(now)
        now += delta
        y.append(random.randint(0, 1000))
    ax.plot_date(x, y, '-')
    ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
    fig.autofmt_xdate()
    canvas = FigureCanvas(fig)
    png_output = io.BytesIO()
    canvas.print_png(png_output)
    response = make_response(png_output.getvalue())
response.headers['Content-Type'] = 'image/png'
return response
if __name__ == '__main__':
app.run(debug=True)
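
# Usage note (hypothetical, not part of the original app): embed the chart in
# a page by pointing an <img> tag at the route, e.g. <img src="/">.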
|
{
"content_hash": "578491a549fe5d50d5d3449131ca6b27",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 25.846153846153847,
"alnum_prop": 0.6527777777777778,
"repo_name": "stepbot/resiliant-trader",
"id": "f7f1b98014f7cd5f0dc9e5daa8005336963e4b7a",
"size": "1008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3405"
},
{
"name": "Jupyter Notebook",
"bytes": "765766"
},
{
"name": "Python",
"bytes": "52369"
}
],
"symlink_target": ""
}
|
import bot
class Module(bot.Module):
index = "msg"
def register(self):
self.addcommand(self.say, "say", "Send a message to a channel.",
["[-notice]", "channel", "message..."])
self.addcommand(self.do, "do", "Send a /me message to a channel.",
["channel", "message..."])
def say(self, context, args):
if args.getstr("channel") not in self.server.channels:
return "That channel has not been joined."
context.exceptrights(["admin", "%s,op" % args.getstr("channel")])
command = "NOTICE" if args.getbool('notice') else "PRIVMSG"
self.server.sendto(command, args.getstr("channel"),
args.getstr("message"))
def do(self, context, args):
if args.getstr("channel") not in self.server.channels:
return "That channel has not been joined."
context.exceptrights(["admin", "%s,op" % args.getstr("channel")])
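        # "\1" is the CTCP delimiter (0x01); the ACTION framing below is what
        # IRC clients render as a /me message.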
self.server.sendto("PRIVMSG", args.getstr("channel"),
"\1ACTION %s\1" % args.getstr("message"))
bot.register.module(Module)
|
{
"content_hash": "23548d9d6eb96833eb11f57a426e1f70",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 74,
"avg_line_length": 36.774193548387096,
"alnum_prop": 0.5666666666666667,
"repo_name": "shacknetisp/vepybot",
"id": "cc9a673c845d771402f8fa57676a0ebb590795d5",
"size": "1164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/protocols/irc/core/msg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "186378"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('imager_images', '0002_auto_20170125_0218'),
]
operations = [
migrations.AlterField(
model_name='albums',
name='date_modified',
field=models.DateField(auto_now=True, null=True),
),
migrations.AlterField(
model_name='albums',
name='date_published',
field=models.DateField(auto_now=True, null=True),
),
migrations.AlterField(
model_name='albums',
name='date_uploaded',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='photos',
name='date_modified',
field=models.DateField(auto_now=True, null=True),
),
migrations.AlterField(
model_name='photos',
name='date_published',
field=models.DateField(auto_now=True, null=True),
),
migrations.AlterField(
model_name='photos',
name='date_uploaded',
field=models.DateField(auto_now_add=True, null=True),
),
]
|
{
"content_hash": "e934cea187667ccde02251e72bd21765",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 65,
"avg_line_length": 29.46511627906977,
"alnum_prop": 0.5524861878453039,
"repo_name": "AveryPratt/django-imager",
"id": "ea5b034a53fdf9a117ca9edd1e85c6918b051a09",
"size": "1340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_images/migrations/0003_auto_20170127_1757.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47656"
},
{
"name": "HTML",
"bytes": "25527"
},
{
"name": "JavaScript",
"bytes": "97109"
},
{
"name": "Python",
"bytes": "52210"
}
],
"symlink_target": ""
}
|
"""Job registries."""
from core.domain import activity_jobs_one_off
from core.domain import collection_jobs_one_off
from core.domain import exp_jobs_one_off
from core.domain import feedback_jobs_one_off
from core.domain import feedback_jobs_continuous
from core.domain import stats_jobs_continuous
from core.domain import stats_jobs_one_off
from core.domain import user_jobs_continuous
from core.domain import user_jobs_one_off
from core.domain import email_jobs_one_off
from core.domain import recommendations_jobs_one_off
# List of all manager classes for one-off batch jobs for which to show controls
# on the admin dashboard.
ONE_OFF_JOB_MANAGERS = [
activity_jobs_one_off.IndexAllActivitiesJobManager,
collection_jobs_one_off.CollectionMigrationJob,
email_jobs_one_off.EmailHashRegenerationOneOffJob,
exp_jobs_one_off.ExpSummariesContributorsOneOffJob,
exp_jobs_one_off.ExpSummariesCreationOneOffJob,
exp_jobs_one_off.ExplorationContributorsSummaryOneOffJob,
exp_jobs_one_off.ExplorationFirstPublishedOneOffJob,
exp_jobs_one_off.ExplorationMigrationJobManager,
exp_jobs_one_off.ExplorationValidityJobManager,
exp_jobs_one_off.ItemSelectionInteractionOneOffJob,
exp_jobs_one_off.ViewableExplorationsAuditJob,
feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob,
recommendations_jobs_one_off.ExplorationRecommendationsOneOffJob,
stats_jobs_one_off.StatisticsAudit,
user_jobs_one_off.DashboardSubscriptionsOneOffJob,
user_jobs_one_off.LongUserBiosOneOffJob,
user_jobs_one_off.UserContributionsOneOffJob,
user_jobs_one_off.UserDefaultDashboardOneOffJob,
user_jobs_one_off.UserFirstContributionMsecOneOffJob,
user_jobs_one_off.UserLastExplorationActivityOneOffJob,
user_jobs_one_off.UserProfilePictureOneOffJob,
user_jobs_one_off.UsernameLengthDistributionOneOffJob,
]
# List of all ContinuousComputation managers to show controls for on the
# admin dashboard.
# NOTE TO DEVELOPERS: When a new ContinuousComputation manager is defined,
# it should be registered here.
ALL_CONTINUOUS_COMPUTATION_MANAGERS = [
feedback_jobs_continuous.FeedbackAnalyticsAggregator,
stats_jobs_continuous.InteractionAnswerSummariesAggregator,
stats_jobs_continuous.StatisticsAggregator,
user_jobs_continuous.DashboardRecentUpdatesAggregator,
user_jobs_continuous.UserStatsAggregator,
]
class ContinuousComputationEventDispatcher(object):
"""Dispatches events to the relevant ContinuousComputation classes."""
@classmethod
def dispatch_event(cls, event_type, *args, **kwargs):
"""Dispatches an incoming event to the ContinuousComputation
classes which listen to events of that type.
"""
for klass in ALL_CONTINUOUS_COMPUTATION_MANAGERS:
if event_type in klass.get_event_types_listened_to():
klass.on_incoming_event(event_type, *args, **kwargs)
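
# Hypothetical dispatch call (the event name here is illustrative only; real
# event types come from each manager's get_event_types_listened_to()):
# ContinuousComputationEventDispatcher.dispatch_event(
#     'sample_event_type', 'exploration_id', {'payload': 'value'})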
|
{
"content_hash": "a32bc62a5c73e83443cae55c2b26eb11",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 44.84615384615385,
"alnum_prop": 0.7886792452830189,
"repo_name": "terrameijar/oppia",
"id": "24a9813b3cde23d38b836498f4a66046ca53f8d5",
"size": "3538",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "core/jobs_registry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "95342"
},
{
"name": "HTML",
"bytes": "850374"
},
{
"name": "JavaScript",
"bytes": "2597367"
},
{
"name": "Python",
"bytes": "3177521"
},
{
"name": "Shell",
"bytes": "46904"
}
],
"symlink_target": ""
}
|
from copy import copy
from types import GeneratorType
from unittest import TestCase
try:
from unittest import mock
except ImportError:
import mock
from rl.pool import MemoryPool
class MemoryPoolTest(TestCase):
def setUp(self):
self.pool = MemoryPool(5000)
self.test_data = {
0: {
'state': 1, 'action': 3, 'reward': 100,
'next_state': 6, 'priority': 1
},
1: {
'state': 2, 'action': 3, 'reward': 0,
'next_state': 7, 'priority': 2
},
-1: {
'state': 3, 'action': 3, 'reward': -100,
'next_state': 8, 'priority': 3
},
2: {
'state': 4, 'action': 3, 'reward': 0,
'next_state': 9, 'priority': 4
},
4: {
'state': 5, 'action': 3, 'reward': 100,
'next_state': 10, 'priority': 5
},
}
self.pool._experiences = copy(self.test_data)
def tearDown(self):
self.pool = None
@mock.patch('rl.pool.MemoryPool.amount')
def test_add(self, mock_amount):
# Handle mock amount
mock_amount.side_effect = lambda *args: len(self.pool._experiences)
return_value = self.pool.add(6, '456', -100, None, [1213, 'a'], 1)
self.assertEqual(self.pool._q_front, 3)
self.assertEqual(return_value, 6)
def test_add_negative(self):
self.pool.add(6, '456', -100, None, [1213, 'a'], True, -1)
self.assertEqual(self.pool._q_front, 3)
self.assertEqual(
self.pool._experiences[self.pool._q_front]['priority'], 1e-3
)
@mock.patch('rl.pool.MemoryPool.amount')
def test_remove(self, mock_amount):
# Handle mock amount
mock_amount.return_value = len(self.pool._experiences)
return_value = self.pool.remove(4)
self.assertNotIn(4, self.pool._experiences)
self.assertEqual(return_value, self.test_data[4])
def test_sample(self):
for key, record in self.pool.sample(5):
self.assertEqual(record, self.test_data[key])
def test_sample_greater(self):
for key, record in self.pool.sample(10):
self.assertEqual(record, self.test_data[key])
def test_update(self):
data = [(0, 5), (1, 4), (-1, 3), (2, 2), (4, 1)]
self.pool.update(data)
for i in range(len(data)):
self.assertEqual(self.pool._experiences[data[i][0]]['priority'], data[i][1])
def test_update_negative(self):
self.pool.update([(0, 1), (1, -1), (2, 0), (3, 1), (4, 0)])
self.assertEqual(self.pool._experiences[1]['priority'], 1e-3)
def test_size(self):
self.assertEqual(self.pool.size(), 5000)
def test_amount(self):
self.assertEqual(self.pool.amount(), 5)
def test_all(self):
self.assertIsInstance(self.pool.all(), GeneratorType)
result_dict = {}
for key, record in self.pool.all():
result_dict[key] = record
        # Compare the generator's output against the expected data (the
        # original compared test_data with itself, which is vacuous).
        for key, record in self.test_data.items():
            self.assertEqual(record, result_dict[key])
|
{
"content_hash": "666b278c32cfd8680077b4136cfd1a3a",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 88,
"avg_line_length": 32.101010101010104,
"alnum_prop": 0.5412208936438011,
"repo_name": "evan176/rl",
"id": "ab95bb3ea7086000f00cde7a83efb829f1982f6f",
"size": "3224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "963"
},
{
"name": "Python",
"bytes": "79128"
}
],
"symlink_target": ""
}
|
from itertools import chain, combinations, permutations
class RankedPairsResult:
def __init__(self, tallies, winners):
self.tallies = tallies
self.winners = winners
def results(responses):
"""
Returns the list of ranked-pairs winners based on responses.
Takes as input a list of rankings, e.g. [
[['A'], ['B'], ['NO']], # A, then B, then NO, then C
[['A', 'C'], ['B']], # A or C, then B, then NO
[['NO']] # NO, then A or B or C
]
"""
all_candidates = set(vote
for response in responses for rank in response
for vote in rank)
tallies = { # mapping of pairs (A, B) of candidates
pair: 0 # to numbers of responders who ranked A above B
for pair in permutations(all_candidates, 2)
}
for response in responses:
ranked = set(vote for rank in response for vote in rank)
ranks = chain(response, (all_candidates - ranked, ))
for rank_A, rank_B in combinations(ranks, 2):
for A in rank_A:
for B in rank_B:
tallies[A, B] += 1
def tally_ranking(pair):
"""
        The key function that implements the 'ranking' in ranked pairs.
Sorts pairs by highest in favor, or if equal, fewest opposed.
"""
A, B = pair
return (-tallies[A, B], tallies[B, A])
possible_pairs = sorted(tallies, key=tally_ranking)
# Vertices reachable from A in win graph
lower = {A: set((A, )) for A in all_candidates}
# Vertices A is reachable from in win graph
higher = {A: set((A, )) for A in all_candidates}
for A, B in possible_pairs:
if A not in lower[B]: # if we don't already have B > A, set A > B
for s in higher[A]: # if s > ... > A
for t in lower[B]: # and B > ... > t
lower[s].add(t) # then s > ... > t
higher[t].add(s)
winners = sorted(all_candidates, key=lambda A: len(higher[A]))
return RankedPairsResult(tallies, winners)
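
# Minimal usage sketch (ballots invented for illustration):
if __name__ == '__main__':
    ballots = [
        [['A'], ['B']],       # A first, then B, then everyone else
        [['B'], ['A', 'C']],  # B first, then A or C
        [['A']],              # A first, then B or C
    ]
    outcome = results(ballots)
    print(outcome.winners)    # candidates ordered strongest to weakest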
|
{
"content_hash": "5a2ddf3d75321b30744bc3c128cc0b64",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 74,
"avg_line_length": 39.01851851851852,
"alnum_prop": 0.5472235405790223,
"repo_name": "ASCIT/donut-python",
"id": "92e60176fca3aa634281667dd047a9d0b1a49cbf",
"size": "2107",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "donut/modules/voting/ranked_pairs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "532"
},
{
"name": "HTML",
"bytes": "13004"
},
{
"name": "Python",
"bytes": "3957"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import datetime
import logging
from google.appengine.api import datastore_errors
from google.appengine.datastore import datastore_query
from google.appengine.ext import deferred
from dashboard.api import api_request_handler
from dashboard.common import stored_object
from dashboard.common import utils
from dashboard.pinpoint.models import job
_BATCH_SIZE = 50
_STATUS_KEY = 'job_migration_status'
class Migrate(api_request_handler.ApiRequestHandler):
def _CheckUser(self):
self._CheckIsLoggedIn()
if not utils.IsAdministrator():
raise api_request_handler.ForbiddenError()
def Get(self):
return stored_object.Get(_STATUS_KEY) or {}
def Post(self):
status = stored_object.Get(_STATUS_KEY)
if not status:
_Start()
return self.Get()
def _Start():
query = job.Job.query(job.Job.task == None)
status = {
'count': 0,
'started': datetime.datetime.now().isoformat(),
'total': query.count(),
'errors': 0,
}
stored_object.Set(_STATUS_KEY, status)
deferred.defer(_Migrate, status, None)
def _Migrate(status, cursor=None):
if cursor:
cursor = datastore_query.Cursor(urlsafe=cursor)
query = job.Job.query(job.Job.task == None)
jobs, next_cursor, more = query.fetch_page(_BATCH_SIZE, start_cursor=cursor)
# Because individual job instances might fail to be persisted for some reason
# (e.g. entities exceeding the entity size limit) we'll perform the updates
# one at a time. This is not an ideal state, since we'll want to be able to
# migrate all jobs to an alternative structure in the future, but we recognise
# that partial success is better than total failure.
for j in jobs:
try:
j.put()
status['count'] += 1
except datastore_errors.Error as e:
logging.error('Failed migrating job %s: %s', j.job_id, e)
status['errors'] += 1
if more:
stored_object.Set(_STATUS_KEY, status)
deferred.defer(_Migrate, status, next_cursor.urlsafe())
else:
stored_object.Set(_STATUS_KEY, None)
|
{
"content_hash": "3028f2412240d7ccf3e8729a258845fb",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 80,
"avg_line_length": 29.246575342465754,
"alnum_prop": 0.7030444964871194,
"repo_name": "endlessm/chromium-browser",
"id": "98fb120fb6106bab74655ad12cb258eba7278926",
"size": "2298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/catapult/dashboard/dashboard/pinpoint/handlers/migrate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import errno
import json
import logging
import os
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
logger = logging.getLogger(__name__)
class DashboardExporter(object):
def process_dashboard(self, project_name, dashboard_name, dashboard_data):
pass
class ProjectProcessor(object):
def __init__(self, dashboard_processors):
"""
:type dashboard_processors: list[grafana_dashboards.builder.DashboardExporter]
"""
super(ProjectProcessor, self).__init__()
self._dashboard_processors = dashboard_processors
def process_projects(self, projects, parent_context=None):
"""
:type projects: list[grafana_dashboards.components.projects.Project]
:type parent_context: dict
"""
for project in projects:
logger.info("Processing project '%s'", project.name)
for context in project.get_contexts(parent_context):
for dashboard in project.get_dashboards():
json_obj = dashboard.gen_json(context)
dashboard_name = context.expand_placeholders(dashboard.name)
for processor in self._dashboard_processors:
processor.process_dashboard(project.name, dashboard_name, json_obj)
class FileExporter(DashboardExporter):
def __init__(self, output_folder):
super(FileExporter, self).__init__()
self._output_folder = output_folder
if not os.path.exists(self._output_folder):
os.makedirs(self._output_folder)
if not os.path.isdir(self._output_folder):
raise Exception("'{0}' must be a directory".format(self._output_folder))
def process_dashboard(self, project_name, dashboard_name, dashboard_data):
super(FileExporter, self).process_dashboard(project_name, dashboard_name, dashboard_data)
dirname = os.path.join(self._output_folder, project_name)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
dashboard_path = os.path.join(dirname, dashboard_name + '.json')
logger.info("Saving dashboard '%s' to '%s'", dashboard_name, os.path.abspath(dashboard_path))
with open(dashboard_path, 'w') as f:
json.dump(dashboard_data, f, sort_keys=True, indent=2, separators=(',', ': '))
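
# Minimal usage sketch (commented out; `projects` is assumed to come from
# grafana_dashboards' component loaders, which are outside this module):
# exporter = FileExporter('/tmp/dashboards')
# ProjectProcessor([exporter]).process_projects(projects)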
|
{
"content_hash": "857c4a8da839d2cd48864e2cbf8d2072",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 101,
"avg_line_length": 35.73529411764706,
"alnum_prop": 0.6353909465020576,
"repo_name": "jakubplichta/grafana-dashboard-builder",
"id": "d01aceb2140c1e6c994a6f7334188db811c557ad",
"size": "3057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grafana_dashboards/exporter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "102888"
}
],
"symlink_target": ""
}
|
"""URLs for the ``task_list`` app."""
from django.conf.urls.defaults import include, patterns, url
urlpatterns = patterns(
'',
url(r'^', include('task_list.urls.simple')),
url(r'^', include('task_list.urls.ctype')),
)
|
{
"content_hash": "f1d05a733df303e828a68a2a1a049af8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.6336206896551724,
"repo_name": "bitmazk/django-task-list",
"id": "d1a85e5942d708a89ecd052e81c7d6e5f1a8da5f",
"size": "232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "task_list/urls/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90719"
}
],
"symlink_target": ""
}
|
from .solver import model
from .dao import localDAO
from .logger import Logger
def irt(data_src,
dao_type='memory',
theta_bnds=[-4, 4], num_theta=11,
alpha_bnds=[0.25, 2], beta_bnds=[-2, 2], in_guess_param={},
model_spec='2PL',
max_iter=10, tol=1e-3, nargout=2,
is_parallel=False, num_cpu=6, check_interval=60,
mode='debug', log_path=None):
# add logging
logger = Logger.logger(log_path)
# load data
logger.info("start loading data")
if dao_type == 'memory':
dao_instance = localDAO(data_src, logger)
elif dao_type == "db":
dao_instance = data_src
else:
raise ValueError("dao type needs to be either memory or db")
logger.info("data loaded")
# setup the model
if model_spec == '2PL':
mod = model.IRT_MMLE_2PL(dao_instance,
logger,
dao_type=dao_type,
is_parallel=is_parallel,
num_cpu=num_cpu,
check_interval=check_interval,
mode=mode)
else:
raise Exception('Unknown model specification.')
# specify the irt parameters
mod.set_options(theta_bnds, num_theta, alpha_bnds, beta_bnds, max_iter, tol)
mod.set_guess_param(in_guess_param)
# solve
mod.solve_EM()
logger.info("parameter estimated")
# output
item_param_dict = mod.get_item_param()
logger.info("parameter retrieved")
if nargout == 1:
return item_param_dict
elif nargout == 2:
user_param_dict = mod.get_user_param()
return item_param_dict, user_param_dict
else:
        raise Exception('Invalid number of arguments')
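
# Hypothetical call (assumes the memory DAO's tuple format of
# (user_id, item_id, response); values are invented for illustration):
# item_param, user_param = irt([(1, 'q1', 1), (1, 'q2', 0), (2, 'q1', 0)])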
|
{
"content_hash": "9eda65d1388dfe1b1d713fc7bd6cf110",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 80,
"avg_line_length": 29.56140350877193,
"alnum_prop": 0.5910979228486647,
"repo_name": "17zuoye/pyirt",
"id": "7bf3fd971e56d0aa0895d0941eaa3ee66d18a832",
"size": "1706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyirt/_pyirt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "178"
},
{
"name": "Python",
"bytes": "73657"
}
],
"symlink_target": ""
}
|
from simple_graph import Graph
import pytest
DFT_A = [u'A', u'E', u'C', u'G', u'B', u'F', u'D']
BFT_A = [u'A', u'B', u'C', u'E', u'D', u'F', u'G']
@pytest.fixture(scope='function')
def empty_graph():
''' Define an empty graph. '''
return Graph()
@pytest.fixture(scope='function')
def non_empty_graph():
    ''' Define a graph with a node. '''
g = Graph()
g.add_node("A")
return g
@pytest.fixture(scope='function')
def edges_graph():
''' Define a graph with an edge. '''
g = Graph()
g.add_edge("A", "B")
return g
@pytest.fixture(scope='function')
def non_multi_connected_nodes():
''' Define a graph with no multi-connected nodes. '''
g = Graph()
g.add_edge("A", "B")
g.add_edge("A", "C")
g.add_edge("A", "E")
g.add_edge("B", "D")
g.add_edge("B", "F")
g.add_edge("C", "G")
return g
@pytest.fixture(scope='function')
def multi_connected_nodes(non_multi_connected_nodes):
''' Define a graph with multi-connected nodes. '''
g = non_multi_connected_nodes
g.add_edge("F", "E")
return g
@pytest.fixture(scope='function')
def cyclic_graph(multi_connected_nodes):
''' Define a graph with a cycle. '''
g = multi_connected_nodes
g.add_edge("E", "A")
return g
@pytest.fixture(scope='function')
def orphan_node(non_multi_connected_nodes):
''' Define a graph with an orphaned node. '''
g = non_multi_connected_nodes
g.add_edge("H", "E")
return g
@pytest.fixture(scope='function')
def childless_orphan_node(non_multi_connected_nodes):
''' Define a graph with completely unconnected node. '''
g = non_multi_connected_nodes
g.add_node("I")
return g
def test_init(empty_graph):
''' Test graph creation. '''
g = empty_graph
assert g.graph == {}
def test_add_node_empty(empty_graph):
''' Test node adding. '''
# Add to empty graph
g = empty_graph
g.add_node("A")
assert "A" in g.graph
assert g.graph["A"] == []
def test_add_node_non_empty(non_empty_graph):
''' Test node adding. '''
g = non_empty_graph
# Add to populated graph
g.add_node("B")
assert "B" in g.graph
assert g.graph["B"] == []
def test_nodes_empty(empty_graph):
''' Test node listing. '''
g = empty_graph
# Test empty graph
assert g.nodes() == []
def test_nodes_non_empty(non_empty_graph):
    ''' Test node listing. '''
    # Use the fixture (the original built a fresh Graph, dropping node "A"),
    # and check both memberships explicitly ('"A" and "B" in g.graph' only
    # tested "B" due to operator precedence).
    g = non_empty_graph
    g.add_node("B")
    # Test populated graph
    assert "A" in g.graph and "B" in g.graph
def test_add_edge_empty(empty_graph):
''' Test adding an edge to graph. '''
g = empty_graph
# Add to empty graph
g.add_edge("A", "B")
assert "A" in g.graph
assert "B" in g.graph
assert "B" in g.graph["A"]
def test_add_edge_existing_node(non_empty_graph):
''' Test adding an edge to graph. '''
# Add edge from existing node
g = non_empty_graph
g.add_edge("A", "C")
assert "C" in g.graph
assert "C" in g.graph["A"]
def test_add_edge_non_existing_node(non_empty_graph):
''' Test adding an edge to graph. '''
# Add edge from non-existing node
g = non_empty_graph
g.add_edge("C", "A")
assert "C" in g.graph
assert "A" in g.graph["C"]
def test_add_edge_existing_nodes(non_empty_graph):
    ''' Test adding an edge to graph. '''
# Add edge between two existing nodes
g = non_empty_graph
g.add_node("C")
g.add_edge("A", "C")
assert "C" in g.graph["A"]
def test_edges_empty(empty_graph):
''' Test edge listing. '''
g = empty_graph
# Test empty graph
assert [] == g.edges()
def test_edges_non_empty(edges_graph):
g = edges_graph
# Test non-empty graph
assert ("A", "B") in g.edges()
def test_del_edge_empty(empty_graph):
''' Test deleting an edge from graph. '''
g = empty_graph
# Empty graph
with pytest.raises(KeyError):
g.del_edge("A", "B")
def test_del_edge_no_neighbors(non_empty_graph):
''' Test deleting an edge from graph. '''
    # Node1 doesn't exist
g = non_empty_graph
with pytest.raises(KeyError):
g.del_edge("C", "A")
    # Node2 doesn't exist
with pytest.raises(ValueError):
g.del_edge("A", "C")
# Node2 exists but isn't a neighbor
g.add_node("B")
with pytest.raises(ValueError):
g.del_edge("A", "B")
def test_del_edge(edges_graph):
''' Test deleting an edge from graph. '''
g = edges_graph
# Both nodes exist and one is a neighbor
g.del_edge("A", "B")
assert ("A", "B") not in g.edges()
def test_del_edge_reverse(edges_graph):
''' Test deleting an edge from graph. '''
g = edges_graph
# Both nodes exist and but the edge is the opposite direction
with pytest.raises(ValueError):
g.del_edge("B", "A")
assert ("A", "B") in g.edges()
def test_del_node(empty_graph):
''' Test deleting a node. '''
g = empty_graph
# Empty graph
with pytest.raises(KeyError):
g.del_node("A")
def test_del_node_unconnected(non_empty_graph):
''' Test deleting a node. '''
g = non_empty_graph
# Unconnected node
g.del_node("A")
assert "A" not in g.graph
def test_del_node_with_neighbor(edges_graph):
''' Test delete node with neighbor. '''
g = edges_graph
g.del_node("A")
assert "A" not in g.graph
assert "B" in g.graph
assert ("A", "B") not in g.edges()
def test_del_node_neighbor(edges_graph):
    ''' Test deleting a node that is a neighbor. '''
g = edges_graph
g.del_node("B")
assert "B" not in g.graph
assert "A" in g.graph
assert ("A", "B") not in g.edges()
def test_has_node_empty(empty_graph):
''' Test if a node exists in an empty graph. '''
g = empty_graph
# Empty graph
assert g.has_node("A") is False
def test_has_node_non_empty(non_empty_graph):
''' Test if a node exists. '''
g = non_empty_graph
g.add_node("A")
assert g.has_node("A") is True
assert g.has_node("B") is False
def test_neighbors_empty(empty_graph):
''' Test neighbor listing of empty graph. '''
g = empty_graph
# Empty graph
with pytest.raises(KeyError):
g.neighbors("A")
def test_neighbors_non_empty(non_empty_graph):
''' Test neighbor listing of an unconnected node. '''
g = non_empty_graph
# Node without neighbors
g.add_node("A")
assert g.neighbors("A") == []
def test_neighbors_edge(edges_graph):
''' Test neighbor listing of a node with an edge. '''
g = edges_graph
assert g.neighbors("A") == ["B"]
    # Check that the neighbor has not reverse-added the first node
assert g.neighbors("B") == []
# Add additional neighbors
g.add_edge("A", "C")
assert g.neighbors("A") == ["B", "C"]
def test_adjacent_empty(empty_graph):
''' Test if two nodes are adjacent in an empty graph. '''
g = empty_graph
with pytest.raises(KeyError):
g.adjacent("A", "B")
def test_adjacent_non_empty(non_empty_graph):
    ''' Test if two nodes are adjacent in a graph with one node. '''
    # First node exists but the other doesn't
    g = non_empty_graph
    assert g.adjacent("A", "B") is False
    # Second node exists but the other doesn't
    with pytest.raises(KeyError):
        g.adjacent("B", "A")
def test_adjacent_edge(edges_graph):
    ''' Test if two nodes are adjacent in a graph with an edge. '''
# Second node is neighbor to the first, but the first is not neighbor
# to the second.
g = edges_graph
assert g.adjacent("A", "B") is True
assert g.adjacent("B", "A") is False
def test_DFT_non(non_multi_connected_nodes):
    ''' Test depth-first traversal without multi-connected nodes. '''
    # Start at A
assert DFT_A == non_multi_connected_nodes.depth_first_traversal("A")
# Start at B
assert [u'B', u'F', u'D'] == \
non_multi_connected_nodes.depth_first_traversal("B")
# Start at G
assert [u'G'] == \
non_multi_connected_nodes.depth_first_traversal("G")
def test_DFT_mul(multi_connected_nodes):
    ''' Test depth-first traversal with multi-connected nodes. '''
    # Start at A
assert DFT_A == multi_connected_nodes.depth_first_traversal("A")
# Start at B
assert [u'B', u'F', u'E', u'D'] == \
multi_connected_nodes.depth_first_traversal("B")
def test_DFT_cyc(cyclic_graph):
    ''' Test depth-first traversal of a cyclic graph. '''
    # Start at A
assert DFT_A == cyclic_graph.depth_first_traversal("A")
# Start at B
assert [u'B', u'F', u'E', u'A', u'C', u'G', u'D'] == \
cyclic_graph.depth_first_traversal("B")
def test_DFT_orph(orphan_node):
    ''' Test depth-first traversal of a graph with an orphaned node. '''
    # Start at A
    assert DFT_A == orphan_node.depth_first_traversal("A")
    # Start at H
    assert [u'H', u'E'] == \
        orphan_node.depth_first_traversal("H")
def test_DFT_corph(childless_orphan_node):
    ''' Test depth-first traversal with a completely unconnected node. '''
    # Start at A
    assert DFT_A == childless_orphan_node.depth_first_traversal("A")
    # Start at I
assert [u'I'] == \
childless_orphan_node.depth_first_traversal("I")
def test_BFT(non_multi_connected_nodes):
    ''' Test breadth-first traversal without multi-connected nodes. '''
    # Start at A
assert BFT_A == non_multi_connected_nodes.breadth_first_traversal("A")
# Start at B
assert [u'B', u'D', u'F'] == \
non_multi_connected_nodes.breadth_first_traversal("B")
def test_BFT_mul(multi_connected_nodes):
    ''' Test breadth-first traversal with multi-connected nodes. '''
    # Start at A
assert BFT_A == multi_connected_nodes.breadth_first_traversal("A")
# Start at B
assert [u'B', u'D', u'F', u'E'] == \
multi_connected_nodes.breadth_first_traversal("B")
def test_BFT_cyc(cyclic_graph):
    ''' Test breadth-first traversal of a cyclic graph. '''
    # Start at A
assert BFT_A == cyclic_graph.breadth_first_traversal("A")
# Start at B
assert [u'B', u'D', u'F', u'E', u'A', u'C', u'G'] == \
cyclic_graph.breadth_first_traversal("B")
def test_BFT_orph(orphan_node):
    ''' Test breadth-first traversal of a graph with an orphaned node. '''
    # Start at A
assert BFT_A == orphan_node.breadth_first_traversal("A")
# Start at B
assert [u'B', u'D', u'F'] == \
orphan_node.breadth_first_traversal("B")
def test_BFT_corph(childless_orphan_node):
    ''' Test breadth-first traversal with a completely unconnected node. '''
    # Start at A
assert BFT_A == childless_orphan_node.breadth_first_traversal("A")
# Start at B
assert [u'B', u'D', u'F'] == \
childless_orphan_node.breadth_first_traversal("B")
|
{
"content_hash": "dd89999f1f0867b07774a049eb3afc66",
"timestamp": "",
"source": "github",
"line_count": 389,
"max_line_length": 74,
"avg_line_length": 25.704370179948587,
"alnum_prop": 0.6025602560256026,
"repo_name": "constanthatz/data-structures",
"id": "22268f027cdf7add57d87ced3c0f046a775957bc",
"size": "10021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_simple_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52502"
}
],
"symlink_target": ""
}
|
import random
def setup_cats(num_cats):
cat_names = ["Fluffles", "Enzo", "Lisa", "Berto", "Jillian", "Amy",
"Bella", "Moe", "Tibby"]
foods = ["vinegar", "vegemite", "vanilla", "acorn squash",
"Canadian bacon", "alligator", "cayenne pepper", "adobo",
"almond butter",
"garlic"]
cats = []
for _ in range(num_cats):
new_cat = {
"name": random.choice(cat_names),
"last_ate": None
}
cats.append(new_cat)
for cat in cats:
cat["last_ate"] = random.choice(foods)
return cats
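# A quick illustration (not part of the original module) of the shape this
# returns -- names and foods are chosen at random, so exact values will differ:
#
#   >>> setup_cats(2)
#   [{'name': 'Tibby', 'last_ate': 'vinegar'},
#    {'name': 'Moe', 'last_ate': 'adobo'}]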
|
{
"content_hash": "a40683d61006d8b57f3abbdb2b9b42f8",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 71,
"avg_line_length": 25.375,
"alnum_prop": 0.5024630541871922,
"repo_name": "keeppythonweird/catinabox",
"id": "bfefbb495df84777a8a82ef55fc4e6d6cc3cc1d0",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/complected/cats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9606"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.actions import *
# A color layer is a Layer with a color attribute
class HelloWorld(cocos.layer.ColorLayer):
def __init__(self):
# blueish color
super(HelloWorld, self).__init__(64, 64, 224, 255)
# a cocos.text.Label is a wrapper of pyglet.text.Label
# with the benefit of being a CocosNode
label = cocos.text.Label('Hello, World!',
font_name='Times New Roman',
font_size=32,
anchor_x='center', anchor_y='center')
# set the label in the center of the screen
label.position = 320, 240
self.add(label)
# similar to cocos.text.Label, a cocos.sprite.Sprite
        # is a subclass of pyglet.sprite.Sprite with the benefits of
# being a CocosNode.
sprite = cocos.sprite.Sprite('grossini.png')
# sprite in the center of the screen (default is 0,0)
sprite.position = 320, 240
        # sprite scale attribute starts with 3 (default 1)
sprite.scale = 3
# add the sprite as a child, but with z=1 (default is z=0).
# this means that the sprite will be drawn on top of the label
self.add(sprite, z=1)
# create a ScaleBy action that lasts 2 seconds
scale = ScaleBy(3, duration=2)
# tell the label to scale and scale back and repeat these 2 actions forever
label.do(Repeat(scale + Reverse(scale)))
        # tell the sprite to scale back and then scale, and repeat these 2 actions forever
sprite.do(Repeat(Reverse(scale) + scale))
if __name__ == "__main__":
# director init takes the same arguments as pyglet.window
cocos.director.director.init()
# We create a new layer, an instance of HelloWorld
hello_layer = HelloWorld()
# tell the layer to perform a Rotate action in 10 seconds.
hello_layer.do(RotateBy(360, duration=10))
# A scene that contains the layer hello_layer
main_scene = cocos.scene.Scene(hello_layer)
# And now, start the application, starting with main_scene
cocos.director.director.run(main_scene)
# or you could have written, without so many comments:
# director.run( cocos.scene.Scene( HelloWorld() ) )
|
{
"content_hash": "ffc4de0c096e162b2866087bd21c9feb",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 89,
"avg_line_length": 34.49315068493151,
"alnum_prop": 0.6338363780778395,
"repo_name": "dangillet/cocos",
"id": "5dda25ec6ef0a27e6c7f10acbcc5d0782064a049",
"size": "2561",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "samples/hello_world_actions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7097"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Python",
"bytes": "1352722"
}
],
"symlink_target": ""
}
|
from importlib import import_module
def import_from_path(path):
parts = path.split(".")
module_path = ".".join(parts[:-1])
attribute_name = parts[-1]
module = import_module(module_path)
attribute = getattr(module, attribute_name)
return attribute
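# A minimal usage sketch (illustrative, not part of the original module): the
# path is split on the final dot, everything before it is imported as a module,
# and the last component is looked up as an attribute on that module.
#
#   >>> import_from_path("os.path.join")("a", "b")
#   'a/b'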
|
{
"content_hash": "07126ecf4c3b8c18651517f614c0b6d9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 47,
"avg_line_length": 21.23076923076923,
"alnum_prop": 0.6630434782608695,
"repo_name": "wiki-ai/ores",
"id": "e8a3d15b9d23d3f16cc4350871cef00603cb5f3e",
"size": "276",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ores/utilities/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "433"
},
{
"name": "Dockerfile",
"bytes": "481"
},
{
"name": "HTML",
"bytes": "9290"
},
{
"name": "JavaScript",
"bytes": "5003"
},
{
"name": "Jupyter Notebook",
"bytes": "44108"
},
{
"name": "Makefile",
"bytes": "276"
},
{
"name": "Python",
"bytes": "157474"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
from numpy.testing import assert_allclose
import theano
from keras.layers import core
class TestLayerBase(unittest.TestCase):
def test_input_output(self):
nb_samples = 10
input_dim = 5
layer = core.Layer()
# As long as there is no input, an error should be raised.
for train in [True, False]:
self.assertRaises(AttributeError, layer.get_input, train)
self.assertRaises(AttributeError, layer.get_output, train)
# Once an input is provided, it should be reachable through the
# appropriate getters
input = np.ones((nb_samples, input_dim))
layer.input = theano.shared(value=input)
for train in [True, False]:
assert_allclose(layer.get_input(train).eval(), input)
assert_allclose(layer.get_output(train).eval(), input)
def test_connections(self):
nb_samples = 10
input_dim = 5
layer1 = core.Layer()
layer2 = core.Layer()
input = np.ones((nb_samples, input_dim))
layer1.input = theano.shared(value=input)
# As long as there is no previous layer, an error should be raised.
for train in [True, False]:
self.assertRaises(AttributeError, layer2.get_input, train)
# After connecting, input of layer1 should be passed through
layer2.set_previous(layer1)
for train in [True, False]:
assert_allclose(layer2.get_input(train).eval(), input)
assert_allclose(layer2.get_output(train).eval(), input)
class TestConfigParams(unittest.TestCase):
"""
Test the constructor, config and params functions of all layers in core.
"""
def _runner(self, layer):
conf = layer.get_config()
assert (type(conf) == dict)
param = layer.get_params()
# Typically a list or a tuple, but may be any iterable
assert hasattr(param, '__iter__')
def test_base(self):
layer = core.Layer()
self._runner(layer)
def test_masked(self):
layer = core.MaskedLayer()
self._runner(layer)
def test_merge(self):
layer_1 = core.Layer()
layer_2 = core.Layer()
layer = core.Merge([layer_1, layer_2])
self._runner(layer)
def test_dropout(self):
layer = core.Dropout(0.5)
self._runner(layer)
def test_activation(self):
layer = core.Activation('linear')
self._runner(layer)
def test_reshape(self):
layer = core.Reshape(10, 10)
self._runner(layer)
def test_flatten(self):
layer = core.Flatten()
self._runner(layer)
def test_repeat_vector(self):
layer = core.RepeatVector(10)
self._runner(layer)
def test_dense(self):
layer = core.Dense(10, 10)
self._runner(layer)
def test_act_reg(self):
layer = core.ActivityRegularization(0.5, 0.5)
self._runner(layer)
def test_time_dist_dense(self):
layer = core.TimeDistributedDense(10, 10)
self._runner(layer)
def test_autoencoder(self):
layer_1 = core.Layer()
layer_2 = core.Layer()
layer = core.AutoEncoder(layer_1, layer_2)
self._runner(layer)
def test_maxout_dense(self):
layer = core.MaxoutDense(10, 10)
self._runner(layer)
class TestMasking(unittest.TestCase):
"""Test the Masking class"""
def test_sequences(self):
"""Test masking sequences with zeroes as padding"""
# integer inputs, one per timestep, like embeddings
layer = core.Masking()
func = theano.function([layer.input], layer.get_output_mask())
self.assertTrue(np.all(
# get mask for this input
func(np.array(
[[[1], [2], [3], [0]],
[[0], [4], [5], [0]]], dtype=np.int32)) ==
# This is the expected output mask, one dimension less
np.array([[1, 1, 1, 0], [0, 1, 1, 0]])))
def test_non_zero(self):
"""Test masking with non-zero mask value"""
layer = core.Masking(5)
func = theano.function([layer.input], layer.get_output_mask())
self.assertTrue(np.all(
            # get mask for this input; if not all the values are 5, it shouldn't be masked
func(np.array(
[[[1, 1], [2, 1], [3, 1], [5, 5]],
[[1, 5], [5, 0], [0, 0], [0, 0]]], dtype=np.int32)) ==
# This is the expected output mask, one dimension less
np.array([[1, 1, 1, 0], [1, 1, 1, 1]])))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "3ad553655d089645400c87f43c71f68f",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 84,
"avg_line_length": 31.02013422818792,
"alnum_prop": 0.5822154911293812,
"repo_name": "zhangxujinsh/keras",
"id": "9d7bbd536e9c021683a39f5cb4aeabf85c938757",
"size": "4622",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/auto/keras/layers/test_core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "298214"
}
],
"symlink_target": ""
}
|
import json
from contextlib import closing
from typing import Any, Dict, Optional
from flask_babel import gettext as __
from superset.commands.base import BaseCommand
from superset.databases.commands.exceptions import (
DatabaseOfflineError,
DatabaseTestConnectionFailedError,
InvalidEngineError,
InvalidParametersError,
)
from superset.databases.dao import DatabaseDAO
from superset.databases.utils import make_url_safe
from superset.db_engine_specs import get_engine_specs
from superset.db_engine_specs.base import BasicParametersMixin
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.extensions import event_logger
from superset.models.core import Database
BYPASS_VALIDATION_ENGINES = {"bigquery"}
class ValidateDatabaseParametersCommand(BaseCommand):
def __init__(self, parameters: Dict[str, Any]):
self._properties = parameters.copy()
self._model: Optional[Database] = None
def run(self) -> None:
engine = self._properties["engine"]
engine_specs = get_engine_specs()
if engine in BYPASS_VALIDATION_ENGINES:
# Skip engines that are only validated onCreate
return
if engine not in engine_specs:
raise InvalidEngineError(
SupersetError(
message=__(
'Engine "%(engine)s" is not a valid engine.',
engine=engine,
),
error_type=SupersetErrorType.GENERIC_DB_ENGINE_ERROR,
level=ErrorLevel.ERROR,
extra={"allowed": list(engine_specs), "provided": engine},
),
)
engine_spec = engine_specs[engine]
if not hasattr(engine_spec, "parameters_schema"):
raise InvalidEngineError(
SupersetError(
message=__(
'Engine "%(engine)s" cannot be configured through parameters.',
engine=engine,
),
error_type=SupersetErrorType.GENERIC_DB_ENGINE_ERROR,
level=ErrorLevel.ERROR,
extra={
"allowed": [
name
for name, engine_spec in engine_specs.items()
if issubclass(engine_spec, BasicParametersMixin)
],
"provided": engine,
},
),
)
# perform initial validation
errors = engine_spec.validate_parameters( # type: ignore
self._properties.get("parameters", {})
)
if errors:
event_logger.log_with_context(action="validation_error", engine=engine)
raise InvalidParametersError(errors)
serialized_encrypted_extra = self._properties.get("encrypted_extra", "{}")
try:
encrypted_extra = json.loads(serialized_encrypted_extra)
except json.decoder.JSONDecodeError:
encrypted_extra = {}
# try to connect
sqlalchemy_uri = engine_spec.build_sqlalchemy_uri( # type: ignore
self._properties.get("parameters"),
encrypted_extra,
)
if self._model and sqlalchemy_uri == self._model.safe_sqlalchemy_uri():
sqlalchemy_uri = self._model.sqlalchemy_uri_decrypted
database = DatabaseDAO.build_db_for_connection_test(
server_cert=self._properties.get("server_cert", ""),
extra=self._properties.get("extra", "{}"),
impersonate_user=self._properties.get("impersonate_user", False),
encrypted_extra=serialized_encrypted_extra,
)
database.set_sqlalchemy_uri(sqlalchemy_uri)
database.db_engine_spec.mutate_db_for_connection_test(database)
engine = database.get_sqla_engine()
try:
with closing(engine.raw_connection()) as conn:
alive = engine.dialect.do_ping(conn)
except Exception as ex:
url = make_url_safe(sqlalchemy_uri)
context = {
"hostname": url.host,
"password": url.password,
"port": url.port,
"username": url.username,
"database": url.database,
}
errors = database.db_engine_spec.extract_errors(ex, context)
raise DatabaseTestConnectionFailedError(errors) from ex
if not alive:
raise DatabaseOfflineError(
SupersetError(
message=__("Database is offline."),
error_type=SupersetErrorType.GENERIC_DB_ENGINE_ERROR,
level=ErrorLevel.ERROR,
),
)
def validate(self) -> None:
database_name = self._properties.get("database_name")
if database_name is not None:
self._model = DatabaseDAO.get_database_by_name(database_name)
|
{
"content_hash": "58a217c182c4cb754a987c67421b6f51",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 87,
"avg_line_length": 38.97674418604651,
"alnum_prop": 0.5763723150357996,
"repo_name": "airbnb/caravel",
"id": "a9f1633a181443e7d7c5d980f40cc384f5f5b67a",
"size": "5813",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "superset/databases/commands/validate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57416"
},
{
"name": "HTML",
"bytes": "112618"
},
{
"name": "JavaScript",
"bytes": "406496"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "588212"
},
{
"name": "Shell",
"bytes": "980"
}
],
"symlink_target": ""
}
|
from concurrent import futures
import itertools
import sys
import re
import pygraphviz as gv
import ply.lex as lex
import ply.yacc as yacc
from functools import lru_cache as memoize
diagnostic_header_pattern = re.compile(r'[^ ]+\.[^ ]+:[0-9]+:[0-9]+: ([^ ]*): (.*)')
in_file_included_from_pattern = re.compile('In file included from .*:')
in_instantiation_of_template_pattern = re.compile('in instantiation of (.*) (?:requested|required) here')
static_warning_marked_deprecated_here_pattern = re.compile('\'static_warning\' has been explicitly marked deprecated here')
class Diagnostic:
def __init__(self, kind, message):
self.kind = kind
self.message = message
self.template_instantiation_trace = []
tokens = (
'LPAREN',
'RPAREN',
'LBRACKET',
'RBRACKET',
'LBRACE',
'RBRACE',
'LESS_THAN',
'GREATER_THAN',
'DOUBLE_COLON',
'COMMA',
'IDENTIFIER',
'ASTERISK',
'AMPERSAND',
)
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'{'
t_RBRACE = r'}'
t_LESS_THAN = r'<'
t_GREATER_THAN = r'>'
t_DOUBLE_COLON = r'::'
t_COMMA = r','
t_ASTERISK = r'\*'
t_AMPERSAND = r'&'
# We conflate numbers as identifiers too, we don't care about the difference.
t_IDENTIFIER = r'[a-zA-Z0-9_]+'
t_ignore = ' \t'
def t_error(t):
raise Exception("Illegal character '%s' followed by %s" % (t.value[0], t.value[1:]))
class LayoutNeedsMultipleLinesException(Exception):
pass
class AstNode:
def __str__(self):
return ''.join(self)
class TerminalAstNode(AstNode):
def __init__(self, s):
self.s = s
self.is_multiline = (s == '\n')
# last_line_length is the string length if s is not a multiline string.
# For multiline strings ending in a newline, this is 0.
if self.is_multiline:
self.first_line_length = 0
self.last_line_length = 0
self.max_line_length = 0
else:
# This never happens ATM, so we don't handle it.
assert '\n' not in s
self.first_line_length = len(s)
self.last_line_length = len(s)
self.max_line_length = len(s)
def __iter__(self):
return iter((self.s,))
class NonTerminalAstNode(AstNode):
def __init__(self, children_ast_nodes):
self.children_ast_nodes = children_ast_nodes
first_line_length = 0
last_line_length = 0
is_multiline = False
max_line_length = 0
for node in children_ast_nodes:
if node.is_multiline:
last_line_length = node.last_line_length
max_line_length = max(max_line_length, last_line_length + node.first_line_length, node.max_line_length)
is_multiline = True
else:
last_line_length += node.last_line_length
max_line_length = max(max_line_length, last_line_length)
self.first_line_length = first_line_length
self.last_line_length = last_line_length
self.is_multiline = is_multiline
self.max_line_length = max_line_length
def __iter__(self):
return itertools.chain(*self.children_ast_nodes)
max_line_length = 80
# Size of an indent in spaces.
single_indent_length = 4
class TerminalNodeFactory():
def __init__(self, s):
self.s = s
def __call__(self, current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only):
return TerminalAstNode(self.s)
# 'balanced_string' nodes evaluate to a function (or a callable object) taking these parameters:
# current_indent (integer): the indentation in the current line (spaces only)
# current_line_length (integer): the number of preceding characters in the current line (>=current_indent)
# inside_meta_type (boolean): whether we're inside a Type<...>
# last_token_was_type_wrapper (boolean): whether the immediately-preceding token was the identifier 'Type'
# and returning an AstNode
# 'comma_separated_balanced_string' nodes evaluate to a tuple of such functions
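#
# For example (illustrative only, using the classes defined in this file), a
# TerminalNodeFactory satisfies this contract trivially: it ignores all of the
# layout parameters and always returns the same single-line terminal node.
#
#   demo = TerminalNodeFactory('MyType')
#   node = demo(0, 0, False, False, False)
#   assert str(node) == 'MyType'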
def p_comma_separated_balanced_string_empty(p):
'comma_separated_balanced_string : '
p[0] = tuple()
def p_comma_separated_balanced_string_not_empty(p):
'comma_separated_balanced_string : COMMA balanced_string comma_separated_balanced_string'
p[0] = (
p[2],
*(p[3])
)
def p_optional_balanced_string_empty(p):
'optional_balanced_string : '
p[0] = TerminalNodeFactory('')
def p_optional_balanced_string_not_empty(p):
'optional_balanced_string : balanced_string'
p[0] = p[1]
class BalancedStringTerminalNodeFactory():
def __init__(self, first_token, node_factory):
self.first_token = first_token
self.node_factory = node_factory
def __call__(self, current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only):
terminal_node = TerminalAstNode(self.first_token)
non_terminal_node = self.node_factory(
current_indent,
current_line_length + len(self.first_token),
inside_meta_type,
self.first_token == 'Type',
accept_single_line_only)
if non_terminal_node is None:
return None
return NonTerminalAstNode((terminal_node, non_terminal_node))
def p_balanced_string_terminal(p):
'''balanced_string : DOUBLE_COLON balanced_string
| IDENTIFIER optional_balanced_string
| ASTERISK optional_balanced_string
| AMPERSAND optional_balanced_string
'''
first_token = p[1]
node_factory = p[2]
p[0] = BalancedStringTerminalNodeFactory(first_token, node_factory)
def create_composite_node_from_factories(node_factory_inside_meta_type_pairs, current_line_length, accept_single_line_only):
nodes = []
for node_factory, current_indent, inside_meta_type in node_factory_inside_meta_type_pairs:
node = node_factory(current_indent, current_line_length, inside_meta_type, False, accept_single_line_only)
if node is None:
return None
nodes.append(node)
if node.is_multiline:
if accept_single_line_only:
                raise Exception('Unexpected multiline, due to factory: ' + str(node_factory))
# Note that due to the way we break lines, the last line will have the same indent as the first.
# So we don't need to update current_indent here.
current_line_length = node.last_line_length
else:
current_line_length += node.last_line_length
return NonTerminalAstNode(nodes)
def compute_layout(left_token, intermediate_node_factories, right_token, rhs_node_factory, current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only):
# We lay out the result in one of two ways:
#
# $previousIndent $previousContent LPAREN x1, x2, x3 RPAREN balanced_string
#
# Or:
#
# $previousIndent $previousContent LPAREN
# $previousIndent $indent x1 ,
# $previousIndent $indent x2 ,
# $previousIndent $indent x3 RPAREN balanced_string
entering_meta_type = last_token_was_type_wrapper
# First, we try to use the first format if possible
node_factory_inside_meta_type_pairs = [
(TerminalNodeFactory(left_token), current_indent, inside_meta_type),
*((intermediate_node_factory, current_indent, (inside_meta_type or entering_meta_type))
for intermediate_node_factory in intermediate_node_factories),
(TerminalNodeFactory(right_token), current_indent, inside_meta_type),
(rhs_node_factory, current_indent, inside_meta_type),
]
node_with_single_line_layout = create_composite_node_from_factories(node_factory_inside_meta_type_pairs, current_line_length, True)
if node_with_single_line_layout is not None and node_with_single_line_layout.max_line_length <= max_line_length:
assert not node_with_single_line_layout.is_multiline
return node_with_single_line_layout
if accept_single_line_only:
return None
# The result exceeds the line length, let's switch to the second one.
node_factory_inside_meta_type_pairs = [
(TerminalNodeFactory(left_token),
current_indent,
inside_meta_type)
]
new_indent_length = current_indent + single_indent_length
comma_node_factory_inside_meta_type_pair = (TerminalNodeFactory(','), current_indent, inside_meta_type or entering_meta_type)
newline_node_factory_inside_meta_type_pair = (TerminalNodeFactory('\n'), current_indent, inside_meta_type or entering_meta_type)
indent_node_factory_inside_meta_type_pair = (TerminalNodeFactory(' ' * new_indent_length), current_indent, inside_meta_type or entering_meta_type)
for inner_node_factory in intermediate_node_factories:
node_factory_inside_meta_type_pairs.append(newline_node_factory_inside_meta_type_pair)
node_factory_inside_meta_type_pairs.append(indent_node_factory_inside_meta_type_pair)
node_factory_inside_meta_type_pairs.append((inner_node_factory, new_indent_length, inside_meta_type or entering_meta_type))
node_factory_inside_meta_type_pairs.append(comma_node_factory_inside_meta_type_pair)
node_factory_inside_meta_type_pairs.pop()
node_factory_inside_meta_type_pairs.append((TerminalNodeFactory(right_token), current_indent, inside_meta_type))
node_factory_inside_meta_type_pairs.append((rhs_node_factory, current_indent, inside_meta_type))
return create_composite_node_from_factories(node_factory_inside_meta_type_pairs, current_line_length, accept_single_line_only)
def p_balanced_string_with_balanced_token_no_comma_separated_elems(p):
'''balanced_string : LPAREN RPAREN optional_balanced_string
| LBRACKET RBRACKET optional_balanced_string
| LBRACE RBRACE optional_balanced_string
| LESS_THAN GREATER_THAN optional_balanced_string
'''
p_1 = p[1]
p_2 = p[2]
p_3 = p[3]
def result(current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only):
return compute_layout(p_1, [], p_2, p_3, current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only)
p[0] = result
def p_balanced_string_with_balanced_token_some_comma_separated_elems(p):
'''balanced_string : LPAREN balanced_string comma_separated_balanced_string RPAREN optional_balanced_string
| LBRACKET balanced_string comma_separated_balanced_string RBRACKET optional_balanced_string
| LBRACE balanced_string comma_separated_balanced_string RBRACE optional_balanced_string
| LESS_THAN balanced_string comma_separated_balanced_string GREATER_THAN optional_balanced_string
'''
p_1 = p[1]
p_2 = p[2]
p_3 = p[3]
p_4 = p[4]
p_5 = p[5]
def result(current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only):
if not inside_meta_type:
if p_1 == '(' and p_4 == ')':
if len(p_3) == 0:
if isinstance(p_2, BalancedStringTerminalNodeFactory) and p_2.first_token == '*':
if isinstance(p_2.node_factory, TerminalNodeFactory) and p_2.node_factory.s == '':
# Special case: we're not inside a Type<...> and we've encountered a '(*)'.
# Discard it and just print the rhs.
return p_5(current_indent, current_line_length, inside_meta_type, False, accept_single_line_only)
return compute_layout(p_1, (p_2, *(p_3)), p_4, p_5, current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only)
p[0] = result
def p_error(p):
raise Exception("Syntax error when parsing meta type: ", p[:])
lexer = lex.lex()
parser = yacc.yacc(start='balanced_string')
strings_to_remove = re.compile(r'template class |template type alias |function template specialization |member class |member function |default argument for |fruit::impl::meta::|fruit::impl::|fruit::')
def do_simplify_template_trace_element(element):
element, _ = re.subn(strings_to_remove, '', element)
element = element.strip()
if element[0] != '\'' or element[-1] != '\'':
raise Exception('Expected single quotes in: ' + element)
element = element[1:-1]
if element.startswith('DoEval<') and element[-1] == '>':
element = element[7:-1]
result = ''.join(parser.parse(element, lexer)(0, 0, False, False, False))
return result
@memoize(maxsize=1000)
def simplify_template_trace_element(element, executor):
return executor.submit(do_simplify_template_trace_element, element)
def to_dot_left_justified_string(s):
return '\\l'.join(s.splitlines() + [''])
def main():
diagnostics = []
with futures.ProcessPoolExecutor() as executor:
lines = sys.stdin.readlines()
for line_number, line in enumerate(lines):
# Remove the newline
line = line[:-1]
matches = in_file_included_from_pattern.search(line)
if matches:
continue
matches = diagnostic_header_pattern.search(line)
if matches:
diagnostic_kind, diagnostic_message = matches.groups()
if diagnostic_kind == 'error':
diagnostics.append(Diagnostic(diagnostic_kind, diagnostic_message))
print('Processing diagnostic. (%s / %s) ' % (line_number, len(lines)), file=sys.stderr)
elif diagnostic_kind == 'note':
matches = in_instantiation_of_template_pattern.search(diagnostic_message)
if matches:
if not diagnostics:
raise Exception('Found template instantiation note before any error diagnostic: %s' % diagnostic_message)
if 'in instantiation of template type alias' in line:
pass
else:
group = matches.groups()[0]
trace_element_future = simplify_template_trace_element(group, executor)
diagnostics[-1].template_instantiation_trace.append(trace_element_future)
continue
matches = static_warning_marked_deprecated_here_pattern.search(diagnostic_message)
if matches:
continue
raise Exception('Found unknown note: %s' % diagnostic_message)
call_graph = {}
graph = gv.AGraph(directed=True)
for diagnostic_index, diagnostic in enumerate(diagnostics):
if diagnostic_index % 10 == 0:
print('Constructing dep graph: iteration %s/%s' % (diagnostic_index, len(diagnostics)), file=sys.stderr)
template_instantiation_trace = [trace_element_future.result() for trace_element_future in diagnostic.template_instantiation_trace]
for called, caller in zip(template_instantiation_trace[1:], template_instantiation_trace[2:]):
if called in call_graph and call_graph[called] != caller:
# Avoid this edge, so that the resulting graph is a tree
continue
graph.add_edge(to_dot_left_justified_string(caller), to_dot_left_justified_string(called))
call_graph[called] = caller
print(graph)
if __name__ == '__main__':
main()
|
{
"content_hash": "30c834b756b9cbb5b3eaa31c3ead3720",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 200,
"avg_line_length": 43.084468664850135,
"alnum_prop": 0.6386288894510498,
"repo_name": "google/fruit",
"id": "247b18def31094837dee2f5c18d9e968cbdcfa73",
"size": "16434",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "extras/scripts/analyze_template_instantiations_clang_diagnostics.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1557"
},
{
"name": "C",
"bytes": "2673"
},
{
"name": "C++",
"bytes": "663620"
},
{
"name": "CMake",
"bytes": "24558"
},
{
"name": "Makefile",
"bytes": "1238"
},
{
"name": "Shell",
"bytes": "29964"
},
{
"name": "Starlark",
"bytes": "11224"
},
{
"name": "TeX",
"bytes": "22671"
}
],
"symlink_target": ""
}
|
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404
from wagtail.admin.forms.collections import CollectionViewRestrictionForm
from wagtail.admin.modal_workflow import render_modal_workflow
from wagtail.models import Collection, CollectionViewRestriction
from wagtail.permissions import collection_permission_policy
def set_privacy(request, collection_id):
collection = get_object_or_404(Collection, id=collection_id)
if not collection_permission_policy.user_has_permission(request.user, "change"):
raise PermissionDenied
# fetch restriction records in depth order so that ancestors appear first
restrictions = collection.get_view_restrictions().order_by("collection__depth")
if restrictions:
restriction = restrictions[0]
restriction_exists_on_ancestor = restriction.collection != collection
else:
restriction = None
restriction_exists_on_ancestor = False
if request.method == "POST":
form = CollectionViewRestrictionForm(request.POST, instance=restriction)
if form.is_valid() and not restriction_exists_on_ancestor:
if form.cleaned_data["restriction_type"] == CollectionViewRestriction.NONE:
# remove any existing restriction
if restriction:
restriction.delete()
else:
restriction = form.save(commit=False)
restriction.collection = collection
form.save()
return render_modal_workflow(
request,
None,
None,
None,
json_data={
"step": "set_privacy_done",
"is_public": (form.cleaned_data["restriction_type"] == "none"),
},
)
else: # request is a GET
if not restriction_exists_on_ancestor:
if restriction:
form = CollectionViewRestrictionForm(instance=restriction)
else:
# no current view restrictions on this collection
form = CollectionViewRestrictionForm(
initial={"restriction_type": "none"}
)
if restriction_exists_on_ancestor:
# display a message indicating that there is a restriction at ancestor level -
# do not provide the form for setting up new restrictions
return render_modal_workflow(
request,
"wagtailadmin/collection_privacy/ancestor_privacy.html",
None,
{
"collection_with_restriction": restriction.collection,
},
)
else:
# no restriction set at ancestor level - can set restrictions here
return render_modal_workflow(
request,
"wagtailadmin/collection_privacy/set_privacy.html",
None,
{
"collection": collection,
"form": form,
},
json_data={"step": "set_privacy"},
)
|
{
"content_hash": "ab4e63452146ab1eb07416679f8a7bf6",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 87,
"avg_line_length": 38.860759493670884,
"alnum_prop": 0.6055374592833876,
"repo_name": "thenewguy/wagtail",
"id": "9fe5c3f0f6ad0071a775983252569cbb1bb06419",
"size": "3070",
"binary": false,
"copies": "4",
"ref": "refs/heads/tng_master",
"path": "wagtail/admin/views/collection_privacy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593033"
},
{
"name": "JavaScript",
"bytes": "615631"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6571572"
},
{
"name": "SCSS",
"bytes": "219986"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "288325"
}
],
"symlink_target": ""
}
|
from flask import Flask
import json
from cow_mq.client import Client as CowMQClient
import threading
import logging
SERVER_DOMAIN = 'www.hcy.com'
app = Flask(__name__)
app.config['SECRET_KEY'] = 'cow_mq_2017'
config = {}
config['cow_mq_ip'] = '192.168.159.114'
#config['cow_mq_ip'] = 'your_mqtt_ip'
config['cow_mq_port'] = 1883
config['cow_mq_username'] = None
config['cow_mq_password'] = None
config['cow_mq_tls_ca_certs'] = None
config['cow_mq_tls_certfile'] = None
config['cow_mq_tls_keyfile'] = None
cow_client = CowMQClient(config, logging_level=logging.DEBUG)
cow_client.register_server_connected(SERVER_DOMAIN)
def sign_in_callback(domain, rule, rsp_data):
print('sign_in async rsp: {}'.format(rsp_data))
def thread_send():
data = json.dumps({'type': 1})
print('version sync send: {}'.format(data))
rsp_data = cow_client.sync_send(SERVER_DOMAIN, '/version',
data.encode('utf-8'), timeout=10)
print('version sync rsp: {}'.format(rsp_data))
def on_cow_mq_client_connect(self, client, userdata, flags, rc):
data = json.dumps({'username': 'admin', 'password': '123456'})
print('sign_in async send: {}'.format(data))
cow_client.async_send(SERVER_DOMAIN, '/account/sign_in',
data.encode('utf-8'), sign_in_callback, timeout=10)
t = threading.Thread(target=thread_send)
t.start()
def on_cow_mq_server_connect(domain):
print('on_cow_mq_server_connect: {}'.format(domain))
def on_cow_mq_server_disconnect(domain):
print('on_cow_mq_server_disconnect: {}'.format(domain))
cow_client.on_connect = on_cow_mq_client_connect
cow_client.on_server_connect = on_cow_mq_server_connect
cow_client.on_server_disconnect = on_cow_mq_server_disconnect
if __name__ == "__main__":
app.run(debug=False, host='0.0.0.0', port=21011)
|
{
"content_hash": "9b85b199362bb30346e670cb1853120e",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 77,
"avg_line_length": 29.725806451612904,
"alnum_prop": 0.6625067824199674,
"repo_name": "duncanHsu/CowMQ-Python",
"id": "05049e076dd55aafe626e32cf7c14f58024b7093",
"size": "1843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "31437"
}
],
"symlink_target": ""
}
|
import socket
from unittest import mock
import pytest
from aiohttp.tcp_helpers import tcp_nodelay
has_ipv6: bool = socket.has_ipv6
if has_ipv6:
# The socket.has_ipv6 flag may be True if Python was built with IPv6
# support, but the target system still may not have it.
# So let's ensure that we really have IPv6 support.
try:
socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
except OSError:
has_ipv6 = False
# nodelay
def test_tcp_nodelay_exception() -> None:
transport = mock.Mock()
s = mock.Mock()
s.setsockopt = mock.Mock()
s.family = socket.AF_INET
s.setsockopt.side_effect = OSError
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
s.setsockopt.assert_called_with(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
def test_tcp_nodelay_enable() -> None:
transport = mock.Mock()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
def test_tcp_nodelay_enable_and_disable() -> None:
transport = mock.Mock()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
tcp_nodelay(transport, False)
assert not s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
@pytest.mark.skipif(not has_ipv6, reason="IPv6 is not available")
def test_tcp_nodelay_enable_ipv6() -> None:
transport = mock.Mock()
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
@pytest.mark.skipif(not hasattr(socket, "AF_UNIX"), reason="requires unix sockets")
def test_tcp_nodelay_enable_unix() -> None:
# do not set nodelay for unix socket
transport = mock.Mock()
s = mock.Mock(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
tcp_nodelay(transport, True)
assert not s.setsockopt.called
def test_tcp_nodelay_enable_no_socket() -> None:
transport = mock.Mock()
transport.get_extra_info.return_value = None
tcp_nodelay(transport, True)
|
{
"content_hash": "c29996ca74ac7212e065bbdd68922d63",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 83,
"avg_line_length": 33.082191780821915,
"alnum_prop": 0.6935817805383023,
"repo_name": "KeepSafe/aiohttp",
"id": "2b468ba470a14c6a96bd785134e07ab4c3677e1a",
"size": "2415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tcp_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "4890"
},
{
"name": "Makefile",
"bytes": "3179"
},
{
"name": "PLpgSQL",
"bytes": "765"
},
{
"name": "Python",
"bytes": "1236385"
},
{
"name": "Shell",
"bytes": "2309"
}
],
"symlink_target": ""
}
|
import requests
url = "http://mockbin.com/har"
response = requests.request("PROPFIND", url)
print(response.text)
|
{
"content_hash": "20eae1e211272449b98953ceba464625",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 44,
"avg_line_length": 16.571428571428573,
"alnum_prop": 0.7327586206896551,
"repo_name": "restlet/httpsnippet",
"id": "8a848a56dae919ed297b91cb84a2cb1a0ac337b0",
"size": "116",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/fixtures/output/python/requests/custom-method.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "103817"
}
],
"symlink_target": ""
}
|
ACCOUNT_NAME = 'Office Spot'
|
{
"content_hash": "417b3b9dad6580f87ffd69323cf317ca",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 28,
"avg_line_length": 29,
"alnum_prop": 0.7241379310344828,
"repo_name": "ddy88958620/lib",
"id": "4a0d360a8ba3f784ca2b472096a8acd6b66c95fb",
"size": "29",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/scrapy/officespot/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import numpy as np
import xgboost as xgb
import time
# path to where the data lies
dpath = '../../demo/data'
# load in training data, directly use numpy
dtrain = np.loadtxt( dpath+'/training.csv', delimiter=',', skiprows=1, converters={32: lambda x:int(x=='s') } )
dtrain = np.concatenate((dtrain, np.copy(dtrain)))
dtrain = np.concatenate((dtrain, np.copy(dtrain)))
dtrain = np.concatenate((dtrain, np.copy(dtrain)))
test_size = len(dtrain)
print(len(dtrain))
print ('finish loading from csv ')
label = dtrain[:,32]
data = dtrain[:,1:31]
# rescale weight to make it same as test set
weight = dtrain[:,31] * float(test_size) / len(label)
sum_wpos = sum( weight[i] for i in range(len(label)) if label[i] == 1.0 )
sum_wneg = sum( weight[i] for i in range(len(label)) if label[i] == 0.0 )
# print weight statistics
print ('weight statistics: wpos=%g, wneg=%g, ratio=%g' % ( sum_wpos, sum_wneg, sum_wneg/sum_wpos ))
# construct xgboost.DMatrix from numpy array, treat -999.0 as missing value
xgmat = xgb.DMatrix( data, label=label, missing = -999.0, weight=weight )
# setup parameters for xgboost
param = {}
# use logistic regression loss
param['objective'] = 'binary:logitraw'
# scale weight of positive examples
param['scale_pos_weight'] = sum_wneg/sum_wpos
param['bst:eta'] = 0.1
param['max_depth'] = 15
param['eval_metric'] = 'auc'
param['nthread'] = 16
plst = list(param.items()) + [('eval_metric', 'ams@0.15')]
watchlist = [ (xgmat,'train') ]
num_round = 10
print ("training xgboost")
threads = [16]
for i in threads:
param['nthread'] = i
tmp = time.time()
    plst = list(param.items()) + [('eval_metric', 'ams@0.15')]
    bst = xgb.train(plst, xgmat, num_round, watchlist)
print ("XGBoost with %d thread costs: %s seconds" % (i, str(time.time() - tmp)))
print ("training xgboost - gpu tree construction")
param['updater'] = 'grow_gpu'
tmp = time.time()
plst = list(param.items()) + [('eval_metric', 'ams@0.15')]
bst = xgb.train(plst, xgmat, num_round, watchlist)
print ("XGBoost GPU: %s seconds" % (str(time.time() - tmp)))
print ('finish training')
|
{
"content_hash": "8d6eec66072e544ec95d5296f1f0bc54",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 111,
"avg_line_length": 33.29032258064516,
"alnum_prop": 0.6671511627906976,
"repo_name": "RPGOne/Skynet",
"id": "fc76d98b266b82c0588abb6c416fcf80051c7c91",
"size": "2170",
"binary": false,
"copies": "1",
"ref": "refs/heads/Miho",
"path": "xgboost-master/plugin/updater_gpu/speed_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "1C Enterprise",
"bytes": "36"
},
{
"name": "Ada",
"bytes": "89079"
},
{
"name": "Assembly",
"bytes": "11425802"
},
{
"name": "Batchfile",
"bytes": "123467"
},
{
"name": "C",
"bytes": "34703955"
},
{
"name": "C#",
"bytes": "55955"
},
{
"name": "C++",
"bytes": "84647314"
},
{
"name": "CMake",
"bytes": "220849"
},
{
"name": "CSS",
"bytes": "39257"
},
{
"name": "Cuda",
"bytes": "1344541"
},
{
"name": "DIGITAL Command Language",
"bytes": "349320"
},
{
"name": "DTrace",
"bytes": "37428"
},
{
"name": "Emacs Lisp",
"bytes": "19654"
},
{
"name": "Erlang",
"bytes": "39438"
},
{
"name": "Fortran",
"bytes": "16914"
},
{
"name": "HTML",
"bytes": "929759"
},
{
"name": "Java",
"bytes": "112658"
},
{
"name": "JavaScript",
"bytes": "32806873"
},
{
"name": "Jupyter Notebook",
"bytes": "1616334"
},
{
"name": "Lua",
"bytes": "22549"
},
{
"name": "M4",
"bytes": "64967"
},
{
"name": "Makefile",
"bytes": "1046428"
},
{
"name": "Matlab",
"bytes": "888"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "2860"
},
{
"name": "Objective-C",
"bytes": "131433"
},
{
"name": "PHP",
"bytes": "750783"
},
{
"name": "Pascal",
"bytes": "75208"
},
{
"name": "Perl",
"bytes": "626627"
},
{
"name": "Perl 6",
"bytes": "2495926"
},
{
"name": "PowerShell",
"bytes": "38374"
},
{
"name": "Prolog",
"bytes": "300018"
},
{
"name": "Python",
"bytes": "26363074"
},
{
"name": "R",
"bytes": "236175"
},
{
"name": "Rebol",
"bytes": "217"
},
{
"name": "Roff",
"bytes": "328366"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scala",
"bytes": "248902"
},
{
"name": "Scheme",
"bytes": "14853"
},
{
"name": "Shell",
"bytes": "360815"
},
{
"name": "TeX",
"bytes": "105346"
},
{
"name": "Vim script",
"bytes": "6101"
},
{
"name": "XS",
"bytes": "4319"
},
{
"name": "eC",
"bytes": "5158"
}
],
"symlink_target": ""
}
|
def includeme(config):
# We need to get the value of the Warehouse and Forklift domains, we'll use
# these to segregate the Warehouse routes from the Forklift routes until
    # Forklift is properly split out into its own project.
warehouse = config.get_settings().get("warehouse.domain")
# General Admin pages
config.add_route("admin.dashboard", "/admin/", domain=warehouse)
config.add_route("admin.login", "/admin/login/", domain=warehouse)
config.add_route("admin.logout", "/admin/logout/", domain=warehouse)
# User related Admin pages
config.add_route("admin.user.list", "/admin/users/", domain=warehouse)
config.add_route(
"admin.user.detail",
"/admin/users/{user_id}/",
domain=warehouse,
)
|
{
"content_hash": "28d8cef26cf5500a27ba433285e88999",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 42.611111111111114,
"alnum_prop": 0.6792698826597132,
"repo_name": "alex/warehouse",
"id": "22af85df6120351a4416097ad7201346233e74a0",
"size": "1309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "warehouse/admin/routes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "135448"
},
{
"name": "HTML",
"bytes": "95233"
},
{
"name": "JavaScript",
"bytes": "27705"
},
{
"name": "Makefile",
"bytes": "5817"
},
{
"name": "Mako",
"bytes": "1505"
},
{
"name": "Perl",
"bytes": "15498"
},
{
"name": "Python",
"bytes": "909699"
},
{
"name": "Shell",
"bytes": "4504"
}
],
"symlink_target": ""
}
|
from typing import Dict, Iterable
from flask_babel import LazyString, lazy_gettext
from c3bottles.model import drop_point
class Category:
"""
A category of drop points for different stuff, e.g. bottles, trash, etc.
Different categories of drop points allow different teams or specialized
groups within the same team to organize their work through the same
frontend. Therefore, each drop point belongs to one category.
"""
def __init__(self, category_id: int, name: LazyString):
self.category_id: int = category_id
self.name: LazyString = name
self.metrics_name: str = str(name)
def __str__(self) -> str:
return str(self.name)
def __len__(self) -> int:
return drop_point.DropPoint.query.filter(
drop_point.DropPoint.category_id == self.category_id,
drop_point.DropPoint.removed == None, # noqa
).count()
@staticmethod
def get(category_id: int) -> "Category":
return all_categories.get(category_id, all_categories[0])
"""
A dict of all categories.
The categories must be indexed by their integer id and their name should be a
lazy string so that l10n becomes easy. The ids can be any integer, but at least
the category with the id 0 must be present as it is the default fallback
category. If that one is not present, everything will fall apart.
"""
all_categories: Dict[int, Category] = {
0: Category(0, lazy_gettext("Bottle Drop Point")),
1: Category(1, lazy_gettext("Trashcan")),
}
def categories_sorted() -> Iterable[Category]:
"""
Get a list of all categories sorted by their human-readable name with
respect to the user's language.
:return: A list of all categories sorted by name.
"""
return sorted(all_categories.values(), key=lambda i: str(i))
|
{
"content_hash": "6146414b72b29880b93ca4749ec5c998",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 78,
"avg_line_length": 31.94736842105263,
"alnum_prop": 0.6820428336079077,
"repo_name": "der-michik/c3bottles",
"id": "9828b52dbd2a7dcb9c1ca2d0bdea0ca19fc934e6",
"size": "1821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c3bottles/model/category.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1937"
},
{
"name": "Dockerfile",
"bytes": "1010"
},
{
"name": "HTML",
"bytes": "61195"
},
{
"name": "JavaScript",
"bytes": "32088"
},
{
"name": "Makefile",
"bytes": "584"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "97736"
},
{
"name": "Shell",
"bytes": "497"
}
],
"symlink_target": ""
}
|
from buildbot.buildslave import BuildSlave
from buildbot.buildslave.ec2 import EC2LatentBuildSlave
# using simplejson instead of json since Twisted wants ascii instead of unicode
import simplejson as json
slaves = []
# Load slaves from external file, see slaves.json.sample
for slave in json.load(open("slaves.json")):
if 'latentslave' in slave['name']:
slaves.append(EC2LatentBuildSlave(
slave['name'],
slave['password'],
'c3.large',
max_builds=1,
ami='ami-2e2a1c46',
region='us-east-1',
placement='b',
user_data='{"SLAVENAME": "%s"}' % slave['name'],
spot_instance=True,
max_spot_price=0.025,
price_multiplier=1.15))
else:
slaves.append(BuildSlave(slave['name'], slave['password']))
|
{
"content_hash": "e68eb146752d7e4cf42a88dc04694792",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 79,
"avg_line_length": 33.76,
"alnum_prop": 0.6101895734597157,
"repo_name": "chfast/ethereum-buildbot",
"id": "5d7acfe1cf43e3e7f0f1ea9ac66b46f228f71edc",
"size": "1250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slaves.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10308"
},
{
"name": "HTML",
"bytes": "8586"
},
{
"name": "Python",
"bytes": "190842"
},
{
"name": "Ruby",
"bytes": "2406"
}
],
"symlink_target": ""
}
|
from heapq import nsmallest
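# Presumably Advent of Code 2015, day 2 (part 2): for each LxWxH present, the
# ribbon needed is the smallest face perimeter plus the box volume (for the bow).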
total = 0
with open('input.txt') as fp:
data = fp.read()
for dimensions in data.strip().split('\n'):
length, width, height = dimensions.split('x')
length = int(length)
width = int(width)
height = int(height)
a, b = nsmallest(2, (length, width, height))
total += (a * 2) + (b * 2)
total += (length * width * height)
print(total)
|
{
"content_hash": "3f7462350c5225f76932811dd478089f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 53,
"avg_line_length": 24.470588235294116,
"alnum_prop": 0.5552884615384616,
"repo_name": "graemeglass/adventofcode",
"id": "7c706064ab49638ea5262e6cd96483042bc654a7",
"size": "453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2/part2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3516"
}
],
"symlink_target": ""
}
|
import os
from datetime import datetime, timedelta
from airflow.models import DAG
try:
from airflow.operators.empty import EmptyOperator
except ModuleNotFoundError:
from airflow.operators.dummy import DummyOperator as EmptyOperator # type: ignore
from airflow.providers.microsoft.azure.operators.data_factory import AzureDataFactoryRunPipelineOperator
from airflow.providers.microsoft.azure.sensors.data_factory import AzureDataFactoryPipelineRunStatusSensor
from airflow.utils.edgemodifier import Label
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
DAG_ID = "example_adf_run_pipeline"
with DAG(
dag_id=DAG_ID,
start_date=datetime(2021, 8, 13),
schedule_interval="@daily",
catchup=False,
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=3),
"azure_data_factory_conn_id": "azure_data_factory",
"factory_name": "my-data-factory", # This can also be specified in the ADF connection.
"resource_group_name": "my-resource-group", # This can also be specified in the ADF connection.
},
default_view="graph",
) as dag:
begin = EmptyOperator(task_id="begin")
end = EmptyOperator(task_id="end")
# [START howto_operator_adf_run_pipeline]
run_pipeline1 = AzureDataFactoryRunPipelineOperator(
task_id="run_pipeline1",
pipeline_name="pipeline1",
parameters={"myParam": "value"},
)
# [END howto_operator_adf_run_pipeline]
# [START howto_operator_adf_run_pipeline_async]
run_pipeline2 = AzureDataFactoryRunPipelineOperator(
task_id="run_pipeline2",
pipeline_name="pipeline2",
wait_for_termination=False,
)
pipeline_run_sensor = AzureDataFactoryPipelineRunStatusSensor(
task_id="pipeline_run_sensor",
run_id=run_pipeline2.output["run_id"],
)
# [END howto_operator_adf_run_pipeline_async]
begin >> Label("No async wait") >> run_pipeline1
begin >> Label("Do async wait with sensor") >> run_pipeline2
[run_pipeline1, pipeline_run_sensor] >> end
# Task dependency created via `XComArgs`:
# run_pipeline2 >> pipeline_run_sensor
from tests.system.utils.watcher import watcher
# This test needs watcher in order to properly mark success/failure
# when "tearDown" task with trigger rule is part of the DAG
list(dag.tasks) >> watcher()
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
|
{
"content_hash": "7c3a22f8624d1ee115c663ba5710d6fa",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 106,
"avg_line_length": 35.31944444444444,
"alnum_prop": 0.7027133307117578,
"repo_name": "danielvdende/incubator-airflow",
"id": "03a1a71d63313e0219312cb15caea3f61fc8ca89",
"size": "3328",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/system/providers/microsoft/azure/example_adf_run_pipeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""
from oslo.config import cfg
from designate.openstack.common import importutils
from designate.openstack.common import log as logging
from designate.openstack.common.rpc import matchmaker as mm_common
redis = importutils.try_import('redis')
matchmaker_redis_opts = [
cfg.StrOpt('host',
default='127.0.0.1',
help='Host to locate redis'),
cfg.IntOpt('port',
default=6379,
help='Use this port to connect to redis host.'),
cfg.StrOpt('password',
default=None,
help='Password for Redis server. (optional)'),
]
CONF = cfg.CONF
opt_group = cfg.OptGroup(name='matchmaker_redis',
title='Options for Redis-based MatchMaker')
CONF.register_group(opt_group)
CONF.register_opts(matchmaker_redis_opts, opt_group)
LOG = logging.getLogger(__name__)
class RedisExchange(mm_common.Exchange):
def __init__(self, matchmaker):
self.matchmaker = matchmaker
self.redis = matchmaker.redis
super(RedisExchange, self).__init__()
class RedisTopicExchange(RedisExchange):
"""Exchange where all topic keys are split, sending to second half.
i.e. "compute.host" sends a message to "compute" running on "host"
"""
def run(self, topic):
while True:
member_name = self.redis.srandmember(topic)
if not member_name:
# If this happens, there are no
# longer any members.
break
if not self.matchmaker.is_alive(topic, member_name):
continue
host = member_name.split('.', 1)[1]
return [(member_name, host)]
return []
class RedisFanoutExchange(RedisExchange):
"""Return a list of all hosts."""
def run(self, topic):
topic = topic.split('~', 1)[1]
hosts = self.redis.smembers(topic)
good_hosts = filter(
lambda host: self.matchmaker.is_alive(topic, host), hosts)
return [(x, x.split('.', 1)[1]) for x in good_hosts]
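# A sketch of how the two exchanges above resolve keys, assuming members were
# registered under "<topic>.<host>" names (e.g. "compute.node1"):
#
#   topic exchange:  run("compute") picks one live member at random via
#                    SRANDMEMBER and returns [("compute.node1", "node1")]
#   fanout exchange: run("fanout~compute") strips everything up to the "~",
#                    then returns every live member of the "compute" set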
class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
"""MatchMaker registering and looking-up hosts with a Redis server."""
def __init__(self):
super(MatchMakerRedis, self).__init__()
if not redis:
raise ImportError("Failed to import module redis.")
self.redis = redis.Redis(
host=CONF.matchmaker_redis.host,
port=CONF.matchmaker_redis.port,
password=CONF.matchmaker_redis.password)
self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self))
self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange())
self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self))
def ack_alive(self, key, host):
topic = "%s.%s" % (key, host)
if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl):
# If we could not update the expiration, the key
# might have been pruned. Re-register, creating a new
# key in Redis.
self.register(self.topic_host[host], host)
def is_alive(self, topic, host):
if self.redis.ttl(host) == -1:
self.expire(topic, host)
return False
return True
def expire(self, topic, host):
with self.redis.pipeline() as pipe:
pipe.multi()
pipe.delete(host)
pipe.srem(topic, host)
pipe.execute()
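    # Liveness bookkeeping (a sketch of the scheme used above): each member is
    # stored twice in Redis -- inside the set named after its topic (for
    # lookup) and as a plain "<topic>.<host>" key whose TTL acts as the
    # heartbeat. ack_alive() refreshes that TTL, while is_alive() treats a
    # member key whose TTL is gone as dead and prunes it via expire().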
def backend_register(self, key, key_host):
with self.redis.pipeline() as pipe:
pipe.multi()
pipe.sadd(key, key_host)
# No value is needed, we just
# care if it exists. Sets aren't viable
# because only keys can expire.
pipe.set(key_host, '')
pipe.execute()
def backend_unregister(self, key, key_host):
with self.redis.pipeline() as pipe:
pipe.multi()
pipe.srem(key, key_host)
pipe.delete(key_host)
pipe.execute()
|
{
"content_hash": "8c3110df38ec52c20a019ab5f062c930",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 79,
"avg_line_length": 32.276923076923076,
"alnum_prop": 0.6024785510009533,
"repo_name": "NeCTAR-RC/designate",
"id": "4b0fe2085508c0daaed8a014cb390c80d66744f6",
"size": "4815",
"binary": false,
"copies": "1",
"ref": "refs/heads/nectar/icehouse",
"path": "designate/openstack/common/rpc/matchmaker_redis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1402878"
},
{
"name": "Shell",
"bytes": "3809"
}
],
"symlink_target": ""
}
|
import unittest
from chainer import backend
from chainer.backends import cuda
from chainer import initializers
from chainer import testing
from chainer.testing import attr
import numpy
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestIdentity(unittest.TestCase):
scale = 0.1
shape = (2, 2)
def setUp(self):
self.check_options = {}
if self.dtype == numpy.float16:
self.check_options = {'atol': 1e-4, 'rtol': 1e-3}
def check_initializer(self, w):
initializer = initializers.Identity(scale=self.scale)
initializer(w)
testing.assert_allclose(
w, self.scale * numpy.identity(len(self.shape)),
**self.check_options)
def test_initializer_cpu(self):
w = numpy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
@attr.gpu
def test_initializer_gpu(self):
w = cuda.cupy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
def check_shaped_initializer(self, xp):
initializer = initializers.Identity(
scale=self.scale, dtype=self.dtype)
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(backend.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
testing.assert_allclose(
w, self.scale * numpy.identity(len(self.shape)),
**self.check_options)
def test_shaped_initializer_cpu(self):
self.check_shaped_initializer(numpy)
@attr.gpu
def test_shaped_initializer_gpu(self):
self.check_shaped_initializer(cuda.cupy)
@testing.parameterize(
{'shape': (2, 3)},
{'shape': (2, 2, 4)},
{'shape': ()},
{'shape': 0})
class TestIdentityInvalid(unittest.TestCase):
def setUp(self):
self.initializer = initializers.Identity()
def test_invalid_shape(self):
w = numpy.empty(self.shape, dtype=numpy.float32)
with self.assertRaises(ValueError):
self.initializer(w)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestConstant(unittest.TestCase):
fill_value = 0.1
shape = (2, 3)
def setUp(self):
self.check_options = {}
if self.dtype == numpy.float16:
self.check_options = {'atol': 1e-4, 'rtol': 1e-3}
def check_initializer(self, w):
initializer = initializers.Constant(fill_value=self.fill_value)
initializer(w)
testing.assert_allclose(
w, numpy.full(self.shape, self.fill_value),
**self.check_options)
def test_initializer_cpu(self):
w = numpy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
@attr.gpu
def test_initializer_gpu(self):
w = cuda.cupy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
def check_shaped_initializer(self, xp):
initializer = initializers.Constant(
fill_value=self.fill_value, dtype=self.dtype)
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(backend.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
testing.assert_allclose(
w, numpy.full(self.shape, self.fill_value),
**self.check_options)
def test_shaped_initializer_cpu(self):
self.check_shaped_initializer(numpy)
@attr.gpu
def test_shaped_initializer_gpu(self):
self.check_shaped_initializer(cuda.cupy)
testing.run_module(__name__, __file__)
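# A minimal usage sketch outside the test harness above (assuming chainer and
# numpy are installed): Identity writes ``scale * I`` into a square matrix,
# which is exactly what the assertions in TestIdentity verify.
#
#   import numpy
#   from chainer import initializers
#   w = numpy.empty((3, 3), dtype=numpy.float32)
#   initializers.Identity(scale=0.1)(w)   # w == 0.1 * numpy.identity(3)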
|
{
"content_hash": "7eafc9b2a0a23ffb45ccfafdb9420445",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 71,
"avg_line_length": 30.227642276422763,
"alnum_prop": 0.6387842926304464,
"repo_name": "tkerola/chainer",
"id": "84e559eb770d5aae1fc998effc0b4a9e4db906d4",
"size": "3718",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/initializer_tests/test_constant.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3471733"
}
],
"symlink_target": ""
}
|
import asposebarcodecloud
from asposebarcodecloud.BarcodeApi import BarcodeApi
from asposebarcodecloud.BarcodeApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
import ConfigParser
config = ConfigParser.ConfigParser()
config.readfp(open(r'../../data/config.properties'))
apiKey = config.get('AppConfig', 'api_key')
appSid = config.get('AppConfig', 'app_sid')
out_folder = config.get('AppConfig', 'out_folder')
data_folder = "../../data/" #resouece data folder
#ExStart:1
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Barcode API SDK
api_client = asposebarcodecloud.ApiClient.ApiClient(apiKey, appSid, True)
barcodeApi = BarcodeApi(api_client)
#Set the barcode file name created on server
name = "sample-barcode"
#Set Text to encode inside barcode
text = "Aspose.BarCode"
#Set Barcode Symbology
type = "Code128"
#Set Generated Barcode Image Format
format = "PNG"
#Set Resolution along X and Y in dpi.
resolutionX = 200.0
resolutionY = 200.0
#Set Width and Height of barcode unit
dimensionX = 0.0
dimensionY = 0.0
try:
#invoke Aspose.BarCode Cloud SDK API to generate image with specific image resolution
response = barcodeApi.PutBarcodeGenerateFile(name, file= None, text=text, type=type, format=format, resolutionX=resolutionX, resolutionY=resolutionY, dimensionX=dimensionX, dimensionY=dimensionY)
if response.Status == "OK":
#download generated barcode from cloud storage
response = storageApi.GetDownload(Path=name)
outfilename = out_folder + name + "." + format
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
#ExEnd:1
|
{
"content_hash": "43680e75dd04105e345d5ba8b67697dd",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 199,
"avg_line_length": 33.890625,
"alnum_prop": 0.6975564776394652,
"repo_name": "farooqsheikhpk/Aspose.BarCode-for-Cloud",
"id": "8da48c308039214b24fc4b45939bcb236c6de607",
"size": "2169",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Examples/Python/generating-saving/cloud-storage/set-barcode-image-resolution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "207"
},
{
"name": "C#",
"bytes": "96633"
},
{
"name": "Java",
"bytes": "65588"
},
{
"name": "JavaScript",
"bytes": "46992"
},
{
"name": "Objective-C",
"bytes": "72063"
},
{
"name": "PHP",
"bytes": "49676"
},
{
"name": "Perl",
"bytes": "74472"
},
{
"name": "Python",
"bytes": "60095"
},
{
"name": "Ruby",
"bytes": "63259"
}
],
"symlink_target": ""
}
|
import markupsafe
from django.db import models
from addons.base import exceptions
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from addons.bitbucket.api import BitbucketClient
from addons.bitbucket.serializer import BitbucketSerializer
from addons.bitbucket import settings as bitbucket_settings
from addons.bitbucket.exceptions import NotFoundError
from framework.auth import Auth
from osf.models.external import ExternalProvider
from osf.models.files import File, Folder, BaseFileNode
from website import settings
from website.util import web_url_for
hook_domain = bitbucket_settings.HOOK_DOMAIN or settings.DOMAIN
class BitbucketFileNode(BaseFileNode):
_provider = 'bitbucket'
class BitbucketFolder(BitbucketFileNode, Folder):
pass
class BitbucketFile(BitbucketFileNode, File):
version_identifier = 'commitSha'
def touch(self, auth_header, revision=None, commitSha=None, branch=None, **kwargs):
revision = revision or commitSha or branch
return super(BitbucketFile, self).touch(auth_header, revision=revision, **kwargs)
@property
def _hashes(self):
try:
return {'commit': self._history[-1]['extra']['commitSha']}
except (IndexError, KeyError):
return None
class BitbucketProvider(ExternalProvider):
"""Provider to handler Bitbucket OAuth workflow
API Docs::
* https://developer.atlassian.com/bitbucket/api/2/reference/meta/authentication
* https://confluence.atlassian.com/bitbucket/oauth-on-bitbucket-cloud-238027431.html
"""
name = 'Bitbucket'
short_name = 'bitbucket'
client_id = bitbucket_settings.CLIENT_ID
client_secret = bitbucket_settings.CLIENT_SECRET
auth_url_base = bitbucket_settings.OAUTH_AUTHORIZE_URL
callback_url = bitbucket_settings.OAUTH_ACCESS_TOKEN_URL
default_scopes = bitbucket_settings.SCOPE
auto_refresh_url = callback_url
refresh_time = bitbucket_settings.REFRESH_TIME
expiry_time = bitbucket_settings.EXPIRY_TIME
def handle_callback(self, response):
"""View called when the OAuth flow is completed. Adds a new BitbucketUserSettings
record to the user and saves the account info.
"""
client = BitbucketClient(access_token=response['access_token'])
user_info = client.user()
return {
'provider_id': user_info['uuid'],
'profile_url': user_info['links']['html']['href'],
'display_name': user_info['username']
}
def fetch_access_token(self, force_refresh=False):
self.refresh_oauth_key(force=force_refresh)
return self.account.oauth_key
class UserSettings(BaseOAuthUserSettings):
"""Stores user-specific bitbucket information
Quirks::
* Bitbucket does not support remote revocation of access tokens.
"""
oauth_provider = BitbucketProvider
serializer = BitbucketSerializer
# Required for importing username from social profile configuration page
# Assumes oldest connected account is primary.
@property
def public_id(self):
bitbucket_accounts = self.owner.external_accounts.filter(provider=self.oauth_provider.short_name)
if bitbucket_accounts:
return bitbucket_accounts[0].display_name
return None
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = BitbucketProvider
serializer = BitbucketSerializer
user = models.TextField(blank=True, null=True)
repo = models.TextField(blank=True, null=True)
hook_id = models.TextField(blank=True, null=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
_api = None
@property
def api(self):
"""Authenticated ExternalProvider instance"""
if self._api is None:
self._api = BitbucketProvider(self.external_account)
return self._api
@property
def folder_id(self):
return self.repo or None
@property
def folder_name(self):
if self.complete:
return '{}/{}'.format(self.user, self.repo)
return None
@property
def folder_path(self):
return self.repo or None
@property
def complete(self):
return self.has_auth and self.repo is not None and self.user is not None
def authorize(self, user_settings, save=False):
self.user_settings = user_settings
self.owner.add_log(
action='bitbucket_node_authorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=Auth(user_settings.owner),
)
if save:
self.save()
def clear_settings(self):
self.user = None
self.repo = None
self.hook_id = None
def deauthorize(self, auth=None, log=True):
# self.delete_hook(save=False)
self.clear_settings()
if log:
self.owner.add_log(
action='bitbucket_node_deauthorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=auth,
)
self.clear_auth()
def delete(self, save=False):
super(NodeSettings, self).delete(save=False)
self.deauthorize(log=False)
if save:
self.save()
@property
def repo_url(self):
if self.user and self.repo:
return 'https://bitbucket.org/{0}/{1}/'.format(
self.user, self.repo
)
@property
def short_url(self):
if self.user and self.repo:
return '/'.join([self.user, self.repo])
@property
def is_private(self):
repo = self.fetch_repo()
if repo:
return repo['is_private']
return None
def fetch_repo(self):
connection = BitbucketClient(access_token=self.api.fetch_access_token())
return connection.repo(user=self.user, repo=self.repo)
def fetch_access_token(self):
return self.api.fetch_access_token()
# TODO: Delete me and replace with serialize_settings / Knockout
def to_json(self, user):
ret = super(NodeSettings, self).to_json(user)
user_settings = user.get_addon('bitbucket')
ret.update({
'user_has_auth': user_settings and user_settings.has_auth,
'is_registration': self.owner.is_registration,
})
if self.user_settings and self.user_settings.has_auth:
connection = BitbucketClient(access_token=self.api.fetch_access_token())
valid_credentials = True
try:
mine = connection.repos()
repo_names = [
repo['full_name'].replace('/', ' / ')
for repo in mine
]
except Exception:
repo_names = []
valid_credentials = False
owner = self.user_settings.owner
if owner == user:
ret.update({'repo_names': repo_names})
ret.update({
'node_has_auth': True,
'bitbucket_user': self.user or '',
'bitbucket_repo': self.repo or '',
'bitbucket_repo_full_name': '{0} / {1}'.format(self.user, self.repo) if (self.user and self.repo) else '',
'auth_osf_name': owner.fullname,
'auth_osf_url': owner.url,
'auth_osf_id': owner._id,
'bitbucket_user_name': self.external_account.display_name,
'bitbucket_user_url': self.external_account.profile_url,
'is_owner': owner == user,
'valid_credentials': valid_credentials,
'addons_url': web_url_for('user_addons'),
'files_url': self.owner.web_url_for('collect_file_trees')
})
return ret
def serialize_waterbutler_credentials(self):
if not self.complete or not self.repo:
raise exceptions.AddonError('Addon is not authorized')
return {'token': self.api.fetch_access_token()}
def serialize_waterbutler_settings(self):
if not self.complete:
raise exceptions.AddonError('Repo is not configured')
return {
'owner': self.user,
'repo': self.repo,
}
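    # The two serializers above produce the payloads handed to WaterButler
    # (a sketch based on the return values): credentials come out as
    # {'token': '<oauth access token>'} and settings as
    # {'owner': '<bitbucket username>', 'repo': '<repo slug>'}.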
def create_waterbutler_log(self, auth, action, metadata):
path = metadata['path']
url = self.owner.web_url_for('addon_view_or_download_file', path=path, provider='bitbucket')
sha, urls = None, {}
try:
sha = metadata['extra']['commitSha']
urls = {
'view': '{0}?commitSha={1}'.format(url, sha),
'download': '{0}?action=download&commitSha={1}'.format(url, sha)
}
except KeyError:
pass
self.owner.add_log(
'bitbucket_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': path,
'urls': urls,
'bitbucket': {
'user': self.user,
'repo': self.repo,
'commitSha': sha,
},
},
)
#############
# Callbacks #
#############
def before_page_load(self, node, user):
"""
:param Node node:
:param User user:
:return str: Alert message
"""
messages = []
# Quit if not contributor
if not node.is_contributor_or_group_member(user):
return messages
# Quit if not configured
if self.user is None or self.repo is None:
return messages
# Quit if no user authorization
if self.user_settings is None:
return messages
repo_data = self.fetch_repo()
if repo_data:
node_permissions = 'public' if node.is_public else 'private'
repo_permissions = 'private' if repo_data['is_private'] else 'public'
if repo_permissions != node_permissions:
message = (
'Warning: This OSF {category} is {node_perm}, but the Bitbucket '
'repo {user} / {repo} is {repo_perm}.'.format(
category=markupsafe.escape(node.project_or_component),
node_perm=markupsafe.escape(node_permissions),
repo_perm=markupsafe.escape(repo_permissions),
user=markupsafe.escape(self.user),
repo=markupsafe.escape(self.repo),
)
)
if repo_permissions == 'private':
message += (
' Users can view the contents of this private Bitbucket '
'repository through this public project.'
)
else:
message += (
' The files in this Bitbucket repo can be viewed on Bitbucket '
'<u><a href="https://bitbucket.org/{user}/{repo}/">here</a></u>.'
).format(
user=self.user,
repo=self.repo,
)
messages.append(message)
else:
message = (
'Warning: the Bitbucket repo {user} / {repo} connected to this OSF {category} has been deleted.'.format(
category=markupsafe.escape(node.project_or_component),
user=markupsafe.escape(self.user),
repo=markupsafe.escape(self.repo),
)
)
messages.append(message)
return messages
def before_remove_contributor_message(self, node, removed):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
try:
message = (super(NodeSettings, self).before_remove_contributor_message(node, removed) +
'You can download the contents of this repository before removing '
'this contributor <u><a href="{url}">here</a></u>.'.format(
url=node.api_url + 'bitbucket/tarball/'
))
except TypeError:
# super call returned None due to lack of user auth
return None
else:
return message
# backwards compatibility -- TODO: is this necessary?
before_remove_contributor = before_remove_contributor_message
def after_remove_contributor(self, node, removed, auth=None):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
if self.user_settings and self.user_settings.owner == removed:
# Delete OAuth tokens
self.user_settings = None
self.save()
message = (
u'Because the Bitbucket add-on for {category} "{title}" was authenticated '
u'by {user}, authentication information has been deleted.'
).format(
category=markupsafe.escape(node.category_display),
title=markupsafe.escape(node.title),
user=markupsafe.escape(removed.fullname)
)
if not auth or auth.user != removed:
url = node.web_url_for('node_setting')
message += (
u' You can re-authenticate on the <u><a href="{url}">Settings</a></u> page.'
).format(url=url)
return message
def after_fork(self, node, fork, user, save=True):
"""Hook to run after forking a project. If the forking user is not
the same as the original authorizing user, the Bitbucket
credentials will *not* be copied over.
:param Node node: Original node
:param Node fork: Forked node
:param User user: User creating fork
:param bool save: Save settings after callback
:return tuple: Tuple of cloned settings and alert message
"""
clone = super(NodeSettings, self).after_fork(
node, fork, user, save=False
)
# Copy authentication if authenticated by forking user
if self.user_settings and self.user_settings.owner == user:
clone.user_settings = self.user_settings
if save:
clone.save()
return clone
def before_make_public(self, node):
try:
is_private = self.is_private
except NotFoundError:
return None
if is_private:
return (
'This {cat} is connected to a private Bitbucket repository. Users '
'(other than contributors) will not be able to see the '
'contents of this repo unless it is made public on Bitbucket.'
).format(
cat=node.project_or_component,
)
def after_delete(self, user):
self.deauthorize(Auth(user=user), log=True)
|
{
"content_hash": "84014474cae71b1102f254a432183e90",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 122,
"avg_line_length": 33.63876651982379,
"alnum_prop": 0.57124148768989,
"repo_name": "Johnetordoff/osf.io",
"id": "b9f22b79ce2e3c1ac23b9db85da026ba6efbb945",
"size": "15297",
"binary": false,
"copies": "9",
"ref": "refs/heads/develop",
"path": "addons/bitbucket/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93635"
},
{
"name": "Dockerfile",
"bytes": "5876"
},
{
"name": "HTML",
"bytes": "373738"
},
{
"name": "JavaScript",
"bytes": "1596130"
},
{
"name": "Mako",
"bytes": "679193"
},
{
"name": "Python",
"bytes": "11587197"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import asyncio
import netius
@asyncio.coroutine
def compute(x, y):
print("Compute %s + %s ..." % (x, y))
yield from asyncio.sleep(1.0)
return x + y
@asyncio.coroutine
def print_sum(x, y):
result = yield from compute(x, y)
print("%s + %s = %s" % (x, y, result))
loop = netius.get_loop(_compat = True)
loop.run_until_complete(print_sum(1, 2))
loop.close()
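# The generator-based style above predates native coroutines; on Python 3.5+
# the same example can be written with async/await (a sketch using the same
# netius-provided loop -- note that @asyncio.coroutine is deprecated since
# Python 3.8):
#
#   async def compute(x, y):
#       print("Compute %s + %s ..." % (x, y))
#       await asyncio.sleep(1.0)
#       return x + y
#
#   async def print_sum(x, y):
#       result = await compute(x, y)
#       print("%s + %s = %s" % (x, y, result))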
|
{
"content_hash": "7f0650372f8b8d6f570dfc2dde3c0082",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 29.8,
"alnum_prop": 0.6644295302013423,
"repo_name": "hivesolutions/netius",
"id": "82eb7f39ae2383efeb67ce43813a58cc0a0ec913",
"size": "1685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/basic/loop_asyncio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1400497"
}
],
"symlink_target": ""
}
|
import os
import sys
import math
import time
import codecs
import random
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn import init
import torch.autograd as autograd
from torch.autograd import Variable
from random import shuffle
parser = argparse.ArgumentParser()
parser.add_argument('--word_dim', dest='word_dim', type=int, help='word embedding dimension', default=20)
parser.add_argument('--hid_size', dest='hid_size', type=int, help='hidden dimension size', default=50)
parser.add_argument('--enc_size', dest='enc_size', type=int, help='encode dimension size', default=10)
parser.add_argument('--epochs', dest='epochs', type=int, help='epochs for model training', default=10)
parser.add_argument('--learning_rate', dest='learning_rate', type=float, help='learning_rate', default=5e-3)
parser.add_argument('--init_weight', dest='init_weight', type=float, help='initial weight for OOV', default=5e-1)
parser.add_argument('--seed', dest='seed', type=int, help='random seed', default=2718281828)
parser.add_argument('--emb_file', dest='emb_file', type=str, help='embedding file', default='/Volumes/SANDISK/Workshop/Corpus/bin/text.txt')
parser.add_argument('--data_file', dest='data_file', type=str, help='data file', default='/Volumes/SANDISK/Workshop/Corpus/imdb/tmp')
args = parser.parse_args()
torch.manual_seed(args.seed)
use_gpu = torch.cuda.is_available()
print(args)
class RVAE(nn.Module):
def __init__(self, word_dim, hid_size, enc_size):
super(RVAE, self).__init__()
self.WeC2P = nn.Linear(2 * word_dim, word_dim)
self.WeP2H = nn.Linear(word_dim, hid_size)
self.WeH2H = nn.Linear(hid_size, hid_size)
self.WeH2M = nn.Linear(hid_size, enc_size)
self.WeH2D = nn.Linear(hid_size, enc_size)
self.WdE2H = nn.Linear(enc_size, hid_size)
self.WdH2H = nn.Linear(hid_size, hid_size)
self.WdH2P = nn.Linear(hid_size, word_dim)
self.WdP2C = nn.Linear(word_dim, 2 * word_dim)
self.tanh = nn.Tanh()
self.soft = nn.LogSoftmax()
def encode(self, sent, node, size):
parent = Variable(torch.FloatTensor(torch.randn(1, size)))
for i in range(node):
if i == 0:
parent = sent[i]
else:
parent = self.tanh(self.WeC2P(torch.cat((parent, sent[i]), 0)))
hid_code = self.tanh(self.WeH2H(self.tanh(self.WeP2H(parent))))
mu = self.tanh(self.WeH2M(hid_code))
va = self.tanh(self.WeH2D(hid_code))
return hid_code, mu, va
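    # Note on encode(): the sentence is folded left-to-right, i.e.
    # parent_i = tanh(WeC2P([parent_{i-1}; w_i])), so a variable-length
    # sentence collapses into a single word_dim vector before being projected
    # to the latent mean (mu) and log-variance (va).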
def reparameterize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
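    # reparameterize() is the standard VAE reparameterization trick:
    # z = mu + exp(0.5 * logvar) * eps with eps ~ N(0, I), which keeps the
    # sampling step differentiable with respect to mu and logvar.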
def decode(self, code, node, size):
sent = Variable(torch.FloatTensor(torch.randn(node, size)).zero_())
parent = self.tanh(self.WdH2P(self.tanh(self.WdH2H(self.tanh(self.WdE2H(code))))))
for i in range(node - 1):
if i < node - 2:
children = self.tanh(self.WdP2C(parent))
sent[node - i - 1] = children[0:size]
parent = children[size:]
else:
children = self.tanh(self.WdP2C(parent))
sent[1] = children[0:size]
sent[0] = children[size:]
return sent
def forward(self, sent):
node = sent.size()[0]
size = sent.size()[1]
hi, mu, va = self.encode(sent, node, size)
zc = self.reparameterize(mu, va)
se = self.decode(zc, node, size)
sc = self.soft(zc)
return se, mu, va, sc
def loss_function(recon_x, x, mu, logvar):
mse = nn.MSELoss()
mse_loss = mse(recon_x, x)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return mse_loss + KLD
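# loss_function() is the usual VAE objective: the MSE term scores
# reconstruction, and KLD is the closed-form KL divergence between the
# approximate posterior N(mu, exp(logvar)) and the N(0, I) prior:
#   KL = -0.5 * sum(1 + logvar - mu^2 - exp(logvar))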
cel_criterion = nn.CrossEntropyLoss()
rvae = RVAE(args.word_dim, args.hid_size, args.enc_size)
optimizer = optim.Adam(rvae.parameters(), lr=args.learning_rate)
if use_gpu:
    rvae = rvae.cuda()
cel_criterion = cel_criterion.cuda()
def sentiment_corpus(srcFile, tarFile):
wrtFile = codecs.open(tarFile, 'a+', 'utf-8')
with open(srcFile, encoding='utf-8') as f:
for line in f:
label = line[1:2]
sent = ""
tokens = line.split()
for i in range(len(tokens)):
if ")" in tokens[i]:
words = tokens[i].split(')')
if "LRB" not in words[0] and "RRB" not in words[0] and "--" not in words[0]:
if len(sent) == 0:
sent = words[0]
else:
sent = sent + " " + words[0]
sent = sent.replace("\/", " ")
sent = sent.replace(" - ", " ")
sent = sent.replace("\*", " ")
sent = sent.replace(" ", " ")
sent = sent.replace(" ", " ")
toks = sent.split()
if len(toks) > 4:
wrtFile.write(label + "\t" + sent + "\n")
wrtFile.close()
def data():
srcTrainFile = "/Users/hjp/MacBook/Workspace/Workshop/Corpus/ssc/train.txt"
tarTrainFile = "/Users/hjp/MacBook/Workspace/Workshop/Corpus/tmp/train.txt"
srcValidFile = "/Users/hjp/MacBook/Workspace/Workshop/Corpus/ssc/valid.txt"
tarValidFile = "/Users/hjp/MacBook/Workspace/Workshop/Corpus/tmp/valid.txt"
srcTestFile = "/Users/hjp/MacBook/Workspace/Workshop/Corpus/ssc/test.txt"
tarTestFile = "/Users/hjp/MacBook/Workspace/Workshop/Corpus/tmp/test.txt"
sentiment_corpus(srcTrainFile, tarTrainFile)
sentiment_corpus(srcValidFile, tarValidFile)
sentiment_corpus(srcTestFile, tarTestFile)
def read_embedding():
emb_voc, emb_vec = [], {}
with open(args.emb_file, encoding='utf-8') as lines:
for line in lines:
tokens = line.split()
if len(tokens) == (args.word_dim + 1):
emb_voc.append(tokens[0])
emb_vec[tokens[0]] = np.asarray(tokens[1:], dtype='float32')
lines.close()
return emb_voc, emb_vec
oov, voc = 0, 0
def build_embedding(ssc_voc, ssc_vec, line, emb_voc, emb_vec):
global oov, voc
tokens = line.split()
for i in range(len(tokens)):
if tokens[i] not in ssc_voc:
voc += 1
ssc_voc.append(tokens[i])
if tokens[i] in emb_voc:
ssc_vec[tokens[i]] = torch.from_numpy(emb_vec[tokens[i]]).view(1, args.word_dim)
else:
ssc_vec[tokens[i]] = init.normal(torch.Tensor(1, args.word_dim), 0, args.init_weight)
oov += 1
return ssc_voc, ssc_vec
def read_corpus(emb_voc, emb_vec):
ssc_voc, ssc_vec = [], {}
train_data, valid_data, test_data = [], [], []
assert os.path.exists(args.data_file)
with open(os.path.join(args.data_file, 'train.txt'), encoding='utf-8') as lines:
for line in lines:
sents = line.lower().split('\t')
train_data.append(sents[0] + "\t" + sents[1])
ssc_voc, ssc_vec = build_embedding(ssc_voc, ssc_vec, sents[1], emb_voc, emb_vec)
with open(os.path.join(args.data_file, 'valid.txt'), encoding='utf-8') as lines:
for line in lines:
sents = line.lower().split('\t')
valid_data.append(sents[0] + "\t" + sents[1])
ssc_voc, ssc_vec = build_embedding(ssc_voc, ssc_vec, sents[1], emb_voc, emb_vec)
with open(os.path.join(args.data_file, 'test.txt'), encoding='utf-8') as lines:
for line in lines:
sents = line.lower().split('\t')
test_data.append(sents[0] + "\t" + sents[1])
ssc_voc, ssc_vec = build_embedding(ssc_voc, ssc_vec, sents[1], emb_voc, emb_vec)
return train_data, valid_data, test_data, ssc_voc, ssc_vec
def build_semcom(line, ssc_vec):
sents = line.split('\t')
tokens = sents[1].split()
tags = sents[2].split()
label = torch.LongTensor([int(sents[0])])
row, idx = 0, 0
for i in range(len(tags)):
if tags[i][0:1] == 'b' or tags[i][0:1] == 'o':
row += 1
sentm = torch.FloatTensor(row, args.word_dim)
for i in range(len(tags)):
if tags[i][0:1] == 'b' or tags[i][0:1] == 'o':
sentm[idx] = ssc_vec[tokens[i]]
if idx < row - 1:
idx = idx + 1
else:
sentm[idx] = sentm[idx] + ssc_vec[tokens[i]]
return label, sentm
def build_matrix(line, ssc_vec):
sents = line.lower().split('\t')
tokens = sents[1].split()
label = torch.LongTensor([int(sents[0])])
sentm = torch.FloatTensor(len(tokens), args.word_dim).zero_()
for i in range(len(tokens)):
sentm[i] = ssc_vec[tokens[i]]
return label, sentm
def set_timer(sec):
min = math.floor(sec / 60)
sec -= min * 60
return '%d min %d sec!' % (min, sec)
def train():
accu_best, accu_test = 0, 0
emb_voc, emb_vec = read_embedding()
train_data, valid_data, test_data, ssc_voc, ssc_vec = read_corpus(emb_voc, emb_vec)
for i in range(args.epochs):
start = time.time()
random.shuffle(train_data)
epoch_storage, train_correct, valid_correct, test_correct = 0, 0, 0, 0
for j in range(len(train_data)):
label, sentm = build_matrix(train_data[j], ssc_vec)
sent = Variable(sentm)
target = Variable(label)
if use_gpu:
sent = sent.cuda()
target = target.cuda()
out, mu, va, sc = rvae(sent)
_, predicted = torch.max(sc.view(1, args.enc_size), 1)
if predicted[0].data[0] == target.data[0]:
train_correct += 1
loss = loss_function(out, sent, mu, va)
if use_gpu:
loss = loss.cuda()
loss = loss + cel_criterion(sc.view(1, args.enc_size), target)
epoch_storage += loss.data[0]
optimizer.zero_grad()
loss.backward()
optimizer.step()
print("total loss: " + str(epoch_storage / len(train_data)))
print("train accu: " + str(train_correct / len(train_data)))
for j in range(len(valid_data)):
label, sentm = build_matrix(valid_data[j], ssc_vec)
sent = Variable(sentm)
target = Variable(label)
if use_gpu:
sent = sent.cuda()
target = target.cuda()
out, mu, va, sc = rvae(sent)
_, predicted = torch.max(sc.view(1, args.enc_size), 1)
if predicted[0].data[0] == target.data[0]:
valid_correct += 1
accu_valid = valid_correct / len(valid_data)
print("valid accu: " + str(accu_valid))
if accu_valid > accu_best:
accu_best = accu_valid
for j in range(len(test_data)):
label, sentm = build_matrix(test_data[j], ssc_vec)
sent = Variable(sentm)
target = Variable(label)
if use_gpu:
sent = sent.cuda()
target = target.cuda()
out, mu, va, sc = rvae(sent)
_, predicted = torch.max(sc.view(1, args.enc_size), 1)
if predicted[0].data[0] == target.data[0]:
test_correct += 1
print("epochs: " + str(i + 1) + " test accu: " + str(test_correct / len(test_data)))
end = time.time()
print("cost time: " + set_timer(end - start))
def main():
train()
print("oov: ", oov, "\tvoc: ", voc)
if __name__ == "__main__":
main()
|
{
"content_hash": "58d948e4897012114863c2263610339e",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 140,
"avg_line_length": 36.01510574018127,
"alnum_prop": 0.5474372955288986,
"repo_name": "hjpwhu/Python",
"id": "e9f52354ad473aa1deab2b8c32a80f85200fad23",
"size": "11921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hjp.edu.torch.ml.rvae/rvae.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111395"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from profile import views
urlpatterns = [
url(r'^$', views.login, name='login'),
url(r'^login/', views.login, name='login'),
]
|
{
"content_hash": "da389d7b6ef1d1bc659ef382efce9d11",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 24.142857142857142,
"alnum_prop": 0.6568047337278107,
"repo_name": "quangnguyen-asnet/python-django",
"id": "a6f13f8e4daeaf28e4446bffc0ccc4081da83de7",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mymenu/profile/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "686"
},
{
"name": "HTML",
"bytes": "16744"
},
{
"name": "JavaScript",
"bytes": "838"
},
{
"name": "Python",
"bytes": "26685"
}
],
"symlink_target": ""
}
|
import jingo
from pyquery import PyQuery as pq
from olympia.amo.tests import TestCase
from olympia.addons.models import Addon
from olympia.amo.urlresolvers import reverse
from olympia.reviews.models import ReviewFlag
from olympia.reviews.forms import ReviewForm
class HelpersTest(TestCase):
def render(self, s, context={}):
t = jingo.get_env().from_string(s)
return t.render(context)
def test_stars(self):
s = self.render('{{ num|stars }}', {'num': None})
assert s == 'Not yet rated'
doc = pq(self.render('{{ num|stars }}', {'num': 1}))
msg = 'Rated 1 out of 5 stars'
assert doc.attr('class') == 'stars stars-1'
assert doc.attr('title') == msg
assert doc.text() == msg
def test_stars_details_page(self):
doc = pq(self.render('{{ num|stars(large=True) }}', {'num': 2}))
assert doc('.stars').attr('class') == 'stars large stars-2'
def test_stars_max(self):
doc = pq(self.render('{{ num|stars }}', {'num': 5.3}))
assert doc.attr('class') == 'stars stars-5'
def test_reviews_link(self):
a = Addon(average_rating=4, total_reviews=37, id=1, type=1, slug='xx')
s = self.render('{{ reviews_link(myaddon) }}', {'myaddon': a})
assert pq(s)('strong').text() == '37 reviews'
# without collection uuid
assert pq(s)('a').attr('href') == '/en-US/firefox/addon/xx/#reviews'
# with collection uuid
myuuid = 'f19a8822-1ee3-4145-9440-0a3640201fe6'
s = self.render('{{ reviews_link(myaddon, myuuid) }}',
{'myaddon': a, 'myuuid': myuuid})
assert pq(s)('a').attr('href') == (
'/en-US/firefox/addon/xx/?collection_uuid=%s#reviews' % myuuid)
z = Addon(average_rating=0, total_reviews=0, id=1, type=1, slug='xx')
s = self.render('{{ reviews_link(myaddon) }}', {'myaddon': z})
assert pq(s)('strong').text() == 'Not yet rated'
# with link
u = reverse('addons.reviews.list', args=['xx'])
s = self.render('{{ reviews_link(myaddon, link_to_list=True) }}',
{'myaddon': a})
assert pq(s)('a').attr('href') == u
def test_impala_reviews_link(self):
a = Addon(average_rating=4, total_reviews=37, id=1, type=1, slug='xx')
s = self.render('{{ impala_reviews_link(myaddon) }}', {'myaddon': a})
assert pq(s)('a').text() == '(37)'
# without collection uuid
assert pq(s)('a').attr('href') == '/en-US/firefox/addon/xx/#reviews'
# with collection uuid
myuuid = 'f19a8822-1ee3-4145-9440-0a3640201fe6'
s = self.render('{{ impala_reviews_link(myaddon, myuuid) }}',
{'myaddon': a, 'myuuid': myuuid})
assert pq(s)('a').attr('href') == (
'/en-US/firefox/addon/xx/?collection_uuid=%s#reviews' % myuuid)
z = Addon(average_rating=0, total_reviews=0, id=1, type=1, slug='xx')
s = self.render('{{ impala_reviews_link(myaddon) }}', {'myaddon': z})
assert pq(s)('b').text() == 'Not yet rated'
# with link
u = reverse('addons.reviews.list', args=['xx'])
s = self.render(
'{{ impala_reviews_link(myaddon, link_to_list=True) }}',
{'myaddon': a})
assert pq(s)('a').attr('href') == u
def test_mobile_reviews_link(self):
def s(a):
return pq(self.render('{{ mobile_reviews_link(myaddon) }}',
{'myaddon': a}))
a = Addon(total_reviews=0, id=1, type=1, slug='xx')
doc = s(a)
assert doc('a').attr('href') == (
reverse('addons.reviews.add', args=['xx']))
u = reverse('addons.reviews.list', args=['xx'])
a = Addon(average_rating=4, total_reviews=37, id=1, type=1, slug='xx')
doc = s(a)
assert doc('a').attr('href') == u
assert doc('a').text() == 'Rated 4 out of 5 stars See All 37 Reviews'
a = Addon(average_rating=4, total_reviews=1, id=1, type=1, slug='xx')
doc = s(a)
doc.remove('div')
assert doc('a').attr('href') == u
assert doc('a').text() == 'See All Reviews'
def test_report_review_popup(self):
doc = pq(self.render('{{ report_review_popup() }}'))
assert doc('.popup.review-reason').length == 1
for flag, text in ReviewFlag.FLAGS:
assert doc('li a[href$=%s]' % flag).text() == text
assert doc('form input[name=note]').length == 1
def test_edit_review_form(self):
doc = pq(self.render('{{ edit_review_form() }}'))
assert doc('#review-edit-form').length == 1
assert doc('p.req').length == 1
for name in ReviewForm().fields.keys():
assert doc('[name=%s]' % name).length == 1
|
{
"content_hash": "a905f2af9995d245e0b0339cde86be76",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 78,
"avg_line_length": 39.743801652892564,
"alnum_prop": 0.5485547930962779,
"repo_name": "andymckay/olympia",
"id": "290da0d3a3f13fce221c39bb9b39d134cd82d210",
"size": "4809",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/olympia/reviews/tests/test_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "648054"
},
{
"name": "HTML",
"bytes": "1610164"
},
{
"name": "JavaScript",
"bytes": "1399551"
},
{
"name": "Makefile",
"bytes": "3926"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "4007371"
},
{
"name": "Shell",
"bytes": "10337"
},
{
"name": "Smarty",
"bytes": "2160"
}
],
"symlink_target": ""
}
|
import datetime
from django.core.urlresolvers import reverse
from django import http
from django.utils import timezone
from mox import IsA # noqa
from horizon.templatetags import sizeformat
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard import usage
INDEX_URL = reverse('horizon:project:overview:index')
class UsageViewTests(test.BaseAdminViewTests):
def _stub_nova_api_calls(self, nova_stu_enabled):
self.mox.StubOutWithMock(api.nova, 'usage_list')
self.mox.StubOutWithMock(api.nova, 'tenant_absolute_limits')
self.mox.StubOutWithMock(api.nova, 'extension_supported')
self.mox.StubOutWithMock(api.keystone, 'tenant_list')
self.mox.StubOutWithMock(api.neutron, 'is_extension_supported')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
self.mox.StubOutWithMock(api.network, 'security_group_list')
self.mox.StubOutWithMock(api.cinder, 'tenant_absolute_limits')
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
def test_usage(self):
self._test_usage(nova_stu_enabled=True)
def test_usage_disabled(self):
self._test_usage(nova_stu_enabled=False)
def _test_usage(self, nova_stu_enabled=True):
self._stub_nova_api_calls(nova_stu_enabled)
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
now = timezone.now()
usage_obj = api.nova.NovaUsage(self.usages.first())
api.keystone.tenant_list(IsA(http.HttpRequest)) \
.AndReturn([self.tenants.list(), False])
if nova_stu_enabled:
api.nova.usage_list(IsA(http.HttpRequest),
datetime.datetime(now.year,
now.month,
now.day, 0, 0, 0, 0),
datetime.datetime(now.year,
now.month,
now.day, 23, 59, 59, 0)) \
.AndReturn([usage_obj])
api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
.AndReturn(self.limits['absolute'])
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'security-group').AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.q_secgroups.list())
api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
.AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:overview:index'))
self.assertTemplateUsed(res, 'admin/overview/usage.html')
self.assertTrue(isinstance(res.context['usage'], usage.GlobalUsage))
self.assertEqual(nova_stu_enabled,
res.context['simple_tenant_usage_enabled'])
usage_table = '<td class="sortable normal_column">test_tenant</td>' \
'<td class="sortable normal_column">%s</td>' \
'<td class="sortable normal_column">%s</td>' \
'<td class="sortable normal_column">%s</td>' \
'<td class="sortable normal_column">%.2f</td>' \
'<td class="sortable normal_column">%.2f</td>' % \
(usage_obj.vcpus,
usage_obj.disk_gb_hours,
sizeformat.mbformat(usage_obj.memory_mb),
usage_obj.vcpu_hours,
usage_obj.total_local_gb_usage)
if nova_stu_enabled:
self.assertContains(res, usage_table)
else:
self.assertNotContains(res, usage_table)
def test_usage_csv(self):
self._test_usage_csv(nova_stu_enabled=True)
def test_usage_csv_disabled(self):
self._test_usage_csv(nova_stu_enabled=False)
def _test_usage_csv(self, nova_stu_enabled=True):
self._stub_nova_api_calls(nova_stu_enabled)
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
now = timezone.now()
usage_obj = [api.nova.NovaUsage(u) for u in self.usages.list()]
api.keystone.tenant_list(IsA(http.HttpRequest)) \
.AndReturn([self.tenants.list(), False])
if nova_stu_enabled:
api.nova.usage_list(IsA(http.HttpRequest),
datetime.datetime(now.year,
now.month,
now.day, 0, 0, 0, 0),
datetime.datetime(now.year,
now.month,
now.day, 23, 59, 59, 0)) \
.AndReturn(usage_obj)
api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
.AndReturn(self.limits['absolute'])
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'security-group').AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.q_secgroups.list())
api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
.AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
csv_url = reverse('horizon:admin:overview:index') + "?format=csv"
res = self.client.get(csv_url)
self.assertTemplateUsed(res, 'admin/overview/usage.csv')
self.assertTrue(isinstance(res.context['usage'], usage.GlobalUsage))
hdr = 'Project Name,VCPUs,Ram (MB),Disk (GB),Usage (Hours)'
self.assertContains(res, '%s\r\n' % hdr)
if nova_stu_enabled:
for obj in usage_obj:
row = u'{0},{1},{2},{3},{4:.2f}\r\n'.format(obj.project_name,
obj.vcpus,
obj.memory_mb,
obj.disk_gb_hours,
obj.vcpu_hours)
self.assertContains(res, row)
|
{
"content_hash": "ecdd990d155bb6792ceb8be1dc8b85b0",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 78,
"avg_line_length": 46.71917808219178,
"alnum_prop": 0.5405365782143381,
"repo_name": "neudesk/neucloud",
"id": "2dda328498c200ae8befc9c3dae1ab41c4f21202",
"size": "7630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/overview/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "169426"
},
{
"name": "JavaScript",
"bytes": "426538"
},
{
"name": "Python",
"bytes": "3100734"
},
{
"name": "Shell",
"bytes": "13743"
}
],
"symlink_target": ""
}
|
"""Functionality to support interacting with an OnRamp Database
"""
import os
import json
import exceptions
class Database():
# JJH TODO Need a reverse lookup
pce_states = { 0 : "Running",
1 : "Establishing Connection",
2 : "Down",
-1 : "Error: Undefined",
}
module_states = { 0 : "Not on PCE",
1 : "Available on PCE, Not Installed",
2 : "Checkout in progress",
-2 : "Error: Checkout failed",
3 : "Available on PCE, Installed, Not Deployed",
4 : "Available on PCE, Deploying",
-4 : "Error: Deploy failed",
5 : "Available on PCE, Deploy wait for admin",
6 : "Available on PCE, Deployed",
-99 : "Error: Undefined",
}
job_states = { 0 : "Unknown job id",
1 : "Setting up launch",
-1 : "Launch failed",
2 : "Preprocessing",
-2 : "Preprocess failed",
3 : "Scheduled",
-3 : "Schedule failed",
4 : "Queued",
5 : "Running",
-5 : "Run failed",
6 : "Postprocessing",
7 : "Done",
-99 : "Error: Undefined",
}
def __init__(self, logger, auth):
self._auth = auth
self._logger = logger
    def is_connected(self):
        raise NotImplementedError("Please implement this method")
    def connect(self):
        raise NotImplementedError("Please implement this method")
    def disconnect(self):
        raise NotImplementedError("Please implement this method")
##########################################################
    def is_valid_session_id(self, session_id):
        raise NotImplementedError("Please implement this method")
    def is_valid_user_id(self, user_id):
        raise NotImplementedError("Please implement this method")
    def is_valid_workspace_id(self, workspace_id):
        raise NotImplementedError("Please implement this method")
    def is_valid_pce_id(self, pce_id):
        raise NotImplementedError("Please implement this method")
    def is_valid_module_id(self, module_id):
        raise NotImplementedError("Please implement this method")
    def is_valid_job_id(self, job_id):
        raise NotImplementedError("Please implement this method")
    def is_valid_user_workspace(self, user_id, workspace_id):
        raise NotImplementedError("Please implement this method")
    def is_valid_pce_module(self, pce_id, module_id):
        raise NotImplementedError("Please implement this method")
    def is_valid_workspace_pce_module(self, workspace_id, pce_id, module_id):
        raise NotImplementedError("Please implement this method")
##########################################################
    def is_active_session_id(self, session_id, user_id=None):
        raise NotImplementedError("Please implement this method")
    def session_start(self, user_id):
        raise NotImplementedError("Please implement this method")
    def session_update(self, session_id):
        raise NotImplementedError("Please implement this method")
    def session_stop(self, session_id):
        raise NotImplementedError("Please implement this method")
##########################################################
    def get_user_id(self, req_admin, username, password=None):
        raise NotImplementedError("Please implement this method")
    def add_user(self, username, password):
        raise NotImplementedError("Please implement this method")
    def get_user_info(self, user_id=None):
        raise NotImplementedError("Please implement this method")
    def get_user_workspaces(self, user_id):
        raise NotImplementedError("Please implement this method")
    def get_user_jobs(self, user_id, search_params):
        raise NotImplementedError("Please implement this method")
##########################################################
    def get_workspace_id(self, name):
        raise NotImplementedError("Please implement this method")
    def add_workspace(self, name):
        raise NotImplementedError("Please implement this method")
    def lookup_user_in_workspace(self, workspace_id, user_id):
        raise NotImplementedError("Please implement this method")
    def add_user_to_workspace(self, workspace_id, user_id):
        raise NotImplementedError("Please implement this method")
    def lookup_pair_in_workspace(self, workspace_id, pm_pair_id):
        raise NotImplementedError("Please implement this method")
    def add_pair_to_workspace(self, workspace_id, pm_pair_id):
        raise NotImplementedError("Please implement this method")
    def get_workspace_info(self, workspace_id=None):
        raise NotImplementedError("Please implement this method")
    def get_workspace_doc(self, workspace_id):
        raise NotImplementedError("Please implement this method")
    def get_workspace_users(self, workspace_id):
        raise NotImplementedError("Please implement this method")
    def get_workspace_pairs(self, workspace_id):
        raise NotImplementedError("Please implement this method")
    def get_workspace_jobs(self, workspace_id, search_params):
        raise NotImplementedError("Please implement this method")
##########################################################
    def get_all_pce_ids(self):
        raise NotImplementedError("Please implement this method")
    def get_pce_id(self, info):
        raise NotImplementedError("Please implement this method")
    def add_pce(self, info):
        raise NotImplementedError("Please implement this method")
    def lookup_module_in_pce(self, pce_id, module_id):
        raise NotImplementedError("Please implement this method")
    def add_module_to_pce(self, pce_id, module_id, src_location_type='local', src_location_path=''):
        raise NotImplementedError("Please implement this method")
    def update_pce_module_state(self, pce_id, module_id, state):
        raise NotImplementedError("Please implement this method")
    def get_pce_info(self, pce_id=None):
        raise NotImplementedError("Please implement this method")
    def get_pce_state(self, pce_id):
        raise NotImplementedError("Please implement this method")
    def update_pce_state(self, pce_id, state):
        raise NotImplementedError("Please implement this method")
    def get_pce_doc(self, pce_id):
        raise NotImplementedError("Please implement this method")
    def get_pce_workspaces(self, pce_id):
        raise NotImplementedError("Please implement this method")
    def get_pce_modules(self, pce_id, module_id=None):
        raise NotImplementedError("Please implement this method")
    def get_pce_jobs(self, pce_id, search_params):
        raise NotImplementedError("Please implement this method")
##########################################################
    def get_module_id(self, name):
        raise NotImplementedError("Please implement this method")
    def add_module(self, name):
        raise NotImplementedError("Please implement this method")
    def get_module_info(self, module_id=None):
        raise NotImplementedError("Please implement this method")
    def get_module_doc(self, module_id):
        raise NotImplementedError("Please implement this method")
    def get_module_pces(self, module_id):
        raise NotImplementedError("Please implement this method")
    def get_module_jobs(self, module_id, search_params):
        raise NotImplementedError("Please implement this method")
##########################################################
    def find_job_id(self, user_id, workspace_id, pce_id, module_id, job_name):
        raise NotImplementedError("Please implement this method")
    def add_job(self, user_id, workspace_id, pce_id, module_id, job_data):
        raise NotImplementedError("Please implement this method")
    def get_job_info(self, module_id=None, search_params={}):
        raise NotImplementedError("Please implement this method")
    def get_job_data(self, job_id):
        raise NotImplementedError("Please implement this method")
    def update_job_state(self, job_id, state):
        raise NotImplementedError("Please implement this method")
##########################################################
from webapp.onrampdb_sqlite import Database_sqlite
##########################################
class DBAccess():
_known_db = { 'sqlite' : Database_sqlite }
##########################################
def __init__(self, logger, dbtype, auth):
self._logger = logger
if dbtype not in self._known_db:
self._logger.critical( "Database: \"%s\" is not supported." % (dbtype) )
raise NotImplementedError
self._db = self._known_db[dbtype](logger, auth)
##########################################
# State translations
##########################################
def get_pce_state_str(self, id):
if id not in self._db.pce_states:
return None
return self._db.pce_states[id]
def get_pce_states(self):
return self._db.pce_states
def get_module_state_str(self, id):
if id not in self._db.module_states:
return None
return self._db.module_states[id]
def get_module_states(self):
return self._db.module_states
def get_job_state_str(self, id):
if id not in self._db.job_states:
return None
return self._db.job_states[id]
def get_job_states(self):
return self._db.job_states
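    # Example (a sketch, assuming `access` is a constructed DBAccess):
    #   access.get_job_state_str(5)    # -> "Running"
    #   access.get_job_state_str(42)   # -> None for unknown state ids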
##########################################
# Valid keys
##########################################
def is_valid_user_id(self, user_id):
self._db.connect()
result = self._db.is_valid_user_id(user_id)
self._db.disconnect()
return result
def is_valid_workspace_id(self, workspace_id):
self._db.connect()
result = self._db.is_valid_workspace_id(workspace_id)
self._db.disconnect()
return result
def is_valid_pce_id(self, pce_id):
self._db.connect()
result = self._db.is_valid_pce_id(pce_id)
self._db.disconnect()
return result
def is_valid_module_id(self, module_id):
self._db.connect()
result = self._db.is_valid_module_id(module_id)
self._db.disconnect()
return result
def is_valid_job_id(self, job_id):
self._db.connect()
result = self._db.is_valid_job_id(job_id)
self._db.disconnect()
return result
def is_valid_user_workspace(self, user_id, workspace_id):
self._db.connect()
result = self._db.is_valid_user_workspace(user_id, workspace_id)
self._db.disconnect()
return result
def is_valid_pce_module(self, pce_id, module_id):
self._db.connect()
result = self._db.is_valid_pce_module(pce_id, module_id)
self._db.disconnect()
return result
def is_valid_workspace_pce_module(self, workspace_id, pce_id, module_id):
self._db.connect()
result = self._db.is_valid_workspace_pce_module(workspace_id, pce_id, module_id)
self._db.disconnect()
return result
##########################################
# User Management
##########################################
def user_login(self, username, password):
self._db.connect()
user_id = self._db.get_user_id(False, username, password)
if user_id is None:
self._db.disconnect()
return None
session_id = self._db.session_start(user_id)
self._db.disconnect()
# TODO create a real apikey tied to this session
return {'user_id': user_id, 'session_id': session_id, 'apikey' : session_id}
def user_update(self, auth ):
self._db.connect()
self._db.session_update( auth['session_id'] )
self._db.disconnect()
return True
def user_logout(self, auth ):
self._db.connect()
self._db.session_stop( auth['session_id'] )
self._db.disconnect()
return True
def check_user_apikey(self, apikey ):
self._db.connect()
result = self._db.is_active_session_id( apikey )
self._db.disconnect()
return result
def check_user_auth(self, auth, req_admin=False ):
req_keys = ["session_id", "username", "user_id"]
for key in req_keys:
if key not in auth.keys():
return False
user_id = self.user_lookup( auth['username'], req_admin=req_admin )
# Username does not exist
if user_id is None:
return False
# ID mismatch
elif user_id != auth['user_id']:
return False
# Session inactive -- TODO
else:
self._db.connect()
result = self._db.is_active_session_id(auth['session_id'], auth['user_id'])
self._db.disconnect()
return result
return True
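    # A sketch of the auth payload check_user_auth() expects (field names as
    # required by req_keys above; `access` is a constructed DBAccess):
    #   auth = {'username': 'alice', 'user_id': 42, 'session_id': 7}
    #   access.check_user_auth(auth)                  # regular user check
    #   access.check_user_auth(auth, req_admin=True)  # admin-only paths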
##########################################
def user_add_if_new(self, username, password):
self._db.connect()
info = {}
user_id = self._db.get_user_id(False, username)
if user_id is not None:
info['exists'] = True
else:
info['exists'] = False
user_id = self._db.add_user(username, password)
info['id'] = user_id
self._db.disconnect()
return info
##########################################
def user_lookup(self, username, req_admin=False):
self._db.connect()
user_id = self._db.get_user_id(req_admin, username)
self._db.disconnect()
return user_id
##########################################
def user_add(self, username, password):
self._db.connect()
user_id = self._db.add_user(username, password)
self._db.disconnect()
return user_id
##########################################
def user_get_info(self, user_id=None):
self._db.connect()
if user_id is not None and self._db.is_valid_user_id(user_id) is False:
self._logger.error("Invalid User ID ("+str(user_id)+")")
self._db.disconnect()
return None
user_info = self._db.get_user_info(user_id)
self._db.disconnect()
return user_info
##########################################
def user_get_workspaces(self, user_id):
self._db.connect()
if self._db.is_valid_user_id(user_id) is False:
self._logger.error("Invalid User ID ("+str(user_id)+")")
self._db.disconnect()
return None
user_info = self._db.get_user_workspaces(user_id)
self._db.disconnect()
return user_info
##########################################
def user_get_jobs(self, user_id, search_params={}):
self._db.connect()
if self._db.is_valid_user_id(user_id) is False:
self._logger.error("Invalid User ID ("+str(user_id)+")")
self._db.disconnect()
return None
user_info = self._db.get_user_jobs(user_id, search_params)
self._db.disconnect()
return user_info
##########################################
# Workspace Management
##########################################
def workspace_add_if_new(self, name):
self._db.connect()
info = {}
workspace_id = self._db.get_workspace_id(name)
if workspace_id is not None:
info['exists'] = True
else:
info['exists'] = False
workspace_id = self._db.add_workspace(name)
info['id'] = workspace_id
self._db.disconnect()
return info
##########################################
def workspace_lookup(self, name):
self._db.connect()
work_id = self._db.get_workspace_id(name)
self._db.disconnect()
return work_id
##########################################
def workspace_add(self, name):
self._db.connect()
work_id = self._db.add_workspace(name)
self._db.disconnect()
return work_id
##########################################
def workspace_add_user(self, workspace_id, user_id):
self._db.connect()
info = {}
if self._db.is_valid_workspace_id(workspace_id) is False:
info['error_msg'] = "Invalid Workspace ID ("+str(workspace_id)+")"
self._db.disconnect()
return info
if self._db.is_valid_user_id(user_id) is False:
info['error_msg'] = "Invalid User ID ("+str(user_id)+")"
self._db.disconnect()
return info
pair_id = self._db.lookup_user_in_workspace(workspace_id, user_id)
if pair_id is not None:
info['exists'] = True
else:
info['exists'] = False
pair_id = self._db.add_user_to_workspace(workspace_id, user_id)
info['id'] = pair_id
self._db.disconnect()
return info
##########################################
def workspace_add_pair(self, workspace_id, pce_id, module_id):
self._db.connect()
info = {}
if self._db.is_valid_workspace_id(workspace_id) is False:
info['error_msg'] = "Invalid Workspace ID ("+str(workspace_id)+")"
self._db.disconnect()
return info
pm_pair_id = self._db.lookup_module_in_pce(pce_id, module_id)
if pm_pair_id is None:
info['error_msg'] = "Invalid Module / PCE Pair (module="+str(module_id)+", pce="+str(pce_id)+")"
self._db.disconnect()
return info
pair_id = self._db.lookup_pair_in_workspace(workspace_id, pm_pair_id)
if pair_id is not None:
info['exists'] = True
else:
info['exists'] = False
pair_id = self._db.add_pair_to_workspace(workspace_id, pm_pair_id)
info['id'] = pair_id
self._db.disconnect()
return info
##########################################
def workspace_get_info(self, workspace_id=None):
self._db.connect()
if workspace_id is not None and self._db.is_valid_workspace_id(workspace_id) is False:
self._logger.error("Invalid Workspace ID ("+str(workspace_id)+")")
self._db.disconnect()
return None
workspace_info = self._db.get_workspace_info(workspace_id)
self._db.disconnect()
return workspace_info
##########################################
def workspace_get_doc(self, workspace_id):
self._db.connect()
if workspace_id is not None and self._db.is_valid_workspace_id(workspace_id) is False:
self._logger.error("Invalid Workspace ID ("+str(workspace_id)+")")
self._db.disconnect()
return None
workspace_info = self._db.get_workspace_doc(workspace_id)
self._db.disconnect()
return workspace_info
##########################################
def workspace_get_users(self, workspace_id):
self._db.connect()
if workspace_id is not None and self._db.is_valid_workspace_id(workspace_id) is False:
self._logger.error("Invalid Workspace ID ("+str(workspace_id)+")")
self._db.disconnect()
return None
workspace_info = self._db.get_workspace_users(workspace_id)
self._db.disconnect()
return workspace_info
##########################################
def workspace_get_pairs(self, workspace_id):
self._db.connect()
if workspace_id is not None and self._db.is_valid_workspace_id(workspace_id) is False:
self._logger.error("Invalid Workspace ID ("+str(workspace_id)+")")
self._db.disconnect()
return None
workspace_info = self._db.get_workspace_pairs(workspace_id)
self._db.disconnect()
return workspace_info
##########################################
def workspace_get_jobs(self, workspace_id, search_params={}):
self._db.connect()
if self._db.is_valid_workspace_id(workspace_id) is False:
self._logger.error("Invalid Workspace ID ("+str(workspace_id)+")")
self._db.disconnect()
return None
workspace_info = self._db.get_workspace_jobs(workspace_id, search_params)
self._db.disconnect()
return workspace_info
##########################################
# PCE Management
##########################################
def pce_get_all_ids(self):
self._db.connect()
info = self._db.get_all_pce_ids()
self._db.disconnect()
return info
def pce_add_if_new(self, data):
self._db.connect()
info = {}
pce_id = self._db.get_pce_id(data)
if pce_id is not None:
info['exists'] = True
else:
info['exists'] = False
pce_id = self._db.add_pce(data)
pce_state = self._db.get_pce_state(pce_id)
info['id'] = pce_id
info['state'] = pce_state
self._db.disconnect()
return info
##########################################
def pce_lookup(self, name):
self._db.connect()
pce_id = self._db.get_pce_id(name)
self._db.disconnect()
return pce_id
##########################################
def pce_add(self, name):
self._db.connect()
pce_id = self._db.add_pce(name)
self._db.disconnect()
return pce_id
##########################################
def pce_add_module(self, pce_id, module_id, src_location_type='local', src_location_path='' ):
self._db.connect()
info = {}
if self._db.is_valid_pce_id(pce_id) is False:
info['error_msg'] = "Invalid PCE ID ("+str(pce_id)+")"
self._db.disconnect()
return info
if self._db.is_valid_module_id(module_id) is False:
info['error_msg'] = "Invalid Module ID ("+str(module_id)+")"
self._db.disconnect()
return info
pair_id = self._db.lookup_module_in_pce(pce_id, module_id)
if pair_id is not None:
info['exists'] = True
else:
info['exists'] = False
pair_id = self._db.add_module_to_pce(pce_id, module_id, src_location_type, src_location_path)
info['id'] = pair_id
self._db.disconnect()
return info
##########################################
def pce_update_module_state(self, pce_id, module_id, state):
self._db.connect()
pm_pair_id = self._db.lookup_module_in_pce(pce_id, module_id)
if pm_pair_id is None:
self._logger.error("Invalid Module / PCE Pair (module="+str(module_id)+", pce="+str(pce_id)+")")
self._db.disconnect()
return None
pce_info = self._db.update_pce_module_state(pce_id, module_id, state)
self._db.disconnect()
return pce_info
##########################################
def pce_get_info(self, pce_id=None):
self._db.connect()
if pce_id is not None and self._db.is_valid_pce_id(pce_id) is False:
self._logger.error("Invalid PCE ID ("+str(pce_id)+")")
self._db.disconnect()
return None
pce_info = self._db.get_pce_info(pce_id)
self._db.disconnect()
return pce_info
##########################################
def pce_get_state(self, pce_id):
self._db.connect()
if pce_id is not None and self._db.is_valid_pce_id(pce_id) is False:
self._logger.error("Invalid PCE ID ("+str(pce_id)+")")
self._db.disconnect()
return None
pce_state = self._db.get_pce_state(pce_id)
self._db.disconnect()
return pce_state
##########################################
def pce_update_state(self, pce_id, state):
self._db.connect()
if pce_id is not None and self._db.is_valid_pce_id(pce_id) is False:
self._logger.error("Invalid PCE ID ("+str(pce_id)+")")
self._db.disconnect()
return None
if state not in self._db.pce_states:
self._logger.error("Invalid PCE State ("+str(state)+")")
self._db.disconnect()
return None
pce_info = self._db.update_pce_state(pce_id, state)
self._db.disconnect()
return pce_info
##########################################
def pce_get_doc(self, pce_id):
self._db.connect()
if pce_id is not None and self._db.is_valid_pce_id(pce_id) is False:
self._logger.error("Invalid Pce ID ("+str(pce_id)+")")
self._db.disconnect()
return None
pce_info = self._db.get_pce_doc(pce_id)
self._db.disconnect()
return pce_info
##########################################
def pce_get_workspaces(self, pce_id):
self._db.connect()
if pce_id is not None and self._db.is_valid_pce_id(pce_id) is False:
self._logger.error("Invalid Pce ID ("+str(pce_id)+")")
self._db.disconnect()
return None
pce_info = self._db.get_pce_workspaces(pce_id)
self._db.disconnect()
return pce_info
##########################################
def pce_get_modules(self, pce_id, module_id = None):
self._db.connect()
if pce_id is not None and self._db.is_valid_pce_id(pce_id) is False:
self._logger.error("Invalid Pce ID ("+str(pce_id)+")")
self._db.disconnect()
return None
pce_info = self._db.get_pce_modules(pce_id, module_id)
# Add the string representation of each of the module states
pce_info["fields"] = list(pce_info["fields"])
pce_info["fields"].append("state_str")
self._logger.debug("module_id ("+str(module_id)+")")
if module_id is None:
data = []
for m in pce_info["data"]:
m = list(m)
sstr = self.get_module_state_str( m[2] )
m.append( sstr )
data.append(m)
pce_info["data"] = data
else:
pce_info["data"] = list(pce_info["data"])
pce_info["data"].append( self.get_module_state_str( pce_info["data"][2] ) )
self._db.disconnect()
return pce_info
##########################################
def pce_get_jobs(self, pce_id, search_params={}):
self._db.connect()
if pce_id is not None and self._db.is_valid_pce_id(pce_id) is False:
self._logger.error("Invalid Pce ID ("+str(pce_id)+")")
self._db.disconnect()
return None
pce_info = self._db.get_pce_jobs(pce_id, search_params)
self._db.disconnect()
return pce_info
##########################################
# Module Management
##########################################
def module_add_if_new(self, name):
self._db.connect()
info = {}
module_id = self._db.get_module_id(name)
if module_id is not None:
info['exists'] = True
else:
info['exists'] = False
module_id = self._db.add_module(name)
info['id'] = module_id
self._db.disconnect()
return info
##########################################
def module_lookup(self, name):
self._db.connect()
module_id = self._db.get_module_id(name)
self._db.disconnect()
        return int(module_id) if module_id is not None else None
##########################################
def module_add(self, name):
self._db.connect()
module_id = self._db.add_module(name)
self._db.disconnect()
return module_id
##########################################
def module_get_info(self, module_id=None):
self._db.connect()
if module_id is not None and self._db.is_valid_module_id(module_id) is False:
self._logger.error("Invalid Module ID ("+str(module_id)+")")
self._db.disconnect()
return None
module_info = self._db.get_module_info(module_id)
self._db.disconnect()
return module_info
##########################################
def module_get_doc(self, module_id):
self._db.connect()
if module_id is not None and self._db.is_valid_module_id(module_id) is False:
self._logger.error("Invalid Module ID ("+str(module_id)+")")
self._db.disconnect()
return None
module_info = self._db.get_module_doc(module_id)
self._db.disconnect()
return module_info
##########################################
def module_get_pces(self, module_id):
self._db.connect()
if module_id is not None and self._db.is_valid_module_id(module_id) is False:
self._logger.error("Invalid Module ID ("+str(module_id)+")")
self._db.disconnect()
return None
module_info = self._db.get_module_pces(module_id)
self._db.disconnect()
return module_info
##########################################
def module_get_jobs(self, module_id, search_params={}):
self._db.connect()
if module_id is not None and self._db.is_valid_module_id(module_id) is False:
self._logger.error("Invalid Module ID ("+str(module_id)+")")
self._db.disconnect()
return None
module_info = self._db.get_module_jobs(module_id, search_params)
self._db.disconnect()
return module_info
##########################################
# Job Management
##########################################
def job_add(self, user_id, workspace_id, pce_id, module_id, job_data):
self._db.connect()
# See if already exists
job_id = self._db.find_job_id(user_id, workspace_id, pce_id, module_id, job_data['job_name'])
        if job_id is not None:
            self._db.disconnect()
            return (True, job_id)
# Make sure this is a good tuple (allowed to submit the job)
# Check: The User is in the Workspace
if self._db.is_valid_user_workspace(user_id, workspace_id) is False:
self._logger.error("Invalid User ID ("+str(user_id)+") and Workspace ID ("+str(workspace_id)+") combo")
self._db.disconnect()
return None
# Check: The Workspace is allowed to interact with this PCE / Module pair
if self._db.is_valid_workspace_pce_module(workspace_id, pce_id, module_id) is False:
self._logger.error("Invalid Workspace / PCE / Module Combo ("+str(workspace_id)+" / "+str(pce_id)+" / "+str(module_id)+")")
self._db.disconnect()
return None
# Log the job
job_id = self._db.add_job(user_id, workspace_id, pce_id, module_id, job_data)
self._db.disconnect()
return (False, job_id)
##########################################
def job_get_info(self, job_id=None, search_params={}):
self._db.connect()
if job_id is not None and self._db.is_valid_job_id(job_id) is False:
self._logger.error("Invalid Job ID ("+str(job_id)+")")
self._db.disconnect()
return None
job_info = self._db.get_job_info(job_id, search_params)
self._db.disconnect()
return job_info
##########################################
def job_get_data(self, job_id ):
self._db.connect()
if self._db.is_valid_job_id(job_id) is False:
self._logger.error("Invalid Job ID ("+str(job_id)+")")
self._db.disconnect()
return None
job_info = self._db.get_job_data(job_id)
self._db.disconnect()
return job_info
##########################################
def job_update_state(self, job_id, state ):
self._db.connect()
if self._db.is_valid_job_id(job_id) is False:
self._logger.error("Invalid Job ID ("+str(job_id)+")")
self._db.disconnect()
return None
job_info = self._db.update_job_state(job_id, state)
self._db.disconnect()
return job_info
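# Hedged usage sketch for the job flow above ('mgr' stands for an instance of
# this class; the ids, job name, and state string are illustrative only):
#     result = mgr.job_add(user_id=1, workspace_id=2, pce_id=3, module_id=4,
#                          job_data={'job_name': 'demo'})
#     if result is not None:
#         existed, job_id = result
#         if not existed:
#             mgr.job_update_state(job_id, 'Queued')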
|
{
"content_hash": "e747c956648e5295b8c4591ec83e3689",
"timestamp": "",
"source": "github",
"line_count": 963,
"max_line_length": 135,
"avg_line_length": 33.56697819314642,
"alnum_prop": 0.5292807424593967,
"repo_name": "koepked/onramp",
"id": "aa97a2d206d2772fa05641434f43ee1639ac66f4",
"size": "32325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/src/webapp/onrampdb.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1436045"
},
{
"name": "Gnuplot",
"bytes": "701"
},
{
"name": "Groff",
"bytes": "253441"
},
{
"name": "HTML",
"bytes": "584449"
},
{
"name": "JavaScript",
"bytes": "144082"
},
{
"name": "Makefile",
"bytes": "88349"
},
{
"name": "Perl",
"bytes": "5501"
},
{
"name": "Python",
"bytes": "406605"
},
{
"name": "Shell",
"bytes": "8072"
},
{
"name": "SourcePawn",
"bytes": "120276"
},
{
"name": "TeX",
"bytes": "82592"
}
],
"symlink_target": ""
}
|
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from tempest.openstack.common.gettextutils import _
from tempest.openstack.common import importutils
from tempest.openstack.common import jsonutils
from tempest.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS.append(reg_ex)
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of logging configuration file. It does not '
'disable existing loggers, but just appends specified '
'logging configuration to any other existing logging '
'options. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'iso8601=WARN',
],
help='list of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
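# Behavioural sketch of the resolution order above (values are assumptions):
#     log_file='svc.log', log_dir=None        -> 'svc.log'
#     log_file='svc.log', log_dir='/var/log'  -> '/var/log/svc.log'
#     log_file=None,      log_dir='/var/log'  -> '/var/log/<binary>.log'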
def mask_password(message, secret="***"):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
:param secret: value with which to replace passwords.
:returns: The unicode value of message with the password fields masked.
For example:
>>> mask_password("'adminPass' : 'aaaaa'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : 'aaaaa'")
"'admin_pass' : '***'"
>>> mask_password('"password" : "aaaaa"')
'"password" : "***"'
>>> mask_password("'original_password' : 'aaaaa'")
"'original_password' : '***'"
>>> mask_password("u'original_password' : u'aaaaa'")
"u'original_password' : u'***'"
"""
message = six.text_type(message)
# NOTE(ldbragst): Check to see if anything in message contains any key
# specified in _SANITIZE_KEYS, if not then just return the message since
# we don't have to mask any passwords.
if not any(key in message for key in _SANITIZE_KEYS):
return message
secret = r'\g<1>' + secret + r'\g<2>'
for pattern in _SANITIZE_PATTERNS:
message = re.sub(pattern, secret, message)
return message
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
# coerce to unicode before they can get
# to the python logging and possibly
# cause string encoding trouble
if not isinstance(msg, six.string_types):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid', None) or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra['instance'] = instance_extra
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
extra['project'] = self.project
extra['version'] = self.version
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but its still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {}
if CONF.verbose:
extra['exc_info'] = (exc_type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except moves.configparser.Error as exc:
raise LogConfigError(log_config_append, str(exc))
def setup(product_name):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf()
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
def _setup_logging_from_conf():
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not logpath:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"tempest.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
        # Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
|
{
"content_hash": "adfc3bfe0dd65eadcaaab9af26c2dc04",
"timestamp": "",
"source": "github",
"line_count": 609,
"max_line_length": 78,
"avg_line_length": 35.47126436781609,
"alnum_prop": 0.5876307749282473,
"repo_name": "ntymtsiv/tempest",
"id": "7bebfdbacc51db898cc85000496c4fffbc501321",
"size": "22373",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tempest/openstack/common/log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2312198"
},
{
"name": "Shell",
"bytes": "9160"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django import forms
from .models import Alias
class AliasEditForm(forms.ModelForm):
"""Alias for Alias"""
class Meta:
"""Meta for ModelForm"""
model = Alias
fields = ('name', 'url')
|
{
"content_hash": "788a60560330d2deae4203bc0bf04f69",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 39,
"avg_line_length": 17.666666666666668,
"alnum_prop": 0.6264150943396226,
"repo_name": "genonfire/bbgo",
"id": "7b81ab589bc98476d0d56a40a6aa218cbe4239b3",
"size": "289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aliases/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "266"
},
{
"name": "JavaScript",
"bytes": "20212"
},
{
"name": "Python",
"bytes": "9311"
},
{
"name": "Shell",
"bytes": "126"
},
{
"name": "Vue",
"bytes": "937"
}
],
"symlink_target": ""
}
|
import os, sys # low level handling, such as command line stuff
import string # string methods available
import re # regular expressions
import getopt # comand line argument handling
import math # match functions
from low import * # custom functions, written by myself
# =============================================================================
def show_help( ):
""" displays the program parameter list and usage information """
stdout( "usage: " + sys.argv[0] + " -f <path>" )
stdout( " " )
stdout( " option description" )
stdout( " -h help (this text here)" )
stdout( " -f fasta file to import" )
stdout( " -p prefix to put in fron of the key" )
stdout( " -d delimiter (default: space | allowed: ; , tab space" )
stdout( " " )
sys.exit(1)
# =============================================================================
def handle_arguments():
""" verifies the presence of all necessary arguments and returns the data dir """
if len ( sys.argv ) == 1:
stderr( "no arguments provided." )
show_help()
try: # check for the right arguments
keys, values = getopt.getopt( sys.argv[1:], "hf:p:d:" )
except getopt.GetoptError:
stderr( "invalid arguments provided." )
show_help()
args = {}
for key, value in keys:
if key == '-f': args['file'] = value
if key == '-p': args['prefix'] = value
if key == '-d': args['delimiter'] = value
if not args.has_key('file'):
stderr( "import file argument missing." )
show_help()
elif not file_exists( args.get('file') ):
stderr( "import file does not exist." )
show_help()
if not args.has_key('delimiter') or args.get('delimiter') not in [ ";", ",", "tab", "space" ]:
args['delimiter'] = 'space'
return args
# =============================================================================
# === MAIN ====================================================================
# =============================================================================
def main( args ):
fo = open( args.get('file') )
oldid = ""
for line in fo:
line = line.rstrip()
if args.get('delimiter') == "tab":
columns = line.split("\t")
elif args.get('delimiter') == "space":
columns = line.split()
else:
columns = line.split( args.get('delimiter') )
id = columns[0]
if id != oldid:
oldid = id
if args.has_key('prefix'):
print ">" + args.get('prefix') + id
else:
print ">" + id
print string.join( columns[1:], "\t" )
fo.close()
# =============================================================================
args = handle_arguments()
main( args )
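# Illustrative invocation (file name and prefix are assumptions; the input is
# expected to hold a key column followed by data columns):
#     python flat2xdom.py -f domains.tab -d tab -p sp-
# For an input line "geneA<TAB>dom1<TAB>dom2" this emits:
#     >sp-geneA
#     dom1<TAB>dom2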
|
{
"content_hash": "1a970bcc5d9bd96d6698ea588adfc1c2",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 97,
"avg_line_length": 33.54320987654321,
"alnum_prop": 0.48325358851674644,
"repo_name": "lotharwissler/bioinformatics",
"id": "a7121bf365a898910be047c8dcbf9d2210dd8fa2",
"size": "2736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/generic/flat2xdom.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "618072"
},
{
"name": "Ruby",
"bytes": "40829"
}
],
"symlink_target": ""
}
|
"""This module contains the Sponsor Model."""
__authors__ = [
'"Pawel Solyga" <pawel.solyga@gmail.com>',
]
import soc.models.group
class Sponsor(soc.models.group.Group):
"""Sponsor details."""
pass
|
{
"content_hash": "6a9170be6a1b31373236fc5690b1c1da",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 45,
"avg_line_length": 15,
"alnum_prop": 0.6619047619047619,
"repo_name": "MatthewWilkes/mw4068-packaging",
"id": "cac53a3164f7d0dfd09c6db922249db76cf38793",
"size": "822",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/melange/src/soc/models/sponsor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "68827"
},
{
"name": "HTML",
"bytes": "586705"
},
{
"name": "JavaScript",
"bytes": "441502"
},
{
"name": "Python",
"bytes": "2136551"
},
{
"name": "Shell",
"bytes": "5667"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
import imgaug as ia
import imgaug.augmenters as iaa
def main():
aug = iaa.Cutout(fill_mode=["gaussian", "constant"], cval=(0, 255),
fill_per_channel=0.5)
image = ia.data.quokka()
images_aug = aug(images=[image] * 16)
ia.imshow(ia.draw_grid(images_aug, cols=4, rows=4))
if __name__ == "__main__":
main()
|
{
"content_hash": "876aacb19e5f5406e927e4d666c34b2d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 71,
"avg_line_length": 27.533333333333335,
"alnum_prop": 0.6150121065375302,
"repo_name": "aleju/imgaug",
"id": "c99df7b680b7a927f6750cda79332e938774303c",
"size": "413",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "checks/check_cutout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5690574"
},
{
"name": "Shell",
"bytes": "609"
}
],
"symlink_target": ""
}
|
try:
    from collections.abc import MutableMapping
except ImportError:
    from collections import MutableMapping
try:
from thread import get_ident
except ImportError:
try:
from threading import _get_ident as get_ident
except ImportError:
from threading import get_ident
def _recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
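# Minimal sketch of the decorator in use (the Box class is hypothetical, not
# part of this module): a self-referential repr terminates instead of
# recursing forever.
#     class Box(object):
#         @_recursive_repr()
#         def __repr__(self):
#             return 'Box(%r)' % (self.item,)
#     b = Box(); b.item = b
#     repr(b)    # -> "Box('...')"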
class ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
    The underlying mappings are stored in a list. That list is public and can be
accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
@_recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self, m=None): # like Django's Context.push()
'''
New ChainMap with a new map followed by all previous maps. If no
map is provided, an empty dict is used.
'''
if m is None:
m = {}
return self.__class__(m, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {0!r}'.format(key))
def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {0!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
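# A short behavioural sketch (values are assumptions, not from this module):
#     cm = ChainMap({'a': 1}, {'a': 2, 'b': 3})
#     cm['a']              # -> 1: lookups search the maps left to right
#     cm['b']              # -> 3: falls through to the second mapping
#     cm['c'] = 9          # writes and deletes only touch maps[0]
#     child = cm.new_child()   # push a new scope, like Django's Context.push()
#     child.parents        # -> drops the pushed scope again (like Context.pop())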
|
{
"content_hash": "e240b6319d4f4f0b6a2f081f71c2fda4",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 97,
"avg_line_length": 33.62043795620438,
"alnum_prop": 0.5822839774207555,
"repo_name": "GoogleCloudPlatform/python-compat-runtime",
"id": "2e28de848f4577123e4a94e7a23ad8d6fa4bf838",
"size": "4606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appengine-vmruntime/vmruntime/chainmap/chainmap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "30211"
},
{
"name": "HTML",
"bytes": "171272"
},
{
"name": "JavaScript",
"bytes": "414229"
},
{
"name": "Makefile",
"bytes": "2138"
},
{
"name": "PHP",
"bytes": "3132250"
},
{
"name": "Python",
"bytes": "11709249"
},
{
"name": "Shell",
"bytes": "1787"
}
],
"symlink_target": ""
}
|
import subprocess
import sys
import os
import setup_util
def start(args, logfile, errfile):
setup_util.replace_text("php-yii2/app/index.php", "localhost", ""+ args.database_host +"")
setup_util.replace_text("php-yii2/deploy/nginx.conf", "root .*\/FrameworkBenchmarks/php-yii2", "root " + args.troot)
try:
if os.name == 'nt':
subprocess.check_call('icacls "C:\\FrameworkBenchmarks\\php-yii2" /grant "IIS_IUSRS:(OI)(CI)F"', shell=True, stderr=errfile, stdout=logfile)
subprocess.check_call('appcmd add site /name:PHP /bindings:http/*:8080: /physicalPath:"C:\\FrameworkBenchmarks\\php-yii2\\app"', shell=True, stderr=errfile, stdout=logfile)
return 0
subprocess.check_call("sudo chown -R www-data:www-data php-yii2", shell=True, stderr=errfile, stdout=logfile)
subprocess.check_call("sudo $PHP_FPM --fpm-config $FWROOT/config/php-fpm.conf -g $TROOT/deploy/php-fpm.pid", shell=True, stderr=errfile, stdout=logfile)
subprocess.check_call("sudo /usr/local/nginx/sbin/nginx -c $TROOT/deploy/nginx.conf", shell=True, stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
def stop(logfile, errfile):
try:
if os.name == 'nt':
subprocess.call('appcmd delete site PHP', shell=True, stderr=errfile, stdout=logfile)
return 0
subprocess.call("sudo /usr/local/nginx/sbin/nginx -s stop", shell=True, stderr=errfile, stdout=logfile)
subprocess.call("sudo kill -QUIT $( cat $TROOT/deploy/php-fpm.pid )", shell=True, stderr=errfile, stdout=logfile)
subprocess.check_call("sudo chown -R $USER:$USER php-yii2", shell=True, stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
|
{
"content_hash": "b873c2b3e3e01404d6a0f95f3effdae1",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 178,
"avg_line_length": 55.29032258064516,
"alnum_prop": 0.7170361726954493,
"repo_name": "ratpack/FrameworkBenchmarks",
"id": "1dda2f1631b7d0ab76906f509647d2b888d0798b",
"size": "1714",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "frameworks/PHP/php-yii2/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "838"
},
{
"name": "C",
"bytes": "39732"
},
{
"name": "C#",
"bytes": "128703"
},
{
"name": "C++",
"bytes": "402630"
},
{
"name": "CSS",
"bytes": "234858"
},
{
"name": "Clojure",
"bytes": "18787"
},
{
"name": "Dart",
"bytes": "35750"
},
{
"name": "Elixir",
"bytes": "1912"
},
{
"name": "Erlang",
"bytes": "7670"
},
{
"name": "Go",
"bytes": "35314"
},
{
"name": "Groovy",
"bytes": "15587"
},
{
"name": "Haskell",
"bytes": "8771"
},
{
"name": "Java",
"bytes": "264212"
},
{
"name": "JavaScript",
"bytes": "395155"
},
{
"name": "Lua",
"bytes": "7463"
},
{
"name": "MoonScript",
"bytes": "2204"
},
{
"name": "Nim",
"bytes": "32032"
},
{
"name": "PHP",
"bytes": "17587921"
},
{
"name": "Perl",
"bytes": "18774"
},
{
"name": "PowerShell",
"bytes": "35514"
},
{
"name": "Prolog",
"bytes": "317"
},
{
"name": "Python",
"bytes": "413446"
},
{
"name": "Racket",
"bytes": "5298"
},
{
"name": "Ruby",
"bytes": "73849"
},
{
"name": "Scala",
"bytes": "62267"
},
{
"name": "Shell",
"bytes": "114520"
},
{
"name": "Volt",
"bytes": "677"
}
],
"symlink_target": ""
}
|
import sys
import os
import logging
topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
topdir = os.path.realpath(topdir)
sys.path.insert(0, topdir)
from execformat.executor import execUtils, OperationFailed
from vyos_session import utils
logger = logging.getLogger(__name__)
utils.init_logger(logger)
class configOpts(object):
def __init__(self):
pass
def set_1(self, args):
exe=execUtils(list(args))
exe.execmd()
def delete_1(self, args):
exe=execUtils(list(args))
exe.execmd()
def show(self, args):
exe=execUtils(list(args))
res,output=exe.execmd(nonsession=True)
return res,output
def set(self, args):
args.insert(0, 'set')
exe=execUtils(list(args))
try:
exe.execmd()
return True
        except OperationFailed as e:
logger.error(e.message)
return False
def delete(self, args):
args.insert(0, 'delete')
exe=execUtils(list(args))
try:
exe.execmd()
return True
        except OperationFailed as e:
logger.error(e.message)
return False
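# Hedged usage sketch (the configuration path below is illustrative and not
# taken from this repository):
#     ops = configOpts()
#     if ops.set(['system', 'host-name', 'gw1']):
#         # executed as: set system host-name gw1
#         pass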
|
{
"content_hash": "5476f8cf32c4f413f5b1b497f13969b3",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 62,
"avg_line_length": 23.352941176470587,
"alnum_prop": 0.5852225020990764,
"repo_name": "jiahaoliang/group-based-policy",
"id": "849124a9a2a9631eaa995a7b0a1781787a472da7",
"size": "1742",
"binary": false,
"copies": "1",
"ref": "refs/heads/lbaasv2-mitaka-pull-request",
"path": "gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/operations.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1741199"
},
{
"name": "Shell",
"bytes": "27976"
}
],
"symlink_target": ""
}
|
"""
Clever API
The Clever API
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SectionResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'data': 'Section'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None):
"""
SectionResponse - a model defined in Swagger
"""
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
"""
Gets the data of this SectionResponse.
:return: The data of this SectionResponse.
:rtype: Section
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this SectionResponse.
:param data: The data of this SectionResponse.
:type: Section
"""
self._data = data
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, SectionResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
{
"content_hash": "b3ebb9a0956f5b8bb37348b20f124c67",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 77,
"avg_line_length": 23.5327868852459,
"alnum_prop": 0.5043538836642285,
"repo_name": "Clever/clever-python",
"id": "2be7a3477ba40e35cbd110745eedba381d6c2372",
"size": "2888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clever/models/section_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Python",
"bytes": "760865"
},
{
"name": "Shell",
"bytes": "2015"
}
],
"symlink_target": ""
}
|
"""
Add external identity tables.
Revision ID: 24ab8d11f014
Revises: 2bb1ba973f0b
Create Date: 2011-11-10 23:18:19.446844
"""
from __future__ import unicode_literals
import sqlalchemy as sa # noqa: F401
from alembic import op # noqa: F401
# Revision identifiers, used by Alembic.
# pylint: disable=C0103,invalid-name # revision control variables not uppercase
revision = "24ab8d11f014"
down_revision = "2bb1ba973f0b"
def upgrade():
op.create_table("external_identities",
sa.Column("external_id", sa.Unicode(255), primary_key=True),
sa.Column("external_user_name", sa.Unicode(50), default=""),
sa.Column("local_user_name", sa.Unicode(50),
sa.ForeignKey("users.user_name",
onupdate="CASCADE",
ondelete="CASCADE"),
primary_key=True),
sa.Column("provider_name", sa.Unicode(50), default="",
primary_key=True),
sa.Column("access_token", sa.Unicode(255), default=""),
sa.Column("alt_token", sa.Unicode(255), default=""),
sa.Column("token_secret", sa.Unicode(255), default="")
)
def downgrade():
op.drop_table("external_identities")
|
{
"content_hash": "36d43b345cae57515f8b5999401e1abb",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 80,
"avg_line_length": 37.32432432432432,
"alnum_prop": 0.5467052860246199,
"repo_name": "Ouranosinc/Magpie",
"id": "4a8a0db94a690a07f5c3b92623c56f873d376597",
"size": "1381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magpie/alembic/versions/2011-11-10_24ab8d11f014_external_identities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "28610"
},
{
"name": "Dockerfile",
"bytes": "1491"
},
{
"name": "JavaScript",
"bytes": "1557"
},
{
"name": "Makefile",
"bytes": "30522"
},
{
"name": "Mako",
"bytes": "138010"
},
{
"name": "Python",
"bytes": "2200847"
},
{
"name": "Shell",
"bytes": "1250"
}
],
"symlink_target": ""
}
|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/service -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_service
short_description: Create, modify, and idempotently manage openshift services.
description:
- Manage openshift service objects programmatically.
options:
state:
description:
- State represents whether to create, modify, delete, or list
required: False
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: default
aliases: []
selector:
description:
- The selector to apply when filtering for services.
required: false
default: None
aliases: []
labels:
description:
- The labels to apply on the service.
required: false
default: None
aliases: []
clusterip:
description:
- The cluster ip address to use with this service.
required: false
default: None
aliases: []
portalip:
description:
- The portal ip(virtual ip) address to use with this service.
- "https://docs.openshift.com/enterprise/3.0/architecture/core_concepts/pods_and_services.html#services"
required: false
default: None
aliases: []
ports:
description:
- A list of the ports that are used for this service. This includes name, port, protocol, and targetPort.
- See examples.
required: false
default: None
aliases: []
session_affinity:
description:
- The type of session affinity to use.
required: false
default: 'None'
aliases: []
service_type:
description:
- The type of service desired. Each option tells the service to behave accordingly.
- https://kubernetes.io/docs/user-guide/services/
required: false
default: ClusterIP
choices:
- ClusterIP
- NodePort
- LoadBalancer
- ExternalName
aliases: []
externalips:
description:
- A list of the external IPs that are exposed for this service.
- https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
required: false
default: None
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: get docker-registry service
run_once: true
oc_service:
namespace: default
name: docker-registry
state: list
register: registry_service_out
- name: create the docker-registry service
oc_service:
namespace: default
name: docker-registry
ports:
- name: 5000-tcp
port: 5000
protocol: TCP
targetPort: 5000
selector:
docker-registry: default
session_affinity: ClientIP
service_type: ClusterIP
register: svc_out
notify:
- restart openshift master services
'''
# -*- -*- -*- End included fragment: doc/service -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add an item to a dictionary at the location given in key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a#b, sep = #, item = 'x'
            results in d = {'a': {'b': 'x'}}
        '''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            return c
        '''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
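    # Illustrative (a sketch): Yedit.get_entry({'a': {'b': ['c', 'd']}}, 'a.b[1]')
    # returns 'd'; list indexes use the [n] notation handled by re_key.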
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
        # Only update the root path (entire document) when it's a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
        # It came in as a string but value_type was not specified as string;
        # we will convert it to a bool if it matches any of the cases above
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
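    # Illustrative behavior (a sketch): parse_value('on') yaml-loads to True;
    # parse_value('on', vtype='str') keeps the string; and
    # parse_value('maybe', vtype='bool') raises YeditException.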
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                           'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key']) or {}
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
            # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
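# A minimal Yedit usage sketch (the content and keys below are illustrative
# assumptions, not part of the included fragments):
def _example_yedit_sketch():  # pragma: no cover
    yed = Yedit(content={'metadata': {'name': 'registry'}})
    yed.put('metadata.labels.app', 'registry')  # -> (True, updated dict)
    app_label = yed.get('metadata.labels.app')  # -> 'registry'
    yed.delete('metadata.labels')               # -> (True, updated dict)
    return app_label, yed.yaml_dict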
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
        '''Turn a list of dicts with 'path' and 'data' keys into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
        # horrible hack to get the openshift version in OpenShift 3.2
        # By default "oc version" in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
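    # Illustrative: {'oc': 'v3.3.0.33'} becomes
    # {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}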
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
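# A minimal OpenShiftCLIConfig sketch (the option names and values below are
# illustrative assumptions):
def _example_config_sketch():  # pragma: no cover
    options = {'replicas': {'value': 3, 'include': True},
               'labels': {'value': {'app': 'web', 'tier': 'front'},
                          'include': True}}
    config = OpenShiftCLIConfig('web', 'default',
                                '/etc/origin/master/admin.kubeconfig', options)
    # keys are emitted sorted; the key named in ascommalist is flattened
    # into comma-delimited k=v pairs
    return config.to_option_list(ascommalist='labels')
    # -> ['--labels=app=web,tier=front', '--replicas=3']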
# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class ServiceConfig(object):
''' Handle service options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
ports,
selector=None,
labels=None,
cluster_ip=None,
portal_ip=None,
session_affinity=None,
service_type=None,
external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
self.ports = ports
self.selector = selector
self.labels = labels
self.cluster_ip = cluster_ip
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
self.external_ips = external_ips
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiates a service dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Service'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
self.data['metadata']['labels'] = {}
for lab, lab_value in self.labels.items():
self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
self.data['spec']['ports'] = self.ports
else:
self.data['spec']['ports'] = []
if self.selector:
self.data['spec']['selector'] = self.selector
self.data['spec']['sessionAffinity'] = self.session_affinity or 'None'
if self.cluster_ip:
self.data['spec']['clusterIP'] = self.cluster_ip
if self.portal_ip:
self.data['spec']['portalIP'] = self.portal_ip
if self.service_type:
self.data['spec']['type'] = self.service_type
if self.external_ips:
self.data['spec']['externalIPs'] = self.external_ips
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
port_path = "spec.ports"
portal_ip = "spec.portalIP"
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
super(Service, self).__init__(content=content)
def get_ports(self):
''' get a list of ports '''
return self.get(Service.port_path) or []
def get_selector(self):
''' get the service selector'''
return self.get(Service.selector_path) or {}
def add_ports(self, inc_ports):
''' add a port object to the ports list '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get_ports()
if not ports:
self.put(Service.port_path, inc_ports)
else:
ports.extend(inc_ports)
return True
def find_ports(self, inc_port):
''' find a specific port '''
for port in self.get_ports():
if port['port'] == inc_port['port']:
return port
return None
def delete_ports(self, inc_ports):
''' remove a port from a service '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get(Service.port_path) or []
if not ports:
return True
removed = False
for inc_port in inc_ports:
port = self.find_ports(inc_port)
if port:
ports.remove(port)
removed = True
return removed
def add_cluster_ip(self, sip):
'''add cluster ip'''
self.put(Service.cluster_ip, sip)
def add_portal_ip(self, pip):
        '''add portal ip'''
self.put(Service.portal_ip, pip)
def get_external_ips(self):
''' get a list of external_ips '''
return self.get(Service.external_ips) or []
def add_external_ips(self, inc_external_ips):
''' add an external_ip to the external_ips list '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get_external_ips()
if not external_ips:
self.put(Service.external_ips, inc_external_ips)
else:
external_ips.extend(inc_external_ips)
return True
def find_external_ips(self, inc_external_ip):
''' find a specific external IP '''
val = None
try:
idx = self.get_external_ips().index(inc_external_ip)
val = self.get_external_ips()[idx]
except ValueError:
pass
return val
def delete_external_ips(self, inc_external_ips):
''' remove an external IP from a service '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get(Service.external_ips) or []
if not external_ips:
return True
removed = False
for inc_external_ip in inc_external_ips:
external_ip = self.find_external_ips(inc_external_ip)
if external_ip:
external_ips.remove(external_ip)
removed = True
return removed
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
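# A minimal ServiceConfig/Service sketch (the port and selector values are
# illustrative assumptions):
def _example_service_sketch():  # pragma: no cover
    config = ServiceConfig('docker-registry', 'default',
                           ports=[{'name': '5000-tcp', 'port': 5000,
                                   'protocol': 'TCP', 'targetPort': 5000}],
                           selector={'docker-registry': 'default'})
    svc = Service(content=config.data)
    svc.add_external_ips('192.168.1.10')  # single values are wrapped in a list
    return svc.get_ports(), svc.get_external_ips()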
# -*- -*- -*- Begin included fragment: class/oc_service.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCService(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'service'
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
labels,
selector,
cluster_ip,
portal_ip,
ports,
session_affinity,
service_type,
external_ips,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
        ''' Constructor for OCService '''
super(OCService, self).__init__(namespace, kubeconfig, verbose)
self.namespace = namespace
self.config = ServiceConfig(sname, namespace, ports, selector, labels,
cluster_ip, portal_ip, session_affinity, service_type,
external_ips)
self.user_svc = Service(content=self.config.data)
self.svc = None
@property
def service(self):
''' property function service'''
if not self.svc:
self.get()
return self.svc
@service.setter
def service(self, data):
''' setter function for service var '''
self.svc = data
def exists(self):
''' return whether a service exists '''
if self.service:
return True
return False
def get(self):
'''return service information '''
result = self._get(self.kind, self.config.name)
if result['returncode'] == 0:
self.service = Service(content=result['results'][0])
result['clusterip'] = self.service.get('spec.clusterIP')
elif 'services \"%s\" not found' % self.config.name in result['stderr']:
result['clusterip'] = ''
result['returncode'] = 0
return result
def delete(self):
'''delete the service'''
return self._delete(self.kind, self.config.name)
def create(self):
'''create a service '''
return self._create_from_content(self.config.name, self.user_svc.yaml_dict)
def update(self):
        '''update the service'''
        # Need to copy over the clusterIP and the portalIP settings
self.user_svc.add_cluster_ip(self.service.get('spec.clusterIP'))
self.user_svc.add_portal_ip(self.service.get('spec.portalIP'))
return self._replace_content(self.kind, self.config.name, self.user_svc.yaml_dict)
def needs_update(self):
''' verify an update is needed '''
skip = ['clusterIP', 'portalIP']
return not Utils.check_def_equal(self.user_svc.yaml_dict, self.service.yaml_dict, skip_keys=skip, debug=True)
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params, check_mode):
'''Run the idempotent ansible code'''
oc_svc = OCService(params['name'],
params['namespace'],
params['labels'],
params['selector'],
params['clusterip'],
params['portalip'],
params['ports'],
params['session_affinity'],
params['service_type'],
params['external_ips'],
params['kubeconfig'],
params['debug'])
state = params['state']
api_rval = oc_svc.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
if oc_svc.exists():
if check_mode:
return {'changed': True,
'msg': 'CHECK_MODE: Would have performed a delete.'} # noqa: E501
api_rval = oc_svc.delete()
return {'changed': True, 'results': api_rval, 'state': state}
return {'changed': False, 'state': state}
if state == 'present':
########
# Create
########
if not oc_svc.exists():
if check_mode:
return {'changed': True,
'msg': 'CHECK_MODE: Would have performed a create.'} # noqa: E501
# Create it here
api_rval = oc_svc.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_svc.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if oc_svc.needs_update():
api_rval = oc_svc.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_svc.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'changed': False, 'results': api_rval, 'state': state}
return {'failed': True, 'msg': 'UNKNOWN state passed. [%s]' % state}
# -*- -*- -*- End included fragment: class/oc_service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_service.py -*- -*- -*-
def main():
'''
ansible oc module for services
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, type='str'),
labels=dict(default=None, type='dict'),
selector=dict(default=None, type='dict'),
clusterip=dict(default=None, type='str'),
portalip=dict(default=None, type='str'),
ports=dict(default=None, type='list'),
session_affinity=dict(default='None', type='str'),
service_type=dict(default='ClusterIP', type='str'),
external_ips=dict(default=None, type='list'),
),
supports_check_mode=True,
)
rval = OCService.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
return module.fail_json(**rval)
return module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_service.py -*- -*- -*-
|
{
"content_hash": "9f02cdf5a7b84b415ad460421c6ecf9b",
"timestamp": "",
"source": "github",
"line_count": 1850,
"max_line_length": 118,
"avg_line_length": 33.23567567567567,
"alnum_prop": 0.5317145366424877,
"repo_name": "DG-i/openshift-ansible",
"id": "308f454880b6ccc4a55c0cc4aaf8e468f20e3444",
"size": "62648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roles/lib_openshift/library/oc_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "4999"
},
{
"name": "HTML",
"bytes": "14650"
},
{
"name": "Python",
"bytes": "3197455"
},
{
"name": "Roff",
"bytes": "5645"
},
{
"name": "Shell",
"bytes": "80962"
}
],
"symlink_target": ""
}
|
"""
This module contains a Google Search Ads sensor.
"""
from typing import Dict, Optional
from airflow.providers.google.marketing_platform.hooks.search_ads import GoogleSearchAdsHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class GoogleSearchAdsReportSensor(BaseSensorOperator):
"""
Polls for the status of a report request.
.. seealso::
For API documentation check:
https://developers.google.com/search-ads/v2/reference/reports/get
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSearchAdsReportSensor`
:param report_id: ID of the report request being polled.
:type report_id: str
    :param api_version: The version of the API that will be requested, for example 'v2'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
request must have domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = ("report_id",)
@apply_defaults
def __init__(
self,
report_id: str,
api_version: str = "v2",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
mode: str = "reschedule",
poke_interval: int = 5 * 60,
*args,
**kwargs
):
super().__init__(mode=mode, poke_interval=poke_interval, *args, **kwargs)
self.report_id = report_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
def poke(self, context: Dict):
hook = GoogleSearchAdsHook(
gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to, api_version=self.api_version
)
self.log.info('Checking status of %s report.', self.report_id)
response = hook.get(report_id=self.report_id)
return response['isReportReady']
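# A minimal usage sketch (illustrative; the DAG id and the upstream task name
# pulled over XCom are assumptions, not part of this module):
def _example_dag():  # pragma: no cover
    from airflow import DAG
    from airflow.utils.dates import days_ago

    with DAG("example_search_ads", start_date=days_ago(1),
             schedule_interval=None) as dag:
        GoogleSearchAdsReportSensor(
            task_id="wait_for_report",
            report_id="{{ task_instance.xcom_pull('generate_report')['id'] }}",
        )
    return dag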
|
{
"content_hash": "94b227c1316f1354b4c5ec0f4fcc1c7d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 107,
"avg_line_length": 35.81666666666667,
"alnum_prop": 0.6626337831549558,
"repo_name": "mtagle/airflow",
"id": "1853825941ffdc46058a8b081d99c810f56a9496",
"size": "2936",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/providers/google/marketing_platform/sensors/search_ads.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "148492"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10006634"
},
{
"name": "Shell",
"bytes": "217011"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
ProgramName = 'getCpGoe.py'
LastUpdated = '10/16/14'
By = 'Groves Dixon'
VersionNumber = '1.0'
print "\nRunning Program {}...".format(ProgramName)
VersionString = '{} version {} Last Updated {} by {}'.format(ProgramName, VersionNumber, LastUpdated, By)
Description = '''
Description:
This program reads through a set of nucleotide sequences and returns a table with CpGoe values
and other relevant data.
'''
AdditionalProgramInfo = '''
Additional Program Information:
'''
##Import Modules
import time
import argparse
from sys import argv
from sys import exit
import numpy as np
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
Start_time = time.time() ##keeps track of how long the script takes to run
##Set Up Argument Parsing
parser = argparse.ArgumentParser(description=Description, epilog=AdditionalProgramInfo) ##create argument parser that will automatically return help texts from global variables above
parser.add_argument('-i', required = False, dest = 'input', help = 'The name of the file with the gene body sequences')
parser.add_argument('-sub', required = False, default = 'False', dest = 'subset', help = 'Optional integer argument to keep only the first part of each gene; designates how many bases to keep.')
parser.add_argument('-output', '-o', required = True, dest = 'out', help = 'The desired name for the output file')
parser.add_argument('-cds', required = False, dest = 'cds', help = "Tell whether this is a CDS file (Misha's protein-coding nucleotide sequences). Assign 'y' for yes or leave blank")
args = parser.parse_args()
#Assign Arguments
inputFile = args.input
outfileName = args.out
subset = args.subset
CDS = args.cds
def read_genome(inputFile, outfileName, subset):
with open(outfileName, 'w') as out:
header = "EST\tC\tG\tT\tCpG\tGpC\tTpG\tlength"
out.write(header)
geneLengths = {}
isogroupList = []
dataDict = {}
for seqRecord in SeqIO.parse(inputFile, "fasta"):
if subset != 'False':
subset = int(subset)
seqRecord = seqRecord[0:subset]
if CDS == 'y':
geneName = seqRecord.description.split()[2]
geneNumber = geneName.split('isogroup')[1]
geneName = "isogroup=" + geneNumber
print seqRecord.description
print geneName
else:
geneName = seqRecord.id
length = len(seqRecord.seq)
seq = seqRecord.seq
seq = seq.upper() ##make sure all nucleotide letters are upper case
Tcount = seq.count('T')
Ccount = seq.count('C')
Gcount = seq.count('G')
Ncount = seq.count('N')
CGcount = seq.count('CG')
GCcount = seq.count('GC')
TGcount = seq.count('TG')
            length = length - Ncount ##don't include Ns in the length measure for getting CpGoe because they had no opportunity to be C or G
if CDS == 'y':
try:
if geneLengths[geneName] > length:
continue ##this way we skip repeats, selecting the longer of the two repeats for an isogroup
except KeyError:
geneLengths[geneName] = length
dataList = [geneName, Ccount, Gcount, Tcount, CGcount, GCcount, TGcount, length]
stringList = []
for i in dataList:
stringList.append(str(i))
dataString = '\t'.join(stringList)
dataDict[geneName] = dataString #this will replace the shorter one for a duplicate if it was recorded, so we always keep the longer gene
for gene in dataDict.keys():
dataString = dataDict[gene]
out.write('\n' + dataString)
read_genome(inputFile, outfileName, subset)
Time = time.time() - Start_time
print('\nTime taken to run: {}'.format(Time))
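# A minimal sketch of the downstream calculation (an assumption about how the
# output table is used, not part of this script): CpG observed/expected is
# commonly computed as (CpG * length) / (C * G).
def cpg_oe(c_count, g_count, cpg_count, length):
    """Return the observed/expected CpG ratio, or None when C or G is absent."""
    if c_count == 0 or g_count == 0:
        return None
    return float(cpg_count) * length / (c_count * g_count)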
|
{
"content_hash": "a2d93e26c4bb015ec9dd8faf95a742fe",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 201,
"avg_line_length": 40.04040404040404,
"alnum_prop": 0.6321897073662966,
"repo_name": "grovesdixon/CpGoe",
"id": "8f5a54c3e2f1ced7b290e810e3e0ad565ca26317",
"size": "4034",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "getCpGoe.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4034"
},
{
"name": "R",
"bytes": "59864"
}
],
"symlink_target": ""
}
|
from unittest.mock import patch, sentinel, MagicMock
from qcat.tests import TestCase
from summary.parsers.questionnaire import QuestionnaireParser
from summary.renderers.summary import SummaryRenderer, GlobalValuesMixin
class SummaryDataProviderTest(TestCase):
def test_summary_type(self):
with self.assertRaises(NotImplementedError):
SummaryRenderer(config='', questionnaire='', base_url='', quality='screen')
@patch.object(QuestionnaireParser, '__init__')
def test_content(self, mock_raw_data):
mock_raw_data.return_value = {}
with self.assertRaises(NotImplementedError):
SummaryRenderer(
config='', questionnaire='', base_url='', quality='screen'
)
class GlobalValuesMixinTest(TestCase):
def setUp(self):
class Tmp(GlobalValuesMixin, SummaryRenderer):
summary_type = 'type'
content = ['sample']
def sample(self):
return sentinel.sample_value
self.obj = Tmp(
config=MagicMock(), questionnaire='', base_url='', quality='screen'
)
def test_raw_data_getter(self):
# data as structured by the configured questionnaire summary
self.obj.raw_data = {'key': [{'value': sentinel.expected}]}
self.assertEqual(
self.obj.raw_data_getter('key'),
sentinel.expected
)
def test_raw_data_getter_custom_value(self):
self.obj.raw_data = {'key': [{'value': sentinel.expected}]}
self.assertEqual(
self.obj.raw_data_getter('key', value=''),
[{'value': sentinel.expected}]
)
def test_string_from_list(self):
self.obj.raw_data = {'key': [{'values': ['will', 'i', 'am']}]}
self.assertEqual(
self.obj.string_from_list('key'),
'will, i, am'
)
|
{
"content_hash": "0535fbbe9b4b8d1d72c832ce8917ac08",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 87,
"avg_line_length": 33.464285714285715,
"alnum_prop": 0.6061899679829242,
"repo_name": "CDE-UNIBE/qcat",
"id": "1f5e87ce167ae4f99b3b6b2c7dc7048a5cc87861",
"size": "1874",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apps/summary/tests/test_renderers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1098"
},
{
"name": "HTML",
"bytes": "823938"
},
{
"name": "Handlebars",
"bytes": "224139"
},
{
"name": "JavaScript",
"bytes": "153067"
},
{
"name": "Python",
"bytes": "3515948"
},
{
"name": "SCSS",
"bytes": "165400"
},
{
"name": "Shell",
"bytes": "1943"
}
],
"symlink_target": ""
}
|
import logging
import re
from telemetry import decorators
from telemetry.timeline import trace_data as trace_data_module
from telemetry.core.backends.chrome_inspector import devtools_http
from telemetry.core.backends.chrome_inspector import inspector_backend
from telemetry.core.backends.chrome_inspector import tracing_backend
from telemetry.core.platform.tracing_agent import chrome_tracing_agent
def IsDevToolsAgentAvailable(port):
"""Returns True if a DevTools agent is available on the given port."""
return _IsDevToolsAgentAvailable(devtools_http.DevToolsHttp(port))
# TODO(nednguyen): Find a more reliable way to check whether the devtools
# agent is still alive.
def _IsDevToolsAgentAvailable(devtools_http_instance):
try:
devtools_http_instance.Request('')
except devtools_http.DevToolsClientConnectionError:
return False
else:
return True
class DevToolsClientBackend(object):
def __init__(self, devtools_port, remote_devtools_port, app_backend):
"""Creates a new DevToolsClientBackend.
A DevTools agent must exist on the given devtools_port.
Args:
devtools_port: The port to use to connect to DevTools agent.
      remote_devtools_port: In some cases (e.g., an app running on an
          Android device), devtools_port is the forwarded port on the
          host platform. We also need to know the remote_devtools_port
so that we can uniquely identify the DevTools agent.
app_backend: For the app that contains the DevTools agent.
"""
self._devtools_port = devtools_port
self._remote_devtools_port = remote_devtools_port
self._devtools_http = devtools_http.DevToolsHttp(devtools_port)
self._tracing_backend = None
self._app_backend = app_backend
self._devtools_context_map_backend = _DevToolsContextMapBackend(
self._app_backend, self)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
self, self._app_backend.platform_backend)
@property
def remote_port(self):
return self._remote_devtools_port
# TODO(chrishenry): This is temporarily exposed during DevTools code
# refactoring. Please do not introduce new usage! crbug.com/423954
@property
def devtools_http(self):
return self._devtools_http
def IsAlive(self):
"""Whether the DevTools server is available and connectable."""
return _IsDevToolsAgentAvailable(self.devtools_http)
def Close(self):
if self._tracing_backend:
self._tracing_backend.Close()
self._tracing_backend = None
@decorators.Cache
def GetChromeBranchNumber(self):
# Detect version information.
resp = self._devtools_http.RequestJson('version')
if 'Protocol-Version' in resp:
if 'Browser' in resp:
branch_number_match = re.search(r'Chrome/\d+\.\d+\.(\d+)\.\d+',
resp['Browser'])
else:
branch_number_match = re.search(
r'Chrome/\d+\.\d+\.(\d+)\.\d+ (Mobile )?Safari',
resp['User-Agent'])
if branch_number_match:
branch_number = int(branch_number_match.group(1))
if branch_number:
return branch_number
# Branch number can't be determined, so fail any branch number checks.
return 0
  # TODO(chrishenry): This is exposed temporarily during DevTools code
# refactoring. Instead, we should expose InspectorBackendList or
# equivalent. crbug.com/423954.
def ListInspectableContexts(self):
return self._devtools_http.RequestJson('')
def GetUpdatedInspectableContexts(self):
"""Returns an updated instance of _DevToolsContextMapBackend."""
contexts = self.ListInspectableContexts()
self._devtools_context_map_backend._Update(contexts)
return self._devtools_context_map_backend
def _CreateTracingBackendIfNeeded(self):
if not self._tracing_backend:
self._tracing_backend = tracing_backend.TracingBackend(
self._devtools_port)
def IsChromeTracingSupported(self):
self._CreateTracingBackendIfNeeded()
return self._tracing_backend.IsTracingSupported()
def StartChromeTracing(
self, trace_options, custom_categories=None, timeout=10):
"""
Args:
trace_options: An tracing_options.TracingOptions instance.
custom_categories: An optional string containing a list of
comma separated categories that will be traced
instead of the default category set. Example: use
"webkit,cc,disabled-by-default-cc.debug" to trace only
those three event categories.
"""
assert trace_options and trace_options.enable_chrome_trace
self._CreateTracingBackendIfNeeded()
return self._tracing_backend.StartTracing(
trace_options, custom_categories, timeout)
def StopChromeTracing(self, trace_data_builder, timeout=30):
context_map = self.GetUpdatedInspectableContexts()
for context in context_map.contexts:
context_id = context['id']
backend = context_map.GetInspectorBackend(context_id)
success = backend.EvaluateJavaScript(
"console.time('" + backend.id + "');" +
"console.timeEnd('" + backend.id + "');" +
"console.time.toString().indexOf('[native code]') != -1;")
if not success:
raise Exception('Page stomped on console.time')
trace_data_builder.AddEventsTo(
trace_data_module.TAB_ID_PART, [backend.id])
assert self._tracing_backend
return self._tracing_backend.StopTracing(trace_data_builder, timeout)
class _DevToolsContextMapBackend(object):
def __init__(self, app_backend, devtools_client):
self._app_backend = app_backend
self._devtools_client = devtools_client
self._contexts = None
self._inspector_backends_dict = {}
@property
def contexts(self):
"""The most up to date contexts data.
Returned in the order returned by devtools agent."""
return self._contexts
def GetContextInfo(self, context_id):
for context in self._contexts:
if context['id'] == context_id:
return context
raise KeyError('Cannot find a context with id=%s' % context_id)
def GetInspectorBackend(self, context_id):
"""Gets an InspectorBackend instance for the given context_id.
This lazily creates InspectorBackend for the context_id if it does
not exist yet. Otherwise, it will return the cached instance."""
if context_id in self._inspector_backends_dict:
return self._inspector_backends_dict[context_id]
for context in self._contexts:
if context['id'] == context_id:
new_backend = inspector_backend.InspectorBackend(
self._app_backend.app, self._devtools_client, context)
self._inspector_backends_dict[context_id] = new_backend
return new_backend
raise KeyError('Cannot find a context with id=%s' % context_id)
def _Update(self, contexts):
# Remove InspectorBackend that is not in the current inspectable
# contexts list.
context_ids = [context['id'] for context in contexts]
    # Iterate over a copy: entries may be deleted from the dict below.
    for context_id in list(self._inspector_backends_dict.keys()):
if context_id not in context_ids:
del self._inspector_backends_dict[context_id]
valid_contexts = []
for context in contexts:
# If the context does not have webSocketDebuggerUrl, skip it.
# If an InspectorBackend is already created for the tab,
# webSocketDebuggerUrl will be missing, and this is expected.
context_id = context['id']
if context_id not in self._inspector_backends_dict:
if 'webSocketDebuggerUrl' not in context:
logging.debug('webSocketDebuggerUrl missing, removing %s'
% context_id)
continue
valid_contexts.append(context)
self._contexts = valid_contexts
|
{
"content_hash": "caf920b80f8531aae26083670ca99d21",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 79,
"avg_line_length": 38.00487804878049,
"alnum_prop": 0.6923373122834039,
"repo_name": "hefen1/chromium",
"id": "4dc7f755b5684ff3e83df506e1f0eef14f454bb2",
"size": "7954",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/backends/chrome_inspector/devtools_client_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23829"
},
{
"name": "C",
"bytes": "4050888"
},
{
"name": "C++",
"bytes": "227355953"
},
{
"name": "CSS",
"bytes": "970407"
},
{
"name": "HTML",
"bytes": "28896884"
},
{
"name": "Java",
"bytes": "8494381"
},
{
"name": "JavaScript",
"bytes": "19110753"
},
{
"name": "Makefile",
"bytes": "37978"
},
{
"name": "Objective-C",
"bytes": "1276474"
},
{
"name": "Objective-C++",
"bytes": "7755220"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "264470"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "423501"
},
{
"name": "Python",
"bytes": "7622149"
},
{
"name": "Shell",
"bytes": "478642"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
from ebird.api.constants import DEFAULT_BACK
from tests.mixins.base import BaseMixin
class BackTestsMixin(BaseMixin):
def test_back_is_sent(self):
query = self.api_call(back=10)[1]
self.assertEqual(query["back"], 10)
def test_default_back_is_not_sent(self):
query = self.api_call(back=DEFAULT_BACK)[1]
self.assertTrue("back" not in query)
def test_invalid_back_raises_error(self):
self.api_raises(ValueError, back=31)
self.api_raises(ValueError, back="x")
|
{
"content_hash": "faa4e8fb99a6bac22877ce6f162f880d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 51,
"avg_line_length": 32.5,
"alnum_prop": 0.675,
"repo_name": "ProjectBabbler/ebird-api",
"id": "a82054bcbbc93091d6cde0c3bba2fa420fc0e4b0",
"size": "520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/mixins/back_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5063"
},
{
"name": "Python",
"bytes": "127932"
}
],
"symlink_target": ""
}
|
"""Create and parse 'send'-type messages."""
import struct
from . import (util, config, exceptions, bitcoin)
FORMAT = '>QQ'
LENGTH = 8 + 8
ID = 0
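# Message layout sketch: pack() below produces two big-endian unsigned 64-bit
# ints, e.g. struct.pack(FORMAT, 1, 100000000) yields LENGTH (16) bytes that
# parse() unpacks back into (asset_id, quantity).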
def validate (db, source, destination, asset, quantity):
problems = []
if asset == config.BTC: problems.append('cannot send saffroncoins') # Only for parsing.
if not isinstance(quantity, int):
problems.append('quantity must be in satoshis')
return problems
if quantity < 0: problems.append('negative quantity')
return problems
def compose (db, source, destination, asset, quantity):
cursor = db.cursor()
# Just send BTC?
if asset == config.BTC:
return (source, [(destination, quantity)], None)
#quantity must be in int satoshi (not float, string, etc)
if not isinstance(quantity, int):
raise exceptions.SendError('quantity must be an int (in satoshi)')
# Only for outgoing (incoming will overburn).
balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, asset)))
if not balances or balances[0]['quantity'] < quantity:
raise exceptions.SendError('insufficient funds')
problems = validate(db, source, destination, asset, quantity)
if problems: raise exceptions.SendError(problems)
asset_id = util.asset_id(asset)
data = config.PREFIX + struct.pack(config.TXTYPE_FORMAT, ID)
data += struct.pack(FORMAT, asset_id, quantity)
cursor.close()
return (source, [(destination, None)], data)
def parse (db, tx, message):
cursor = db.cursor()
# Unpack message.
try:
assert len(message) == LENGTH
asset_id, quantity = struct.unpack(FORMAT, message)
asset = util.asset_name(asset_id)
status = 'valid'
except (AssertionError, struct.error) as e:
asset, quantity = None, None
status = 'invalid: could not unpack'
if status == 'valid':
# Oversend
cursor.execute('''SELECT * FROM balances \
WHERE (address = ? AND asset = ?)''', (tx['source'], asset))
balances = cursor.fetchall()
if not balances:
status = 'invalid: insufficient funds'
elif balances[0]['quantity'] < quantity:
quantity = min(balances[0]['quantity'], quantity)
if status == 'valid':
# For SQLite3
quantity = min(quantity, config.MAX_INT)
problems = validate(db, tx['source'], tx['destination'], asset, quantity)
if problems: status = 'invalid: ' + '; '.join(problems)
if status == 'valid':
util.debit(db, tx['block_index'], tx['source'], asset, quantity, event=tx['tx_hash'])
util.credit(db, tx['block_index'], tx['destination'], asset, quantity, event=tx['tx_hash'])
# Add parsed transaction to message-type–specific table.
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'destination': tx['destination'],
'asset': asset,
'quantity': quantity,
'status': status,
}
sql='insert into sends values(:tx_index, :tx_hash, :block_index, :source, :destination, :asset, :quantity, :status)'
cursor.execute(sql, bindings)
cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
{
"content_hash": "fdcb4e8f29c2e6aae8817ddbfa098a9d",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 120,
"avg_line_length": 33.32673267326733,
"alnum_prop": 0.6173499702911468,
"repo_name": "saffroncoin/csfrd",
"id": "f5e70552a86fbfd0ef5edc18842337dc94569b44",
"size": "3389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/send.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "364358"
}
],
"symlink_target": ""
}
|
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:6479")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
|
{
"content_hash": "d3f5f07247c6d62fac6fe49afd91557a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 46,
"avg_line_length": 39.25,
"alnum_prop": 0.7643312101910829,
"repo_name": "CarpeDiemCoin/CarpeDiemLaunch",
"id": "cf3e0e5be54a70eb32a567c1f234697b5eaa6602",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/wallettools/walletunlock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "101891"
},
{
"name": "C++",
"bytes": "2425434"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "IDL",
"bytes": "13802"
},
{
"name": "Objective-C",
"bytes": "2734"
},
{
"name": "Python",
"bytes": "37459"
},
{
"name": "Shell",
"bytes": "2575"
},
{
"name": "TypeScript",
"bytes": "5258311"
}
],
"symlink_target": ""
}
|
import clr
import sys
clr.AddReference('ZyGames.Framework.Common')
clr.AddReference('ZyGames.Framework')
clr.AddReference('ZyGames.Framework.Game')
clr.AddReference('ZyGames.Tianjiexing.Model')
clr.AddReference('ZyGames.Tianjiexing.BLL')
clr.AddReference('ZyGames.Tianjiexing.Lang')
clr.AddReference('ZyGames.Tianjiexing.Component')
from action import *
from System import *
from System.Collections.Generic import *
from ZyGames.Framework.Common.Log import *
from ZyGames.Tianjiexing.Model import *
from ZyGames.Tianjiexing.BLL import *
from ZyGames.Tianjiexing.Lang import *
from ZyGames.Framework.Game.Cache import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Framework.Common import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Tianjiexing.Model.Config import *
from ZyGames.Tianjiexing.Component.Chat import *
# Novice guide progress
class UrlParam(HttpParam):
def __init__(self):
HttpParam.__init__(self)
self.GuideId = 0
class ActionResult(DataResult):
def __init__(self):
DataResult.__init__(self)
self.PrizeStr = ''
self.isPass = 0
self.GuideId = 0
        self.RewardStatus = 0
def getUrlElement(httpGet, parent):
urlParam = UrlParam()
return urlParam
def takeAction(urlParam, parent):
actionResult = ActionResult()
userId = parent.Current.User.PersonalId
contextUser = parent.Current.User
noviceUser = GameDataCacheSet[NoviceUser]().FindKey(userId)
    if noviceUser and noviceUser.IsClose:
        if (DateTime.Now - contextUser.LoginTime).Minutes >= 10:
UserItemHelper.AddUserItem(userId,1804,1)
actionResult.RewardStatus = 1
else:
actionResult.RewardStatus = 3
else:
if(noviceUser and noviceUser.IsClose == True):
actionResult.RewardStatus = 2
    actionResult.generalInfo = ShareCacheStruct[NoviceTaskInfo]().FindKey(urlParam.GuideId)
return actionResult
def buildPacket(writer, urlParam, actionResult):
writer.PushIntoStack(actionResult.RewardStatus)
    return True
|
{
"content_hash": "e81325cc20db9cb73274b2cd6899a9cd",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 93,
"avg_line_length": 34.29032258064516,
"alnum_prop": 0.7083725305738476,
"repo_name": "wenhulove333/ScutServer",
"id": "77787befa3355a88bcc15beb50348476d8995099",
"size": "2140",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Sample/Koudai/Server/release/Script/PyScript/Action/action1095.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "150472"
},
{
"name": "ActionScript",
"bytes": "339184"
},
{
"name": "Batchfile",
"bytes": "60466"
},
{
"name": "C",
"bytes": "3976261"
},
{
"name": "C#",
"bytes": "9481083"
},
{
"name": "C++",
"bytes": "11640198"
},
{
"name": "CMake",
"bytes": "489"
},
{
"name": "CSS",
"bytes": "13478"
},
{
"name": "Groff",
"bytes": "16179"
},
{
"name": "HTML",
"bytes": "283997"
},
{
"name": "Inno Setup",
"bytes": "28931"
},
{
"name": "Java",
"bytes": "214263"
},
{
"name": "JavaScript",
"bytes": "2809"
},
{
"name": "Lua",
"bytes": "4667522"
},
{
"name": "Makefile",
"bytes": "166623"
},
{
"name": "Objective-C",
"bytes": "401654"
},
{
"name": "Objective-C++",
"bytes": "355347"
},
{
"name": "Python",
"bytes": "1633926"
},
{
"name": "Shell",
"bytes": "101770"
},
{
"name": "Visual Basic",
"bytes": "18764"
}
],
"symlink_target": ""
}
|
from .model_base import ModelBase
class Template(ModelBase):
""" Object model for TestRail Templates
To get all templates for a project:
.. code-block:: python
target_project = traw_client.project(12) # Get project with id 12
templates_for_project = list(traw_client.template(target_project))
"""
@property
def is_default(self):
""" True if this is a default template """
return self._content.get('is_default')
@property
def name(self):
""" The name of the template """
return self._content.get('name')
|
{
"content_hash": "092579dcd1867006c0a7cae0a26d0d40",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 74,
"avg_line_length": 25.695652173913043,
"alnum_prop": 0.6294416243654822,
"repo_name": "levi-rs/traw",
"id": "c368456ae40ab5392e60730e0f502c79b18359ac",
"size": "591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "traw/models/template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "391436"
}
],
"symlink_target": ""
}
|
"""
Make a new dump file with a specified max number of timesteps and reordered atoms.
"""
from __future__ import print_function
import os
import sys
import argparse
import numpy as np
from md_utils.md_common import (InvalidDataError, create_out_fname, warning, process_cfg,
find_dump_section_state, read_csv_dict)
try:
# noinspection PyCompatibility
from ConfigParser import ConfigParser
except ImportError:
# noinspection PyCompatibility
from configparser import ConfigParser
__author__ = 'hmayes'
# Error Codes
# The good status code
GOOD_RET = 0
INPUT_ERROR = 1
IO_ERROR = 2
INVALID_DATA = 3
# Constants #
# Config File Sections
MAIN_SEC = 'main'
# Config keys
DUMP_FILE = 'dump_file'
DUMPS_FILE = 'dump_list_file'
ATOM_REORDER_FILE = 'atom_reorder_old_new_file'
ATOM_TYPE_FILE = 'atom_type_old_new_file'
MOL_RENUM_FILE = 'mol_renum_old_new_file'
OUT_BASE_DIR = 'output_directory'
MAX_STEPS = 'max_steps'
OUT_FREQ = 'output_every_n_steps'
RENUM_SHIFT = 'shift_mol_num_by'
RENUM_START_MOL = 'first_shift_mol_num'
# Defaults
DEF_CFG_FILE = 'dump_edit.ini'
# Set notation
DEF_CFG_VALS = {DUMPS_FILE: 'dump_list.txt',
DUMP_FILE: None,
OUT_BASE_DIR: None,
ATOM_REORDER_FILE: None,
ATOM_TYPE_FILE: None,
MOL_RENUM_FILE: None,
MAX_STEPS: -1,
OUT_FREQ: 1,
RENUM_START_MOL: -1,
RENUM_SHIFT: 0,
}
REQ_KEYS = {}
# From data template file
NUM_ATOMS = 'num_atoms'
TAIL_CONTENT = 'tail_content'
ATOMS_CONTENT = 'atoms_content'
HEAD_CONTENT = 'head_content'
# For data template file processing
SEC_HEAD = 'head_section'
SEC_ATOMS = 'atoms_section'
SEC_TAIL = 'tail_section'
# For dump file processing
SEC_TIMESTEP = 'timestep'
SEC_NUM_ATOMS = 'dump_num_atoms'
SEC_BOX_SIZE = 'dump_box_size'
# For deciding if a float is close enough to a value
TOL = 0.000001
# Bundle of headers for calculating charge
def read_cfg(f_loc, cfg_proc=process_cfg):
"""
Reads the given configuration file, returning a dict with the converted values supplemented by default values.
:param f_loc: The location of the file to read.
:param cfg_proc: The processor to use for the raw configuration values. Uses default values when the raw
value is missing.
:return: A dict of the processed configuration file's data.
"""
config = ConfigParser()
good_files = config.read(f_loc)
if not good_files:
raise IOError('Could not read file {}'.format(f_loc))
main_proc = cfg_proc(dict(config.items(MAIN_SEC)), DEF_CFG_VALS, REQ_KEYS)
return main_proc
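# An example configuration (hypothetical paths/values) that read_cfg() accepts:
#   [main]
#   dump_list_file = dump_list.txt
#   max_steps = 100
#   output_every_n_steps = 10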
def parse_cmdline(argv):
"""
Returns the parsed argument list and return code.
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
"""
if argv is None:
argv = sys.argv[1:]
# initialize the parser object:
parser = argparse.ArgumentParser(description='Creates new lammps dump files from existing files, with new order, '
'keeping every specified number of timesteps, and stopping at a '
'max number of timesteps.')
parser.add_argument("-c", "--config", help="The location of the configuration file in ini "
"The default file name is {}, located in the "
"base directory where the program as run.".format(DEF_CFG_FILE),
default=DEF_CFG_FILE, type=read_cfg)
args = None
try:
args = parser.parse_args(argv)
except IOError as e:
warning("Problems reading file:", e)
parser.print_help()
return args, IO_ERROR
except KeyError as e:
warning("Input data missing:", e)
parser.print_help()
return args, INPUT_ERROR
except SystemExit as e:
if hasattr(e, 'code') and e.code == 0:
return args, GOOD_RET
warning("Input data missing:", e)
parser.print_help()
return args, INPUT_ERROR
return args, GOOD_RET
def print_to_dump_file(head_content, atoms_struct, fname, mode='a'):
"""
    Writes the header content and atom data to the given file in lammps dump format.
    @param head_content: the header lines to repeat for each timestep written
    @param atoms_struct: The list of atoms to write to file.
    @param fname: The location of the file to write.
@param mode: to append or write new file
"""
with open(fname, mode) as w_file:
for line in head_content:
w_file.write(line + '\n')
for line in atoms_struct:
w_file.write(' '.join(map(str, line)) + '\n')
if mode == 'w':
print("Wrote file: {}".format(fname))
def process_dump_file(cfg, dump_file, atom_num_dict, atom_type_dict, mol_num_dict):
section = None
box = np.zeros((3,))
counter = 1
num_atoms = 0
head_content = []
steps_count = 0
step_stop = cfg[MAX_STEPS] * cfg[OUT_FREQ]
timestep = None
with open(dump_file) as d:
d_out = create_out_fname(dump_file, suffix='_reorder', base_dir=cfg[OUT_BASE_DIR])
write_mode = 'w'
for line in d:
line = line.strip()
if section == SEC_ATOMS:
split_line = line.split()
# If there is an incomplete line in a dump file, move on to the next file
if len(split_line) < 7:
break
atom_num = int(split_line[0])
if atom_num in atom_num_dict:
atom_num = atom_num_dict[atom_num]
mol_num = int(split_line[1])
if mol_num in mol_num_dict:
mol_num = mol_num_dict[mol_num]
# Default RENUM_START_MOL is neg 1; if still less than zero, user did not specify renumbering
if 0 <= cfg[RENUM_START_MOL] <= mol_num:
mol_num += cfg[RENUM_SHIFT]
atom_type = int(split_line[2])
if atom_type in atom_type_dict:
atom_type = atom_type_dict[atom_type]
charge = float(split_line[3])
x, y, z = map(float, split_line[4:7])
atom_struct = [atom_num, mol_num, atom_type, charge, x, y, z]
atom_data.append(atom_struct)
if counter == num_atoms:
if len(atom_num_dict) > 0:
atom_data = sorted(atom_data, key=lambda atom: atom[0])
steps_count += 1
if steps_count % cfg[OUT_FREQ] == 0:
print_to_dump_file(head_content, atom_data, d_out, mode=write_mode)
if write_mode == 'w':
write_mode = 'a'
if steps_count == step_stop:
print("Reached the maximum number of steps ({})".format(cfg[MAX_STEPS]))
counter = 1
break
# reset for next timestep
head_content = []
counter = 0
section = None
counter += 1
else:
head_content.append(line)
if section is None:
section = find_dump_section_state(line)
if section is None:
raise InvalidDataError('Unexpected line in file {}: {}'.format(d, line))
elif section == SEC_TIMESTEP:
timestep = line
# Reset variables
atom_data = []
section = None
elif section == SEC_NUM_ATOMS:
num_atoms = int(line)
section = None
elif section == SEC_BOX_SIZE:
split_line = line.split()
diff = float(split_line[1]) - float(split_line[0])
box[counter - 1] = diff
if counter == 3:
counter = 0
section = None
counter += 1
if counter == 1:
print("Completed reading: {}".format(dump_file))
else:
warning("Dump file {} step {} did not have the full list of atom numbers. "
"Continuing program.".format(dump_file, timestep))
def process_dump_files(cfg, atom_num_dict, atom_type_dict, mol_num_dict):
if cfg[DUMP_FILE] is None:
dump_file_list = []
else:
dump_file_list = [cfg[DUMP_FILE]]
if os.path.isfile(cfg[DUMPS_FILE]):
with open(cfg[DUMPS_FILE]) as f:
for dump_file in f:
dump_file = dump_file.strip()
# ignore blank lines in dump file list
if len(dump_file) == 0:
continue
else:
dump_file_list.append(dump_file)
else:
warning("Did not find file: '{}'".format(cfg[DUMPS_FILE]))
if len(dump_file_list) == 0:
raise InvalidDataError("Found no files to process. In the configuration file, specify one file "
"with the keyword '{}' or a list of "
"files with the keyword '{}'".format(DUMP_FILE, DUMPS_FILE))
for dump_file in dump_file_list:
process_dump_file(cfg, dump_file, atom_num_dict, atom_type_dict, mol_num_dict)
def main(argv=None):
# Read input
args, ret = parse_cmdline(argv)
if ret != GOOD_RET or args is None:
return ret
# Read template and dump files
cfg = args.config
try:
atom_num_dict = read_csv_dict(cfg[ATOM_REORDER_FILE])
atom_type_dict = read_csv_dict(cfg[ATOM_TYPE_FILE], one_to_one=False)
mol_num_dict = read_csv_dict(cfg[MOL_RENUM_FILE], one_to_one=False)
process_dump_files(cfg, atom_num_dict, atom_type_dict, mol_num_dict)
except IOError as e:
warning("Problems reading file:", e)
return IO_ERROR
except InvalidDataError as e:
warning("Problems reading data:", e)
return INVALID_DATA
return GOOD_RET # success
if __name__ == '__main__':
status = main()
sys.exit(status)
|
{
"content_hash": "01d2724cbe183850974bcde1758c193c",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 118,
"avg_line_length": 35.19112627986348,
"alnum_prop": 0.5529046649209582,
"repo_name": "team-mayes/md_utils",
"id": "f019c212c8e8e9a274864997e60c9d8498338952",
"size": "10333",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "md_utils/dump_edit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1698"
},
{
"name": "Python",
"bytes": "864423"
},
{
"name": "Shell",
"bytes": "19211"
},
{
"name": "Smarty",
"bytes": "41046"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
def send_text(text, frm, to, fail_silently=True, status_report=False):
"""A convenient helper to quickly access the application features.
Returns a :class:`~djsms.models.TextMessage` object.
Required parameters:
    :param text: the text message body. Its length is not limited, but if it's
        too long, your carrier will probably split it and send it in a multipart
        text message, and you will be charged accordingly.
:param frm: the originator phone number. Some carriers accept
alphanumerical values, but it really depends on mobile networks and
local laws.
:param to: the recipient phone number.
    Additional parameters:
    :param fail_silently: If True, errors will just be ignored.
    :param status_report: If True, asks the selected carrier to provide a
        status report by asynchronously calling a URI. If your carrier doesn't
        provide this option, the parameter value will simply be ignored.
Raises:
Any :class:`~djsms.exceptions.TextMessageError` subclass if
``fail_silently`` is False.
"""
pass
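# A minimal usage sketch (hypothetical numbers, assumes a configured carrier):
#   from djsms import send_text
#   msg = send_text("Hello!", frm="+15550001111", to="+15552223333",
#                   fail_silently=False, status_report=True)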
|
{
"content_hash": "2f5a360a2d1a1813e835709355e4e717",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 35.78125,
"alnum_prop": 0.7048034934497817,
"repo_name": "thibault/django-simple-sms",
"id": "8ad60c6201bab93b9a8947bb32977dcdbe6ab5da",
"size": "1170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djsms/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14593"
},
{
"name": "Shell",
"bytes": "6723"
}
],
"symlink_target": ""
}
|
def extractBierutranslationsHomeBlog(item):
'''
Parser for 'bierutranslations.home.blog'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
{
"content_hash": "4436c0a3d428a6c1dde54a04eb7372fd",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 26.904761904761905,
"alnum_prop": 0.6389380530973451,
"repo_name": "fake-name/ReadableWebProxy",
"id": "ba1cce940083a1e90b7891cd1938ab449ef8d4d3",
"size": "566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractBierutranslationsHomeBlog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
import cPickle as pickle
from argparse import ArgumentParser
import numpy as np
import cv2
parser = ArgumentParser()
parser.add_argument('file_path')
options = parser.parse_args()
def data_uri_to_cv2_img(uri):
encoded_data = uri.split(',')[1]
np_arr = np.fromstring(encoded_data.decode('base64'), np.uint8)
img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
return img
obj = pickle.load(open(options.file_path))
print(obj['data_url'])
#data_uri_to_cv2_img(obj)
#import ipdb
#ipdb.set_trace()
|
{
"content_hash": "a29f4489dc7a561ff355d352f8a1e64e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 67,
"avg_line_length": 22.08695652173913,
"alnum_prop": 0.7106299212598425,
"repo_name": "GXIC-Real-Intelligence-Team/surveillance-core",
"id": "d38981d842aabfa47ecf2f0b12b11c407f341f2a",
"size": "532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/load_pickle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "29799"
},
{
"name": "Python",
"bytes": "28602"
},
{
"name": "Shell",
"bytes": "3117"
}
],
"symlink_target": ""
}
|
from quex.engine.misc.file_in import write_safely_and_close
from quex.blackboard import setup as Setup
import quex.output.cpp.source_package as source_package
import quex.blackboard as blackboard
from quex.engine.generator.action_info import UserCodeFragment_straighten_open_line_pragmas
#
import quex.input.files.core as quex_file_parser
#
import quex.output.cpp.core as cpp_generator
import quex.output.cpp.token_id_maker as token_id_maker
import quex.output.cpp.token_class_maker as token_class_maker
import quex.output.cpp.analyzer_class as analyzer_class
import quex.output.cpp.configuration as configuration
import quex.output.cpp.mode_classes as mode_classes
import quex.output.cpp.action_preparation as action_preparation
import quex.output.cpp.codec_converter_helper as codec_converter_helper
import quex.output.graphviz.core as grapviz_generator
def do():
"""Generates state machines for all modes. Each mode results into
a separate state machine that is stuck into a virtual function
of a class derived from class 'quex_mode'.
"""
if Setup.language == "DOT":
return do_plot()
mode_db = quex_file_parser.do(Setup.input_mode_files)
# (*) [Optional] Generate a converter helper
codec_converter_helper_header, \
codec_converter_helper_implementation = codec_converter_helper.do()
# (*) Generate the token ids
# (This needs to happen after the parsing of mode_db, since during that
# the token_id_db is developed.)
if Setup.external_lexeme_null_object != "":
# Assume external implementation
token_id_header = None
function_map_id_to_name_implementation = ""
else:
token_id_header = token_id_maker.do(Setup)
function_map_id_to_name_implementation = token_id_maker.do_map_id_to_name_function()
# (*) [Optional] Make a customized token class
class_token_header, \
class_token_implementation = token_class_maker.do(function_map_id_to_name_implementation)
if Setup.token_class_only_f:
write_safely_and_close(blackboard.token_type_definition.get_file_name(),
do_token_class_info() \
+ class_token_header)
write_safely_and_close(Setup.output_token_class_file_implementation,
class_token_implementation)
write_safely_and_close(Setup.output_token_id_file, token_id_header)
return
# (*) Implement the 'quex' core class from a template
# -- do the coding of the class framework
configuration_header = configuration.do(mode_db)
class_analyzer_header = analyzer_class.do(mode_db)
class_analyzer_implementation = analyzer_class.do_implementation(mode_db)
mode_implementation = mode_classes.do(mode_db)
# (*) implement the lexer mode-specific analyser functions
function_analyzers_implementation = analyzer_functions_get(mode_db)
    # Implementation (Potential Inline Functions)
    class_implementation = class_analyzer_implementation + "\n"
    if class_token_implementation is not None:
        class_implementation += class_token_implementation + "\n"
# Engine (Source Code)
engine_txt = mode_implementation + "\n" \
+ function_analyzers_implementation + "\n" \
+ function_map_id_to_name_implementation + "\n"
# (*) Write Files ___________________________________________________________________
if codec_converter_helper_header is not None:
write_safely_and_close(Setup.output_buffer_codec_header, codec_converter_helper_header)
write_safely_and_close(Setup.output_buffer_codec_header_i, codec_converter_helper_implementation)
if token_id_header is not None:
write_safely_and_close(Setup.output_token_id_file, token_id_header)
write_safely_and_close(Setup.output_configuration_file, configuration_header)
    if Setup.language == "C":
        engine_txt += class_implementation
    else:
        class_analyzer_header = class_analyzer_header.replace("$$ADDITIONAL_HEADER_CONTENT$$", class_implementation)
write_safely_and_close(Setup.output_header_file, class_analyzer_header)
write_safely_and_close(Setup.output_code_file, engine_txt)
if class_token_header is not None:
write_safely_and_close(blackboard.token_type_definition.get_file_name(),
class_token_header)
for file_name in [Setup.output_header_file,
Setup.output_code_file,
blackboard.token_type_definition.get_file_name()]:
UserCodeFragment_straighten_open_line_pragmas(file_name, "C")
if Setup.source_package_directory != "":
source_package.do()
def analyzer_functions_get(ModeDB):
IndentationSupportF = blackboard.requires_indentation_count(ModeDB)
BeginOfLineSupportF = blackboard.requires_begin_of_line_condition_support(ModeDB)
inheritance_info_str = ""
analyzer_code = ""
# (*) Get list of modes that are actually implemented
# (abstract modes only serve as common base)
mode_list = [ mode for mode in ModeDB.itervalues()
if mode.options["inheritable"] != "only" ]
mode_name_list = [ mode.name for mode in mode_list ]
for mode in mode_list:
# -- some modes only define event handlers that are inherited
if len(mode.get_pattern_action_pair_list()) == 0: continue
# -- prepare the source code fragments for the generator
required_local_variables_db, \
pattern_action_pair_list, \
on_end_of_stream_action, \
on_failure_action, \
on_after_match_str = action_preparation.do(mode, IndentationSupportF, BeginOfLineSupportF)
# -- prepare code generation
generator = cpp_generator.Generator(StateMachineName = mode.name,
PatternActionPair_List = pattern_action_pair_list,
OnFailureAction = on_failure_action,
OnEndOfStreamAction = on_end_of_stream_action,
OnAfterMatch = on_after_match_str,
ModeNameList = mode_name_list)
# -- generate!
analyzer_code += "".join(generator.do(required_local_variables_db))
if Setup.comment_mode_patterns_f:
inheritance_info_str += mode.get_documentation()
# Bring the info about the patterns first
if Setup.comment_mode_patterns_f:
comment = []
Setup.language_db.ML_COMMENT(comment,
"BEGIN: MODE PATTERNS\n" + \
inheritance_info_str + \
"\nEND: MODE PATTERNS")
comment.append("\n") # For safety: New content may have to start in a newline, e.g. "#ifdef ..."
analyzer_code += "".join(comment)
# generate frame for analyser code
return cpp_generator.frame_this(analyzer_code)
def do_plot():
mode_db = quex_file_parser.do(Setup.input_mode_files)
for mode in mode_db.values():
# -- some modes only define event handlers that are inherited
pattern_action_pair_list = mode.get_pattern_action_pair_list()
if len(pattern_action_pair_list) == 0: continue
plotter = grapviz_generator.Generator(pattern_action_pair_list,
StateMachineName = mode.name)
plotter.do(Option=Setup.character_display)
def do_token_class_info():
info_list = [
" --token-id-prefix %s" % Setup.token_id_prefix,
" --token-class-file %s" % Setup.output_token_class_file,
" --token-class %s" % Setup.token_class,
" --token-id-type %s" % Setup.token_id_type,
" --buffer-element-type %s" % Setup.buffer_element_type,
" --lexeme-null-object %s" % Setup.lexeme_null_full_name_cpp,
" --foreign-token-id-file %s" % Setup.output_token_id_file,
]
print "info: Analyzers using this token class must be generated with"
print "info:"
for line in info_list:
print "info: %s" % line
print "info:"
print "info: Header: \"%s\"" % blackboard.token_type_definition.get_file_name()
print "info: Source: \"%s\"" % Setup.output_token_class_file_implementation
comment = ["<<<QUEX-OPTIONS>>>\n"]
for line in info_list:
if line.find("--token-class-file") != -1: continue
comment.append("%s\n" % line)
comment.append("<<<QUEX-OPTIONS>>>")
txt = []
Setup.language_db.ML_COMMENT(txt, "".join(comment), IndentN=0)
return "".join(txt) + "\n"
|
{
"content_hash": "a8283d0e660254d2149e1853d44f20f1",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 114,
"avg_line_length": 45.23267326732673,
"alnum_prop": 0.6061070373207836,
"repo_name": "coderjames/pascal",
"id": "7335d7759e4f585f9e7e2fde4dfda681f3a19559",
"size": "9137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quex-0.63.1/quex/core.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "194851"
},
{
"name": "C++",
"bytes": "78624"
},
{
"name": "Delphi",
"bytes": "5659"
},
{
"name": "Python",
"bytes": "1350210"
}
],
"symlink_target": ""
}
|
from numpy import array, einsum, exp
from .constants import ASEC2RAD
from .earthlib import compute_limb_angle, refract, terra
from .functions import from_polar, length_of, to_polar, rot_x, rot_y, rot_z
from .chaining import Body, Segment
from .timelib import takes_julian_date
from .units import (Distance, Velocity, Angle, _interpret_angle,
                    _interpret_ltude)
# 2*pi, needed by _altaz_rotation below; the full skyfield source pulls this
# constant from .constants.
TAU = 6.283185307179586
class Topos(Body):
"""An object representing a specific location on the Earth's surface."""
def __init__(self, latitude=None, longitude=None, latitude_degrees=None,
longitude_degrees=None, elevation_m=0.0, x=0.0, y=0.0):
if latitude_degrees is not None:
latitude = Angle(degrees=latitude_degrees)
elif isinstance(latitude, (str, float, tuple)):
latitude = _interpret_ltude(latitude, 'latitude', 'N', 'S')
elif not isinstance(latitude, Angle):
raise TypeError('please provide either latitude_degrees=<float>'
' or latitude=<skyfield.units.Angle object>'
' with north being positive')
if longitude_degrees is not None:
longitude = Angle(degrees=longitude_degrees)
elif isinstance(longitude, (str, float, tuple)):
longitude = _interpret_ltude(longitude, 'longitude', 'E', 'W')
elif not isinstance(longitude, Angle):
raise TypeError('please provide either longitude_degrees=<float>'
' or longitude=<skyfield.units.Angle object>'
' with east being positive')
self.latitude = latitude
self.longitude = longitude
self.elevation = Distance(m=elevation_m)
self.x = x
self.y = y
self.R_lat = rot_y(latitude.radians)[::-1]
self.code = self
self.segments = [Segment(399, self, self.compute)]
def __repr__(self):
return '<Topos {0} N, {1} E>'.format(self.latitude, self.longitude)
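    # e.g. (hypothetical coordinates):
    #   boston = Topos(latitude_degrees=42.3583, longitude_degrees=-71.0603,
    #                  elevation_m=43.0)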
def compute(self, jd):
position, velocity = self._position_and_velocity(jd)
return position, velocity
# @takes_julian_date
# def __call__(self, jd):
# """Compute where this Earth location was in space on a given date."""
# e = self.ephemeris.earth(jd)
# tpos_au, tvel_au_per_d = self._position_and_velocity(jd)
# t = Barycentric(e.position.au + tpos_au,
# e.velocity.au_per_d + tvel_au_per_d,
# jd)
# t.geocentric = False # test, then get rid of this attribute
# t.rGCRS = tpos_au
# t.vGCRS = tvel_au_per_d
# t.topos = self
# t.ephemeris = self.ephemeris
# t.altaz_rotation = self._altaz_rotation(jd)
# return t
# @takes_julian_date
# def gcrs(self, jd):
# """Compute where this location was in the GCRS on a given date."""
# tpos_au, tvel_au_per_d = self._position_and_velocity(jd)
# t = Geocentric(tpos_au, tvel_au_per_d, jd)
# t.topos = self
# t.ephemeris = self.ephemeris
# t.altaz_rotation = self._altaz_rotation(jd)
# return t
def _position_and_velocity(self, jd):
"""Return the GCRS position, velocity of this Topos at `jd`."""
pos, vel = terra(self.latitude.radians, self.longitude.radians,
self.elevation.au, jd.gast)
pos = einsum('ij...,j...->i...', jd.MT, pos)
vel = einsum('ij...,j...->i...', jd.MT, vel)
if self.x:
R = rot_y(self.x * ASEC2RAD)
pos = einsum('ij...,j...->i...', R, pos)
if self.y:
R = rot_x(self.y * ASEC2RAD)
pos = einsum('ij...,j...->i...', R, pos)
# TODO: also rotate velocity
return pos, vel
def _altaz_rotation(self, jd):
"""Compute the rotation from the ICRS into the alt-az system."""
R_lon = rot_z(- self.longitude.radians - jd.gast * TAU / 24.0)
return einsum('ij...,jk...,kl...->il...', self.R_lat, R_lon, jd.M)
|
{
"content_hash": "a19bd7ec88ff81a7e21ddbb7ed0d6db1",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 79,
"avg_line_length": 41.68041237113402,
"alnum_prop": 0.5711105614642592,
"repo_name": "exoanalytic/python-skyfield",
"id": "05fc6afa4ad1179c45f228f5d0123fb66c5e2fb7",
"size": "4043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skyfield/toposlib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "461182"
},
{
"name": "Shell",
"bytes": "102"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
from flexget.plugin import PluginError
log = logging.getLogger('list_add')
class ListAdd(object):
schema = {
'type': 'array',
'items': {
'allOf': [
{'$ref': '/schema/plugins?interface=list'},
{
'maxProperties': 1,
'error_maxProperties': 'Plugin options within list_add plugin must be indented 2 more spaces than '
'the first letter of the plugin name.',
'minProperties': 1
}
]
}
}
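    # A task config sketch this schema accepts (hypothetical list plugins):
    #   list_add:
    #     - entry_list: my queue
    #     - trakt_list:
    #         account: me
    #         list: watchlist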
def on_task_start(self, task, config):
for item in config:
for plugin_name, plugin_config in item.items():
try:
thelist = plugin.get_plugin_by_name(plugin_name).instance.get_list(plugin_config)
except AttributeError:
raise PluginError('Plugin %s does not support list interface' % plugin_name)
if thelist.immutable:
raise plugin.PluginError(thelist.immutable)
# Run later in the phase, to capture any entry fields that might change during the output phase
@plugin.priority(0)
def on_task_output(self, task, config):
if not len(task.accepted) > 0:
log.debug('no accepted entries, nothing to add')
return
for item in config:
for plugin_name, plugin_config in item.items():
thelist = plugin.get_plugin_by_name(plugin_name).instance.get_list(plugin_config)
if task.manager.options.test and thelist.online:
log.info('`%s` is marked as an online plugin, would add accepted items outside of --test mode. '
'Skipping', plugin_name)
continue
log.verbose('adding accepted entries into %s - %s', plugin_name, plugin_config)
thelist |= task.accepted
@event('plugin.register')
def register_plugin():
plugin.register(ListAdd, 'list_add', api_ver=2)
|
{
"content_hash": "71180237a1f027ea1a3de37adebd49f0",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 119,
"avg_line_length": 38.76271186440678,
"alnum_prop": 0.574989068648885,
"repo_name": "qk4l/Flexget",
"id": "595e12407a3ea80165ed2d2691b1f3755dc20205",
"size": "2287",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "flexget/plugins/output/list_add.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "HTML",
"bytes": "79376"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3322934"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
}
|
"""Sensor platform support for yeelight."""
import logging
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DATA_CONFIG_ENTRIES, DATA_DEVICE, DATA_UPDATED, DOMAIN
from .entity import YeelightEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up Yeelight from a config entry."""
device = hass.data[DOMAIN][DATA_CONFIG_ENTRIES][config_entry.entry_id][DATA_DEVICE]
if device.is_nightlight_supported:
_LOGGER.debug("Adding nightlight mode sensor for %s", device.name)
async_add_entities([YeelightNightlightModeSensor(device, config_entry)])
class YeelightNightlightModeSensor(YeelightEntity, BinarySensorEntity):
"""Representation of a Yeelight nightlight mode sensor."""
async def async_added_to_hass(self):
"""Handle entity which will be added."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
DATA_UPDATED.format(self._device.host),
self.async_write_ha_state,
)
)
await super().async_added_to_hass()
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._unique_id}-nightlight_sensor"
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._device.name} nightlight"
@property
def is_on(self):
"""Return true if nightlight mode is on."""
return self._device.is_nightlight_enabled
|
{
"content_hash": "5bf4ed937825438887a8f1f6f9acb752",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 87,
"avg_line_length": 34.26923076923077,
"alnum_prop": 0.6812570145903479,
"repo_name": "jawilson/home-assistant",
"id": "89eb910f942393fd89fee61c8d97c5ee819069f7",
"size": "1782",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "homeassistant/components/yeelight/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import logging
from datetime import datetime
from constants import *
import handlers
class WarmupHandler(handlers.BaseRequestHandler):
def get(self):
logging.info("Warmup Request")
class Monthly(handlers.BaseRequestHandler):
def get(self):
pass
class AdminDigest(handlers.BaseRequestHandler):
def get(self):
pass
|
{
"content_hash": "a17e08654fdf9d3af85a61aab0780137",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 49,
"avg_line_length": 17.8,
"alnum_prop": 0.7247191011235955,
"repo_name": "onejgordon/action-potential",
"id": "fb0335ec3908fd86869b1e5abeddb59ed615971e",
"size": "356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actions/cronActions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "143150"
},
{
"name": "HTML",
"bytes": "2676"
},
{
"name": "JavaScript",
"bytes": "169821"
},
{
"name": "Python",
"bytes": "163865"
},
{
"name": "Shell",
"bytes": "2725"
}
],
"symlink_target": ""
}
|
''' A bar chart based on simple Python lists of data. This example demonstrates
automatic colormapping.
.. bokeh-example-metadata::
:apis: bokeh.plotting.figure.vbar, bokeh.transform.factor_cmap
:refs: :ref:`ug_basic_bars_filled`, :ref:`ug_basic_bars_filled_colors`
:keywords: bar, colormap, legend, palette, vbar
'''
from bokeh.models import ColumnDataSource
from bokeh.palettes import Bright6
from bokeh.plotting import figure, show
from bokeh.transform import factor_cmap
fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries']
counts = [5, 3, 4, 2, 4, 6]
source = ColumnDataSource(data=dict(fruits=fruits, counts=counts))
p = figure(x_range=fruits, height=350, toolbar_location=None, title="Fruit Counts")
p.vbar(x='fruits', top='counts', width=0.9, source=source, legend_field="fruits",
line_color='white', fill_color=factor_cmap('fruits', palette=Bright6, factors=fruits))
p.xgrid.grid_line_color = None
p.y_range.start = 0
p.y_range.end = 9
p.legend.orientation = "horizontal"
p.legend.location = "top_center"
show(p)
|
{
"content_hash": "92a1af9ba250797bb96cd5d48d02cdc6",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 93,
"avg_line_length": 34.645161290322584,
"alnum_prop": 0.7262569832402235,
"repo_name": "bokeh/bokeh",
"id": "9553e9df2beed8acf05c636292e94c606c6f5628",
"size": "1074",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-3.1",
"path": "examples/basic/bars/colormapped.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1884"
},
{
"name": "Dockerfile",
"bytes": "1924"
},
{
"name": "GLSL",
"bytes": "44696"
},
{
"name": "HTML",
"bytes": "53475"
},
{
"name": "JavaScript",
"bytes": "20301"
},
{
"name": "Less",
"bytes": "46376"
},
{
"name": "Python",
"bytes": "4475226"
},
{
"name": "Shell",
"bytes": "7673"
},
{
"name": "TypeScript",
"bytes": "3652153"
}
],
"symlink_target": ""
}
|
"""
William Dreese 2017
Development
Takes a list of nodes and uses Naive Bayes machine learning
to predict test cases. Handles 1 file currently.
-add Laplace smoothing
-add pitcher analysis
-add streak analysis
"""
from nodeGeneral import Node, teamDict
class nodeBayes:
def __init__(self, nodeList):
self._list = nodeList
self._l = len(self._list)
tW = 0.0
for node in self._list:
tW += float(node._attributes[-1])
self._totalProb = tW/float(self._l)
def compileAtt(self, testVal, att):
tmW = 0.0
tm = 0.0
for node in self._list:
if node._attributes[att] == testVal:
tmW += float(node._attributes[-1])
tm += 1.0
ret = tmW/tm
return ret / float(tm/float(self._l))
def test(self, oppo,div,ha,dn):
#apply the correct compiled probs using the test case, made for baseball
op = teamDict[oppo]
ret = self._totalProb
ret *= self.compileAtt(op,0)
ret *= self.compileAtt(div,1)
ret *= self.compileAtt(ha,2)
ret *= self.compileAtt(dn,3)
ret = float(int(ret*10000.0))/100.0
print("PHI v",oppo,"win chance: %",ret)
|
{
"content_hash": "16e3cebbf03093ceba7fb55ec0103695",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 27.630434782608695,
"alnum_prop": 0.5656963021243115,
"repo_name": "Dreeseaw/BaseballNodes",
"id": "935b742c5d5024940f77fd1d3cb6892745672c63",
"size": "1271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nodeBayes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5858"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# For Python 2 compatibility
# from __future__ import print_function
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
from random import randint
from pandas import to_datetime
import pandas as pd
import numpy as np
import os
def fillNoneValues(column):
"""Fill all NaN/NaT values of a column with an empty string
Args:
column (pandas.Series): A Series object with all rows.
Returns:
column: Series with filled NaN values.
"""
if column.dtype == object:
column.fillna('', inplace=True)
return column
def convertTimestamps(column):
"""Convert a dtype of a given column to a datetime.
This method tries to do this by brute force.
Args:
column (pandas.Series): A Series object with all rows.
Returns:
column: Converted to datetime if no errors occured, else the
original column will be returned.
"""
tempColumn = column
try:
# Try to convert the first row and a random row instead of the complete
# column, might be faster
# tempValue = np.datetime64(column[0])
tempValue = np.datetime64(column[randint(0, len(column.index) - 1)])
tempColumn = column.apply(to_datetime)
except Exception:
pass
return tempColumn
def superReadCSV(filepath, first_codec='UTF_8', usecols=None,
low_memory=False, dtype=None, parse_dates=True,
sep=',', chunksize=None, verbose=False, **kwargs):
"""
A wrap to pandas read_csv with mods to accept a dataframe or
filepath. returns dataframe untouched, reads filepath and returns
dataframe based on arguments.
"""
if isinstance(filepath, pd.DataFrame):
return filepath
assert isinstance(first_codec, str), "first_codec must be a string"
codecs = ['UTF_8', 'ISO-8859-1', 'ASCII', 'UTF_16', 'UTF_32']
try:
codecs.remove(first_codec)
except ValueError as not_in_list:
pass
codecs.insert(0, first_codec)
errors = []
for c in codecs:
try:
return pd.read_csv(filepath,
usecols=usecols,
low_memory=low_memory,
encoding=c,
dtype=dtype,
parse_dates=parse_dates,
sep=sep,
chunksize=chunksize,
**kwargs)
# Need to catch `UnicodeError` here, not just `UnicodeDecodeError`,
# because pandas 0.23.1 raises it when decoding with UTF_16 and the
# file is not in that format:
except (UnicodeError, UnboundLocalError) as e:
errors.append(e)
except Exception as e:
errors.append(e)
if 'tokenizing' in str(e):
pass
else:
raise
if verbose:
[print(e) for e in errors]
    # UnicodeDecodeError requires five positional arguments, so raise the base
    # UnicodeError with a plain message instead.
    raise UnicodeError("Tried {} codecs and failed on all: \n CODECS: {} \n FILENAME: {}".format(
        len(codecs), codecs, os.path.basename(filepath)))
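# e.g. superReadCSV('data.csv', first_codec='ISO-8859-1') tries ISO-8859-1
# first, then falls back through the remaining codecs before giving up.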
def _count(item, string):
if len(item) == 1:
return len(''.join(x for x in string if x == item))
return len(str(string.split(item)))
def identify_sep(filepath):
"""
Identifies the separator of data in a filepath.
It reads the first line of the file and counts supported separators.
Currently supported separators: ['|', ';', ',','\t',':']
"""
ext = os.path.splitext(filepath)[1].lower()
allowed_exts = ['.csv', '.txt', '.tsv']
    assert ext in allowed_exts, "Unexpected file extension {}. \
        Supported extensions {}\n filename: {}".format(
        ext, allowed_exts, os.path.basename(filepath))
maybe_seps = ['|',
';',
',',
'\t',
':']
    with open(filepath, 'r') as fp:
        header = next(fp)
count_seps_header = {sep: _count(sep, header) for sep in maybe_seps}
count_seps_header = {sep: count for sep,
count in count_seps_header.items() if count > 0}
if count_seps_header:
return max(count_seps_header.__iter__(),
key=(lambda key: count_seps_header[key]))
else:
raise Exception("Couldn't identify the sep from the header... here's the information:\n HEADER: {}\n SEPS SEARCHED: {}".format(header, maybe_seps))
def superReadText(filepath, **kwargs):
"""
A wrapper to superReadCSV which wraps pandas.read_csv().
The benefit of using this function is that it automatically identifies the
column separator.
.tsv files are assumed to have a \t (tab) separation
.csv files are assumed to have a comma separation.
.txt (or any other type) get the first line of the file opened
and get tested for various separators as defined in the identify_sep
function.
"""
if isinstance(filepath, pd.DataFrame):
return filepath
sep = kwargs.get('sep', None)
ext = os.path.splitext(filepath)[1].lower()
if sep is None:
if ext == '.tsv':
kwargs['sep'] = '\t'
elif ext == '.csv':
kwargs['sep'] = ','
else:
found_sep = identify_sep(filepath)
print(found_sep)
kwargs['sep'] = found_sep
return superReadCSV(filepath, **kwargs)
def superReadFile(filepath, **kwargs):
"""
Uses pandas.read_excel (on excel files) and returns a dataframe of the
first sheet (unless sheet is specified in kwargs)
Uses superReadText (on .txt,.tsv, or .csv files) and returns a dataframe of
the data. One function to read almost all types of data files.
"""
if isinstance(filepath, pd.DataFrame):
return filepath
ext = os.path.splitext(filepath)[1].lower()
if ext in ['.xlsx', '.xls']:
df = pd.read_excel(filepath, **kwargs)
elif ext in ['.pkl', '.p', '.pickle', '.pk']:
df = pd.read_pickle(filepath)
else:
# Assume it's a text-like file and try to read it.
try:
df = superReadText(filepath, **kwargs)
except Exception as e:
# TODO: Make this trace back better? Custom Exception? Raise original?
raise Exception("Error reading file: {}".format(e))
return df
def dedupe_cols(frame):
"""
Need to dedupe columns that have the same name.
"""
cols = list(frame.columns)
for i, item in enumerate(frame.columns):
if item in frame.columns[:i]:
cols[i] = "toDROP"
frame.columns = cols
    return frame.drop("toDROP", axis=1, errors='ignore')
def rename_dupe_cols(cols):
"""
Takes a list of strings and appends 2,3,4 etc to duplicates. Never
appends a 0 or 1. Appended #s are not always in order...but if you wrap
this in a dataframe.to_sql function you're guaranteed to not have dupe
column name errors importing data to SQL...you'll just have to check
yourself to see which fields were renamed.
"""
counts = {}
positions = {pos: fld for pos, fld in enumerate(cols)}
for c in cols:
if c in counts.keys():
counts[c] += 1
else:
counts[c] = 1
fixed_cols = {}
    # Snapshot the items: positions is mutated inside the loop, and iterating
    # a live dict view would see the renamed values under Python 3.
    for pos, col in list(positions.items()):
if counts[col] > 1:
fix_cols = {pos: fld for pos, fld in positions.items() if fld == col}
keys = [p for p in fix_cols.keys()]
min_pos = min(keys)
cnt = 1
for p, c in fix_cols.items():
if not p == min_pos:
cnt += 1
c = c + str(cnt)
fixed_cols.update({p: c})
positions.update(fixed_cols)
cols = [x for x in positions.values()]
return cols
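# e.g. rename_dupe_cols(['id', 'id', 'name']) returns ['id', 'id2', 'name'].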
|
{
"content_hash": "b7241d49b96e3ea2e587f48a05c0312f",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 155,
"avg_line_length": 32.24609375,
"alnum_prop": 0.5699576014536645,
"repo_name": "draperjames/qtpandas",
"id": "060c8ed71878b34f86670eeafe5e1259673927ea",
"size": "8255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qtpandas/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "461072"
}
],
"symlink_target": ""
}
|
"""Generate a square wave in an .aiff audio file."""
import aifc
import struct
samples_per_second = 44100
C3_Hz = 261.63
samples_high = int(samples_per_second / C3_Hz * 0.5)
samples_low = int(samples_per_second / C3_Hz * 0.5)
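# Each half-period of a C3 square wave lasts (1 / 261.63) / 2 seconds,
# i.e. roughly 84 samples at 44.1 kHz.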
amplitude = 2**14 # Leave some headroom
output_file_name = 'square_C3.aiff'
f = aifc.open(output_file_name, 'wb')
print
print 'writing samples to "%s"...' % output_file_name,
f.setnchannels(1)
f.setsampwidth(2)
f.setframerate(int(samples_per_second))
clock = 0.0
while clock <= 3 * samples_per_second:
samples = []
clock_limit = clock + samples_high
while clock <= clock_limit:
samples.append(amplitude)
clock += 1
clock_limit = clock + samples_low
while clock <= clock_limit:
samples.append(-amplitude)
clock += 1
f.writeframes(struct.pack('>%dh' % len(samples), *samples))
f.close()
print 'done'
print
|
{
"content_hash": "9e993a6670d108d4a008f11a6b47d823",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 63,
"avg_line_length": 27.75,
"alnum_prop": 0.6632882882882883,
"repo_name": "wsnook/sandbox",
"id": "d854baae3e51dbf3fc4661d14c4379c4fa1c46f2",
"size": "888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "audio/square_wave_aiff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "3235"
},
{
"name": "Go",
"bytes": "665"
},
{
"name": "HTML",
"bytes": "9148"
},
{
"name": "JavaScript",
"bytes": "9308"
},
{
"name": "Python",
"bytes": "12232"
},
{
"name": "Shell",
"bytes": "2046"
},
{
"name": "Vim script",
"bytes": "645"
}
],
"symlink_target": ""
}
|
from rockstar import RockStar
lua_code = "print 'hello world!'"
rock_it_bro = RockStar(days=400, file_name='helloworld.lua', code=lua_code)
rock_it_bro.make_me_a_rockstar()
|
{
"content_hash": "b2292a39ce1005b9c7d25f3e4d5ba84a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 75,
"avg_line_length": 34.8,
"alnum_prop": 0.735632183908046,
"repo_name": "Endika/rockstar",
"id": "80ef0fd6c18d740460eb7e912eb6df7a5ce9490a",
"size": "174",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/lua_rockstar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4996"
}
],
"symlink_target": ""
}
|
EAT_PLUGIN = 1
EAT_HEXCHAT = 2
EAT_ALL = 3
EAT_NONE = None
PRI_HIGHEST = 9
PRI_HIGH = 8
PRI_NORM = 7
PRI_LOW = 6
PRI_LOWEST = 5
import re
import traceback
import threading
import socket
import os
import os.path
import sys
locky = threading.RLock()
cmd_pattern = re.compile(r'eg: /(.+)', re.MULTILINE)
def parse(yo):
eol = []
tot = 0
word = yo.split()
for x in word:
eol.append(yo.split(" ", tot)[-1])
tot += 1
return word, eol
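# e.g. parse("say hello world") returns
#   (['say', 'hello', 'world'],
#    ['say hello world', 'hello world', 'world'])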
def prnt(stri):
    # The original body referenced undefined q/pprnt; print directly instead
    # (matching the redefinition further down).
    print(stri)
def pastebin(tb):
with locky:
try:
sock = socket.socket()
sock.connect(("termbin.com", 9999))
            if sys.version_info[0] == 3:
sock.send(tb.encode("utf-8", "replace") + b"\n")
url = sock.recv(1024).decode("utf-8")
else:
sock.send(tb+'\n')
url = sock.recv(1024)
sock.close()
except socket.error:
traceback.print_exc()
else:
return url
if __name__ == "__main__":
import os
broken = 0
print("HexChat addons test script.")
print("There might be weird stuff. Don't worry. :)\n")
for val in os.listdir("addons"):
path = os.path.join('addons','__init__.py')
open(path,'w').close()
if val.startswith('__'):
continue
if val.endswith('.pyc'):
continue
if os.path.isdir(os.path.join('addons',val)):
for x in os.listdir(os.path.join('addons',val)):
if not x.endswith('.py'):
continue
if x.startswith('__'):
continue
x = x.replace('.py','')
x = "{0}.{1}".format(val, x)
print("Testing {0}".format(x))
ppath = os.path.join('addons',val,'__init__.py')
open(ppath, 'w').close()
try:
__import__('addons.{0}'.format(x), globals=globals())
print("{0} is WORKING.".format(x))
os.remove(ppath)
except Exception as err:
errurl = pastebin(traceback.format_exc())
print("{0} is FAILING. ({1}: {2})".format(x, err, errurl))
broken = 1
if not val.endswith('.py'):
continue
val = val.replace(".py", "")
print("Testing {0}".format(val))
try:
__import__('addons.{0}'.format(val), globals=globals())
print("{0} is WORKING.".format(val))
except Exception as err:
print("{0} is FAILING. ({1})".format(val, err))
broken = 1
os.remove(path)
if broken == 1:
print("\nThere are broken addons. :(")
sys.exit(1)
else:
print("\nEverything is fine. :)")
sys.exit(0)
def hook_command(name, function, help=None):
if help:
yo = cmd_pattern.findall(help)
for each in yo:
word, eol = parse(each)
function(word, eol, None)
def command(command):
split = command.replace("\x034", "").split(" ")
if split[0] == "say":
print("<testuser> {0}".format(command.replace(split[0] + " ", "")))
elif split[0] == "me":
print("* testuser {0}".format(command.replace(split[0] + " ", "")))
elif split[0] == "msg" or split[0] == "privmsg":
print(">{0}< {1}".format(split[1], command.replace(split[0] + " ", "").replace(split[1] + " ", "")))
elif split[0] == "notice":
print("->{0}<- {1}".format(split[1], command.replace(split[0] + " ", "").replace(split[1] + " ", "")))
rlines = {
"nick": ':Slavetator!noteness@unaffiliated/nessessary129/bot/slavetator NICK :Slavetator___',
"kick":':Slavetator!noteness@unaffiliated/nessessary129/bot/slavetator KICK noteness :You should know better',
"priv": ':Slavetator!noteness@unaffiliated/nessessary129/bot/slavetator PRIVMSG #Slavetator-test :Hello'
}
datas = {
}
def hook_server(raw,func,priority):
    raw = raw.lower()
    raws = rlines.get(raw, None)
    word, eol = parse(raws)
    print('*** Server sends --> '+raws)
    data = datas.get(raw, None)
    bb = func(word, eol, data)
    if bb not in (EAT_ALL, EAT_PLUGIN):
        print('*** Plugins receive <-- '+raws)
    if bb not in (EAT_ALL, EAT_HEXCHAT):
        print('*** We receive <-- '+raws)
    if bb == EAT_PLUGIN:
        print("*** Current Plugin stops processing")
def hook_print(name, func, priority):
raws = rlines['priv']
word, eol = parse(raws)
func(word, eol, name)
calls = {}
def hook_timer(time, func, userdata=None):
global calls
if time in calls:
return
calls[time] = 1
func(userdata)
def prnt(stri):
print(stri)
def get_pluginpref(idk):
pass
def set_pluginpref(_,__):
pass
def hook_unload(_):
pass
class dupe:
def command(self,cmd):
return
def find_context(server=None,channel=None):
global calls
if server and channel:
if server + channel in calls:
return dupe()
calls[server + channel] = 1
if channel:
if channel in calls:
return dupe()
calls[channel] = 1
if server:
if server in calls:
return dupe()
calls[server] = 1
return sys.modules[__name__]
def getcontext():
return sys.modules[__name__]
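# --- Minimal usage sketch (hypothetical, not part of the mock's API) ---
# Shows how an addon handler under test flows through hook_server() and
# hook_command() above; the handler and the /greet command are made up.
def _example_handler(word, eol, userdata):
    prnt("handler saw: {0}".format(eol[-1] if eol else ""))
    return EAT_NONE

def _run_example():
    # Feed the canned PRIVMSG line from rlines through the handler.
    hook_server("priv", _example_handler, PRI_NORM)
    # Exercise the "eg: /..." help-text parsing path.
    hook_command("greet", _example_handler, help="eg: /greet world")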
|
{
"content_hash": "63183c02c77c33d5cc7969e47782413e",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 114,
"avg_line_length": 28.978494623655912,
"alnum_prop": 0.5233766233766234,
"repo_name": "necessary129/my-hexchat-addons",
"id": "f192c61fae5138b845e773a50ae0b8d70a0ef608",
"size": "6396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hexchat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25904"
}
],
"symlink_target": ""
}
|
import re
import logging
import json as simplejson
from google.appengine.api import channel
from wins import Wins
#from match_results import setMatchResults
from matchmaker_results import setMatchMakerResults
#from deactivate_player import deactivatePlayer
class GameUpdater():
game = None
def __init__(self, game):
self.game = game
def get_game_message(self):
gameUpdate = {
'board': self.game.board,
'userX': self.game.userX.user_id(),
'userO': '' if not self.game.userO else self.game.userO.user_id(),
'moveX': self.game.moveX,
'winner': self.game.winner,
'winningBoard': self.game.winning_board,
'tied': self.game.tied,
            'userXleetcoinKey': self.game.userXleetcoinKey or '',
            'userOleetcoinKey': self.game.userOleetcoinKey or '',
}
logging.info('gameUpdate: %s'%gameUpdate)
gameJson = simplejson.dumps(gameUpdate)
logging.info('gameJson: %s'%gameJson)
return gameJson
def send_update(self):
message = self.get_game_message()
channel.send_message(self.game.userX.user_id() + self.game.key().id_or_name(), message)
if self.game.userO:
channel.send_message(self.game.userO.user_id() + self.game.key().id_or_name(), message)
def check_win(self):
if self.game.moveX:
# O just moved, check for O wins
wins = Wins().o_wins
potential_winner = self.game.userO.user_id()
else:
# X just moved, check for X wins
wins = Wins().x_wins
potential_winner = self.game.userX.user_id()
for win in wins:
if win.match(self.game.board):
logging.info("potential_winner: %s" % potential_winner)
self.game.winner = potential_winner
self.game.winning_board = win.pattern
player_keys = [str(self.game.userX), str(self.game.userO)]
player_names = [str(self.game.userX), str(self.game.userO)]
weapons = ['X', 'O']
if potential_winner == self.game.userX.user_id():
kills = [1,0]
deaths = [0,1]
ranks = [1601, 1599]
else:
kills = [0,1]
deaths = [1,0]
ranks = [1599, 1601]
setMatchMakerResults(self.game, 'leetcointactoe', player_keys, player_names, weapons, kills, deaths, ranks)
return True
## Check for a draw.
if self.game.moves >= 9:
player_keys = [str(self.game.userX), str(self.game.userO)]
player_names = [str(self.game.userX), str(self.game.userO)]
weapons = ['X', 'O']
kills = [0,0]
deaths = [0,0]
ranks = [1600, 1600]
setMatchMakerResults(self.game, 'leetcointactoe', player_keys, player_names, weapons, kills, deaths, ranks)
self.game.tied = True
return False
return False
def make_move(self, position, user):
logging.info('make_move')
logging.info('user: %s' %user)
logging.info('self.game.userX: %s' %self.game.userX)
logging.info('self.game.userO: %s' %self.game.userO)
        if position >= 0 and (str(user) == str(self.game.userX) or str(user) == str(self.game.userO)):
            logging.info('position >= 0 and (user == self.game.userX or user == self.game.userO)')
if self.game.moveX == (str(user) == str(self.game.userX)):
logging.info('self.game.moveX == (user == self.game.userX)')
boardList = list(self.game.board)
if (boardList[position] == ' '):
boardList[position] = 'X' if self.game.moveX else 'O'
self.game.board = "".join(boardList)
self.game.moveX = not self.game.moveX
                self.game.moves += 1
logging.info('self.game.moves: %s' %self.game.moves)
win = self.check_win()
self.game.put()
self.send_update()
return
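# A standalone sketch of the win-check idea used in GameUpdater.check_win
# above: each entry in Wins (assumed) behaves like a compiled regex whose
# pattern matches a winning 9-character board string. Illustrative only:
_EXAMPLE_WIN = re.compile('XXX......')   # X holds the entire top row
assert _EXAMPLE_WIN.match('XXXOO OO ')   # a board with X across the top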
|
{
"content_hash": "06652d66b25e0f4b3358853dcbe99222",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 123,
"avg_line_length": 36.975,
"alnum_prop": 0.5215235519495154,
"repo_name": "LeetCoinTeam/lc_tactoe",
"id": "7a24e63833d312a6adb0c7373b9dcd09890ff35c",
"size": "4437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game_updater.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1449"
},
{
"name": "HTML",
"bytes": "9353"
},
{
"name": "Python",
"bytes": "906908"
}
],
"symlink_target": ""
}
|
"""
Creates an RSS feed from HackerNews because there isn't a good one out there.
COPYRIGHT NOTICE
---------------
Copyright 2016-2017 David Lowry-Duda
You are free to redistribute and/or modify HNRSS under the
terms of the MIT License. A copy of this license should be made available with
the source.
I'm happy if you find HNRSS useful, but be advised that
it comes WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
"""
from urllib.request import urlopen
from timeout import timeout
import feedmaker as rss2feed
import json
import time
import summarizer
import logging
logger = logging.getLogger(__name__)
# From the HN API
HN_API_BASE_URL = "https://hacker-news.firebaseio.com/v0/"
HN_BEST_URL = HN_API_BASE_URL + "beststories.json"
HN_TOP_URL = HN_API_BASE_URL + "topstories.json"
HN_NEW_URL = HN_API_BASE_URL + "newstories.json"
class HNrss(object):
"Create RSS feed for HackerNews"
def __init__(self,
api,
title="Unofficial HackerNews RSS",
link="https://news.ycombinator.com",
numposts=25):
#description="A work in progress",
self.title = title
self.link = link
self.description = ("Created and maintained by David Lowry-Duda "
"<davidlowryduda@davidlowryduda.com> "
"davidlowryduda.com")
self.api = api
self.numposts = numposts
self.xml = ""
self.feed = rss2feed.FeedMaker(title=self.title,
link=self.link,
description=self.description)
def generate_feed(self):
"""
        Fill the RSS feed with `numposts` posts, each with at most 4 comments.
Attempt to make a summary of the post using summarizer.py.
"""
post_ids = self._get_post_ids(self.api)
for pid in post_ids:
post_data = self._get_post_data(pid)
post_title = post_data.get('title', "")
post_score = post_data.get('score', "")
post_author = post_data.get('by', "")
post_kids = post_data.get('kids', "")
post_time = self._format_time(post_data.get('time'))
post_url = post_data.get('url', "")
post_text = post_data.get('text', "")
if not post_text:
post_text = ("<h2>Automated summary of {}.</h2>\n"
"[There may be errors].\n<p>").format(post_url)
try:
with timeout(seconds=15):
post_text += summarizer.summarize(post_url)
logger.debug("No problem occurred during summary")
except TimeoutError:
post_text += "Automated summary timed out. No summary available."
logger.info("Timeout occurred during automated summary.")
except Exception:
post_text += "Unknown error occurred during automated " + \
"summary. No Summary available."
logger.error("Automated summary failed for UNKNOWN reason")
post_text += "</p>"
post_text += ("<p>Current post score: {}. "
"Full comments are at "
"<a href='https://news.ycombinator.com/item?id={}'>"
"https://news.ycombinator.com/item?id={}</a></p>"
).format(post_score, pid, pid)
if post_kids:
post_text += ("<h3> Top Comments </h3><ol>\n\n")
for kid in post_kids[:4]:
kid_data = self._get_post_data(kid)
kid_text = ("<h3><li>{author} at {time}</h3>\n"
"<p>{text}</li>").format(
author=kid_data.get('by', 'Someone'),
time=self._format_time(kid_data.get('time')),
text=kid_data.get('text'))
post_text += kid_text
if post_kids:
post_text += "</ol>\n"
self.feed.append_item(title=post_title,
author=post_author,
link=post_url,
pubDate=post_time,
description=post_text)
def make_xml(self):
"Generate xml in `self.xml` from `self.feed`"
self.xml = self.feed.get_xml()
return
def _get_post_ids(self, url):
"Return a list containing the post ids."
return self._get_json_data(url)[:self.numposts]
def _get_post_data(self, post_id):
"""
Retrieve the content of the url corresponding to ``post_id`` and
parse it as a dictionary.
"""
data = self._get_json_data(HN_API_BASE_URL + "item/" \
+ str(post_id) + ".json?print=pretty")
return data
@staticmethod
def _format_time(epochtime):
"Translate Unix time into `Mon, 1 Jan 2015 12:01:01 -4:00` format."
if not epochtime:
epochtime = time.localtime()
return time.strftime("%a, %d %B %Y %X %Z", time.gmtime(epochtime))
@staticmethod
def _get_json_data(url):
"Retrieve the content of ``url`` and parse it as a dictionary."
response = urlopen(url)
data = response.read().decode(encoding='UTF-8')
return json.loads(data)
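# Illustrative check of the conversion performed by HNrss._format_time
# (the epoch value below is arbitrary; exact wording depends on locale):
_example_pubdate = time.strftime("%a, %d %B %Y %X %Z", time.gmtime(1451606400))
# -> roughly 'Fri, 01 January 2016 00:00:00 GMT'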
def main_top():
"Generate the feed for TOP posts"
hntest = HNrss(HN_TOP_URL, title="Unofficial HackerNews test post RSS")
hntest.generate_feed()
return hntest
if __name__ == "__main__":
HN_TOP = main_top()
with open("testrss.html", "w") as f:
f.write(HN_TOP.feed.get_xml())
|
{
"content_hash": "de3e01a3803a9a9246ca3c8860d3ef1e",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 85,
"avg_line_length": 37.0503144654088,
"alnum_prop": 0.531318961127143,
"repo_name": "davidlowryduda/hnrss",
"id": "e07d5565be7fea874611f874bc229bb8ea5973c1",
"size": "5939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hnrss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11428"
}
],
"symlink_target": ""
}
|
import random
import time
class RandomDelayModel:
"""
    This model causes a random delay between min_delay and max_delay seconds after each test step
"""
def __init__(self, min_delay, max_delay):
"""
:param min_delay: minimum delay in seconds
:param max_delay: maximum delay in seconds
"""
self.min = min_delay
self.max = max_delay
def after(self):
delay = random.uniform(self.min, self.max)
print(f'Sleeping {delay}')
time.sleep(delay)
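# Minimal usage sketch (standalone; in pyosmo the model would normally be
# attached to the test generator, which calls after() between steps):
if __name__ == '__main__':
    model = RandomDelayModel(0.1, 0.3)
    model.after()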
|
{
"content_hash": "173e81947a77a22855e315448d28b012",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 80,
"avg_line_length": 24.476190476190474,
"alnum_prop": 0.5992217898832685,
"repo_name": "OPpuolitaival/pyosmo",
"id": "199193a31e57e3f2b629f330bedd3fc526623d87",
"size": "514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyosmo/models/random_delay_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45300"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
}
|
'''OpenGL extension NV.pixel_data_range
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_NV_pixel_data_range'
_DEPRECATED = False
GL_WRITE_PIXEL_DATA_RANGE_NV = constant.Constant( 'GL_WRITE_PIXEL_DATA_RANGE_NV', 0x8878 )
GL_READ_PIXEL_DATA_RANGE_NV = constant.Constant( 'GL_READ_PIXEL_DATA_RANGE_NV', 0x8879 )
GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV = constant.Constant( 'GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV', 0x887A )
glget.addGLGetConstant( GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV, (1,) )
GL_READ_PIXEL_DATA_RANGE_LENGTH_NV = constant.Constant( 'GL_READ_PIXEL_DATA_RANGE_LENGTH_NV', 0x887B )
glget.addGLGetConstant( GL_READ_PIXEL_DATA_RANGE_LENGTH_NV, (1,) )
GL_WRITE_PIXEL_DATA_RANGE_POINTER_NV = constant.Constant( 'GL_WRITE_PIXEL_DATA_RANGE_POINTER_NV', 0x887C )
GL_READ_PIXEL_DATA_RANGE_POINTER_NV = constant.Constant( 'GL_READ_PIXEL_DATA_RANGE_POINTER_NV', 0x887D )
glPixelDataRangeNV = platform.createExtensionFunction(
'glPixelDataRangeNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLsizei,ctypes.c_void_p,),
doc='glPixelDataRangeNV(GLenum(target), GLsizei(length), c_void_p(pointer)) -> None',
argNames=('target','length','pointer',),
deprecated=_DEPRECATED,
)
glFlushPixelDataRangeNV = platform.createExtensionFunction(
'glFlushPixelDataRangeNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,),
doc='glFlushPixelDataRangeNV(GLenum(target)) -> None',
argNames=('target',),
deprecated=_DEPRECATED,
)
def glInitPixelDataRangeNV():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
|
{
"content_hash": "b6e495a592be3004a65dee8d79026b00",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 106,
"avg_line_length": 43.73809523809524,
"alnum_prop": 0.773543821448013,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "b6df09bab59c473be980d6451f3acbcca569f61e",
"size": "1837",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/raw/GL/NV/pixel_data_range.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import unittest
import env
from src.BoardCubePool import BoardCubePool
from src import Enums
from src.MediatorResource import MediatorResource
#The unit testing framework will automatically consider any method starting with 'test' as a test case.
#We use this and put all unit tests in a single class to use the setUp feature
class TestBoardCubePool(unittest.TestCase):
#Between each test we want to reset the callback to having not been called
    #Also we initialize the cube pool here to get a reference to this object's callback function
def setUp(self):
self.wasEventFired = False
self.boardCubePool = BoardCubePool()
self.mediator = MediatorResource.Mediator
self.mediator.add_listener('cubes_unavaiable', self.handleEvent)
#Set flag to indicate we called the callback
def handleEvent(self, event):
self.wasEventFired = True
def testNoCallbackCallOnce(self):
self.boardCubePool.takeCube(1, Enums.Color.Red)
self.assertFalse(self.wasEventFired)
def testCallbackTake25(self):
self.boardCubePool.takeCube(25, Enums.Color.Red)
self.assertTrue(self.wasEventFired)
def testNoCallbackTake24Times(self):
for i in range(24):
self.boardCubePool.takeCube(1,Enums.Color.Yellow)
self.assertFalse(self.wasEventFired)
def testCallbackTake25Times(self):
for i in range(25):
self.boardCubePool.takeCube(1,Enums.Color.Black)
self.assertTrue(self.wasEventFired)
def testNoCallbackTakeEach10Times(self):
for i in range(10):
self.boardCubePool.takeCube(1, Enums.Color.Blue)
self.boardCubePool.takeCube(1, Enums.Color.Black)
self.boardCubePool.takeCube(1, Enums.Color.Red)
self.boardCubePool.takeCube(1, Enums.Color.Yellow)
self.assertFalse(self.wasEventFired)
def testCallbackTakeEach10TimesAndTakeBlueMore(self):
for i in range(10):
self.boardCubePool.takeCube(1, Enums.Color.Blue)
self.boardCubePool.takeCube(1, Enums.Color.Black)
self.boardCubePool.takeCube(1, Enums.Color.Red)
self.boardCubePool.takeCube(1, Enums.Color.Yellow)
for i in range(15):
self.boardCubePool.takeCube(1, Enums.Color.Blue)
self.assertTrue(self.wasEventFired)
def testNoCallbackTakeAndreturnCube(self):
for i in range(10):
self.boardCubePool.takeCube(1, Enums.Color.Yellow)
self.boardCubePool.returnCube(1, Enums.Color.Yellow)
for i in range(15):
self.boardCubePool.takeCube(1, Enums.Color.Yellow)
self.assertFalse(self.wasEventFired)
def testCallbackTakeAndReturnDifferentCube(self):
for i in range(10):
self.boardCubePool.takeCube(1, Enums.Color.Yellow)
self.boardCubePool.takeCube(1, Enums.Color.Red)
self.boardCubePool.returnCube(1, Enums.Color.Red)
for i in range(15):
self.boardCubePool.takeCube(1, Enums.Color.Yellow)
self.assertTrue(self.wasEventFired)
def testExceptionsThrown(self):
raised = False
try:
self.boardCubePool.takeCube(1, None)
except:
raised = True
self.assertTrue(raised, "Exception was not raised")
raised = False
try:
            self.boardCubePool.takeCube(0, Enums.Color.Blue)
except:
raised = True
self.assertTrue(raised, "Exception was not raised")
raised = False
try:
self.boardCubePool.returnCube(1, None)
except:
raised = True
self.assertTrue(raised, "Exception was not raised")
raised = False
try:
            self.boardCubePool.returnCube(0, Enums.Color.Blue)
except:
raised = True
self.assertTrue(raised, "Exception was not raised")
def testCannotReturnCubesNotTaken(self):
raised = False
try:
            self.boardCubePool.returnCube(1, Enums.Color.Blue)
except:
raised = True
self.assertTrue(raised, "Exception was not raised")
raised = False
try:
            self.boardCubePool.takeCube(3, Enums.Color.Red)
            self.boardCubePool.returnCube(4, Enums.Color.Red)
except:
raised = True
self.assertTrue(raised, "Exception was not raised")
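# Standard unittest entry point so this file can be run directly
# (a small addition; the original may have relied on an external runner):
if __name__ == '__main__':
    unittest.main()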
|
{
"content_hash": "6aabc20b8092f7477a9c8003ea9613b2",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 89,
"avg_line_length": 30.916083916083917,
"alnum_prop": 0.6534720651436327,
"repo_name": "coderjz/pypandemic",
"id": "486867b151b581cb9e7ef9bccbe2537c4525d1c5",
"size": "4421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/TestBoardCubePool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "34"
},
{
"name": "Python",
"bytes": "22129"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
from neuralnilm import Net, RealApplianceSource
from lasagne.nonlinearities import sigmoid
from lasagne.objectives import crossentropy
"""
Setup:
* in_to_cell init weights are now Normal(1.0)
* output all appliances
* boolean targets
Changes:
* fix bug in RealApplianceSource
* use cross-entropy
* smaller network
Results:
"""
source = RealApplianceSource(
'/data/dk3810/ukdale.h5',
['fridge freezer', 'hair straighteners', 'television'],
max_input_power=1000, max_appliance_powers=[300, 500, 200],
window=("2013-06-01", "2014-07-01"),
output_one_appliance=False,
boolean_targets=True,
min_on_duration=60
# sample_period=15, seq_length=400
)
net = Net(
experiment_name="e41a",
source=source,
n_cells_per_hidden_layer=[50,50,50],
output_nonlinearity=sigmoid,
learning_rate=1e-1,
n_dense_cells_per_layer=50,
# validation_interval=2,
save_plot_interval=50,
loss_function=crossentropy
)
# [200,200,200] n_dense_cells=200 got killed before training
net.fit()
#net.plot_costs()
#net.plot_estimates()
|
{
"content_hash": "e883fb137c79cde393028e45340ba071",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 63,
"avg_line_length": 23.3125,
"alnum_prop": 0.707774798927614,
"repo_name": "mmottahedi/neuralnilm_prototype",
"id": "12c8af74e6845ebd410636fbaaf00dc106899893",
"size": "1119",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/e41.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4536723"
}
],
"symlink_target": ""
}
|
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class csvserver_feopolicy_binding(base_resource) :
""" Binding class showing the feopolicy that can be bound to csvserver.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._gotopriorityexpression = ""
self._bindpoint = ""
self._name = ""
self._targetlbvserver = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self.___count = 0
@property
def priority(self) :
ur"""Priority for the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""Priority for the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def bindpoint(self) :
ur"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
ur"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Policies bound to this vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""Policies bound to this vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
ur"""Name of the label to be invoked.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
ur"""Name of the label to be invoked.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def targetlbvserver(self) :
ur"""Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver.
"""
try :
return self._targetlbvserver
except Exception as e:
raise e
@targetlbvserver.setter
def targetlbvserver(self, targetlbvserver) :
ur"""Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver
"""
try :
self._targetlbvserver = targetlbvserver
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def invoke(self) :
ur"""Invoke a policy label if this policy's rule evaluates to TRUE (valid only for default-syntax policies such as application firewall, transform, integrated cache, rewrite, responder, and content switching).
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
ur"""Invoke a policy label if this policy's rule evaluates to TRUE (valid only for default-syntax policies such as application firewall, transform, integrated cache, rewrite, responder, and content switching).
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def labeltype(self) :
ur"""Type of label to be invoked.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
ur"""Type of label to be invoked.
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(csvserver_feopolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.csvserver_feopolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = csvserver_feopolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.targetlbvserver = resource.targetlbvserver
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [csvserver_feopolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].targetlbvserver = resource[i].targetlbvserver
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = csvserver_feopolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [csvserver_feopolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch csvserver_feopolicy_binding resources.
"""
try :
obj = csvserver_feopolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of csvserver_feopolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_feopolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count csvserver_feopolicy_binding resources configued on NetScaler.
"""
try :
obj = csvserver_feopolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of csvserver_feopolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_feopolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class csvserver_feopolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.csvserver_feopolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.csvserver_feopolicy_binding = [csvserver_feopolicy_binding() for _ in range(length)]
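def _example_usage(client):
    """ Hypothetical usage sketch: fetch and count the feopolicy bindings of
    a CS vserver. 'client' must be an authenticated nitro_service session,
    and 'my_cs_vserver' is a placeholder name.
    """
    bindings = csvserver_feopolicy_binding.get(client, "my_cs_vserver")
    total = csvserver_feopolicy_binding.count(client, "my_cs_vserver")
    return bindings, total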
|
{
"content_hash": "6c9164ca99ab8a8e6809fbb79bc54677",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 211,
"avg_line_length": 30.268571428571427,
"alnum_prop": 0.7128563337738343,
"repo_name": "benfinke/ns_python",
"id": "cbbdd3fff82d9ad2f8216482e84978cc1c41b382",
"size": "11208",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/cs/csvserver_feopolicy_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21836782"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
}
|
import re
from amazonproduct.contrib.cart import Cart, Item
from amazonproduct.errors import AWSError
from amazonproduct.processors import BaseResultPaginator, BaseProcessor
from amazonproduct.processors import ITEMS_PAGINATOR, RELATEDITEMS_PAGINATOR
from amazonproduct.utils import import_module
implementations = [
'xml.etree.cElementTree',
'xml.etree.ElementTree',
'cElementTree',
'elementtree.ElementTree',
]
def _load_elementtree_module(*modules):
"""
Returns the first importable ElementTree implementation from a list of
modules. If ``modules`` is omitted :data:`implementations` is used.
"""
if not modules:
modules = implementations
for mod in modules:
try:
return import_module(mod)
except ImportError:
pass
raise ImportError(
"Couldn't find any of the ElementTree implementations in %s!" % (
list(modules), ))
_nsreg = re.compile('^({.+?})')
def extract_nspace(element):
"""
Extracts namespace from XML element. If no namespace is found, ``''``
(empty string) is returned.
"""
m = _nsreg.search(element.tag)
if m: return m.group(1)
return ''
class XPathPaginator (BaseResultPaginator):
"""
Result paginator using XPath expressions to extract page and result
information from XML.
"""
counter = current_page_xpath = total_pages_xpath = total_results_xpath = None
def paginator_data(self, root):
nspace = extract_nspace(root)
def fetch_value(xpath, default):
try:
path = xpath.replace('{}', nspace)
# ElementTree does not seem to support XPath expressions to
# be combined with | separator!
for expr in path.split('|'):
node = root.findtext(expr)
if node is not None:
return int(node)
return default
except (ValueError, TypeError):
return default
return map(lambda a: fetch_value(*a), [
(self.current_page_xpath, 1),
(self.total_pages_xpath, 0),
(self.total_results_xpath, 0)
])
def iterate(self, root):
nspace = extract_nspace(root)
xpath = self.items.replace('{}', nspace)
return root.findall(xpath)
class ItemPaginator (XPathPaginator):
counter = 'ItemPage'
current_page_xpath = './/{}Items/{}Request/{}ItemSearchRequest/{}ItemPage'
total_pages_xpath = './/{}Items/{}TotalPages'
total_results_xpath = './/{}Items/{}TotalResults'
items = './/{}Items/{}Item'
class RelatedItemsPaginator (XPathPaginator):
counter = 'RelatedItemPage'
current_page_xpath = './/{}RelatedItemPage'
total_pages_xpath = './/{}RelatedItems/{}RelatedItemPageCount'
total_results_xpath = './/{}RelatedItems/{}RelatedItemCount'
items = './/{}RelatedItems/{}RelatedItem/{}Item'
class Processor (BaseProcessor):
"""
Result processor using ElementTree.
The first implementation of ElementTree which can be successfully imported
will be used. Order of import is:
* xml.etree.cElementTree
* xml.etree.ElementTree
* cElementTree
* elementtree.ElementTree
"""
paginators = {
ITEMS_PAGINATOR: ItemPaginator,
RELATEDITEMS_PAGINATOR: RelatedItemsPaginator,
}
def __init__(self, *args, **kwargs):
# processor can be told which etree module to use in order to have
# multiple processors each using a different implementation
etree_mod = kwargs.pop('module', None)
try:
if etree_mod:
self.etree = _load_elementtree_module(etree_mod)
else:
self.etree = _load_elementtree_module()
except (AttributeError, ImportError):
self.etree = None
def parse(self, fp):
root = self.etree.parse(fp).getroot()
ns = extract_nspace(root)
errors = root.findall('.//%sError' % ns)
for error in errors:
raise AWSError(
code=error.findtext('./%sCode' % ns),
msg=error.findtext('./%sMessage' % ns),
xml=root)
return root
def __repr__(self): # pragma: no cover
return '<%s using %s at %s>' % (
self.__class__.__name__, getattr(self.etree, '__name__', '???'), hex(id(self)))
@classmethod
def parse_cart(cls, node):
"""
Returns an instance of :class:`amazonproduct.contrib.Cart` based on
information extracted from ``node``.
"""
_nspace = extract_nspace(node)
_xpath = lambda path: path.replace('{}', _nspace)
root = node.find(_xpath('.//{}Cart'))
cart = Cart()
cart.cart_id = root.findtext(_xpath('./{}CartId'))
cart.hmac = root.findtext(_xpath('./{}HMAC'))
cart.url = root.findtext(_xpath('./{}PurchaseURL'))
def parse_item(item_node):
item = Item()
item.item_id = item_node.findtext(_xpath('./{}CartItemId'))
item.asin = item_node.findtext(_xpath('./{}ASIN'))
item.seller = item_node.findtext(_xpath('./{}SellerNickname'))
item.quantity = int(item_node.findtext(_xpath('./{}Quantity')))
item.title = item_node.findtext(_xpath('./{}Title'))
item.product_group = item_node.findtext(_xpath('./{}ProductGroup'))
item.price = (
int(item_node.findtext(_xpath('./{}Price/{}Amount'))),
item_node.findtext(_xpath('./{}Price/{}CurrencyCode')))
item.total = (
int(item_node.findtext(_xpath('./{}ItemTotal/{}Amount'))),
item_node.findtext(_xpath('./{}ItemTotal/{}CurrencyCode')))
return item
try:
for item_node in root.findall(_xpath('./{}CartItems/{}CartItem')):
cart.items.append(parse_item(item_node))
            # SubTotal lives on the Cart element; fall back to (None, None)
            # if it is absent from the response.
            cart.subtotal = (
                int(root.findtext(_xpath('./{}SubTotal/{}Amount'))),
                root.findtext(_xpath('./{}SubTotal/{}CurrencyCode')))
        except (AttributeError, TypeError):
            cart.subtotal = (None, None)
return cart
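def _example_parse(xml_path):
    # Hypothetical usage sketch: parse a saved Product Advertising API
    # response with this processor; xml_path is a placeholder for a file
    # holding a stored XML response. Raises AWSError on API error nodes.
    proc = Processor()
    with open(xml_path, 'rb') as fp:
        return proc.parse(fp)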
|
{
"content_hash": "7c67c591248cfa97274335d0ec670002",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 91,
"avg_line_length": 33.75,
"alnum_prop": 0.5713159968479118,
"repo_name": "knittledan/Location_Search_Prediction",
"id": "8e549aa9d82f913c4ae2da40c8b137f98366a04d",
"size": "6533",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "thirdParty/amazonproduct/processors/elementtree.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "116630"
},
{
"name": "CSS",
"bytes": "549642"
},
{
"name": "JavaScript",
"bytes": "2980664"
},
{
"name": "Perl",
"bytes": "3024"
},
{
"name": "Python",
"bytes": "2551419"
},
{
"name": "Shell",
"bytes": "20806"
},
{
"name": "XSLT",
"bytes": "739855"
}
],
"symlink_target": ""
}
|
import logging
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class StacksTestJSON(base.BaseOrchestrationTest):
@classmethod
def setUpClass(cls):
super(StacksTestJSON, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
template = cls.read_template('non_empty_stack')
image_id = (CONF.orchestration.image_ref or
cls._create_image()['id'])
# create the stack
cls.stack_identifier = cls.create_stack(
cls.stack_name,
template,
parameters={
'trigger': 'start',
'image': image_id
})
cls.stack_id = cls.stack_identifier.split('/')[1]
cls.resource_name = 'fluffy'
cls.resource_type = 'AWS::AutoScaling::LaunchConfiguration'
cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
def _list_stacks(self, expected_num=None, **filter_kwargs):
resp, stacks = self.client.list_stacks(params=filter_kwargs)
self.assertEqual('200', resp['status'])
self.assertIsInstance(stacks, list)
if expected_num is not None:
self.assertEqual(expected_num, len(stacks))
return stacks
@test.attr(type='gate')
def test_stack_list(self):
"""Created stack should be in the list of existing stacks."""
stacks = self._list_stacks()
stacks_names = map(lambda stack: stack['stack_name'], stacks)
self.assertIn(self.stack_name, stacks_names)
@test.attr(type='gate')
def test_stack_show(self):
"""Getting details about created stack should be possible."""
resp, stack = self.client.get_stack(self.stack_name)
self.assertEqual('200', resp['status'])
self.assertIsInstance(stack, dict)
self.assert_fields_in_dict(stack, 'stack_name', 'id', 'links',
'parameters', 'outputs', 'disable_rollback',
'stack_status_reason', 'stack_status',
'creation_time', 'updated_time',
'capabilities', 'notification_topics',
'timeout_mins', 'template_description')
self.assert_fields_in_dict(stack['parameters'], 'AWS::StackId',
'trigger', 'AWS::Region', 'AWS::StackName')
self.assertEqual(True, stack['disable_rollback'],
'disable_rollback should default to True')
self.assertEqual(self.stack_name, stack['stack_name'])
self.assertEqual(self.stack_id, stack['id'])
self.assertEqual('fluffy', stack['outputs'][0]['output_key'])
@test.attr(type='gate')
def test_suspend_resume_stack(self):
"""Suspend and resume a stack."""
resp, suspend_stack = self.client.suspend_stack(self.stack_identifier)
self.assertEqual('200', resp['status'])
self.client.wait_for_stack_status(self.stack_identifier,
'SUSPEND_COMPLETE')
resp, resume_stack = self.client.resume_stack(self.stack_identifier)
self.assertEqual('200', resp['status'])
self.client.wait_for_stack_status(self.stack_identifier,
'RESUME_COMPLETE')
@test.attr(type='gate')
def test_list_resources(self):
"""Getting list of created resources for the stack should be possible.
"""
resources = self.list_resources(self.stack_identifier)
self.assertEqual({self.resource_name: self.resource_type}, resources)
@test.attr(type='gate')
def test_show_resource(self):
"""Getting details about created resource should be possible."""
resp, resource = self.client.get_resource(self.stack_identifier,
self.resource_name)
self.assertIsInstance(resource, dict)
self.assert_fields_in_dict(resource, 'resource_name', 'description',
'links', 'logical_resource_id',
'resource_status', 'updated_time',
'required_by', 'resource_status_reason',
'physical_resource_id', 'resource_type')
self.assertEqual(self.resource_name, resource['logical_resource_id'])
self.assertEqual(self.resource_type, resource['resource_type'])
@test.attr(type='gate')
def test_resource_metadata(self):
"""Getting metadata for created resources should be possible."""
resp, metadata = self.client.show_resource_metadata(
self.stack_identifier,
self.resource_name)
self.assertEqual('200', resp['status'])
self.assertIsInstance(metadata, dict)
self.assertEqual(['Tom', 'Stinky'], metadata.get('kittens', None))
@test.attr(type='gate')
def test_list_events(self):
"""Getting list of created events for the stack should be possible."""
resp, events = self.client.list_events(self.stack_identifier)
self.assertEqual('200', resp['status'])
self.assertIsInstance(events, list)
for event in events:
self.assert_fields_in_dict(event, 'logical_resource_id', 'id',
'resource_status_reason',
'resource_status', 'event_time')
resource_statuses = map(lambda event: event['resource_status'], events)
self.assertIn('CREATE_IN_PROGRESS', resource_statuses)
self.assertIn('CREATE_COMPLETE', resource_statuses)
@test.attr(type='gate')
def test_show_event(self):
"""Getting details about an event should be possible."""
resp, events = self.client.list_resource_events(self.stack_identifier,
self.resource_name)
self.assertNotEqual([], events)
events.sort(key=lambda event: event['event_time'])
event_id = events[0]['id']
resp, event = self.client.show_event(self.stack_identifier,
self.resource_name, event_id)
self.assertEqual('200', resp['status'])
self.assertIsInstance(event, dict)
self.assert_fields_in_dict(event, 'resource_name', 'event_time',
'links', 'logical_resource_id',
'resource_status', 'resource_status_reason',
'physical_resource_id', 'id',
'resource_properties', 'resource_type')
self.assertEqual(self.resource_name, event['resource_name'])
self.assertEqual('state changed', event['resource_status_reason'])
self.assertEqual(self.resource_name, event['logical_resource_id'])
|
{
"content_hash": "e255e9ed3083070a8f8057975e33bd2b",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 47.42567567567568,
"alnum_prop": 0.5798546801538681,
"repo_name": "Mirantis/tempest",
"id": "a97c5615272ad36e08f88e2bd04ce496ccb1b9e5",
"size": "7592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/orchestration/stacks/test_non_empty_stack.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3297127"
},
{
"name": "Shell",
"bytes": "8663"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'openrange'
copyright = u'2015, Josh Tomlinson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'openrangedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'openrange.tex', u'openrange Documentation',
u'Josh Tomlinson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openrange', u'openrange Documentation',
[u'Josh Tomlinson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'openrange', u'openrange Documentation',
u'Josh Tomlinson', 'openrange', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
{
"content_hash": "77c2d71125e2b6c5da314e80dd06e62d",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 79,
"avg_line_length": 31.678030303030305,
"alnum_prop": 0.7039339949778788,
"repo_name": "josh-t/OpenRange",
"id": "e09f4bc9d537002127ef488aa5bfabbe79dfa61b",
"size": "8785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39734"
}
],
"symlink_target": ""
}
|
import sys
import os
import getpass
import urllib.request, urllib.parse
import http.cookiejar
import html.parser
# =============================================================================================================================================================
# =============================================================================================================================================================
# =============================================================================================================================================================
class TestflightJanitor(object):
# ---------------------------------------------------------------------------------------------------------------------------------------------------------
def __init__(self):
self.cookiesJar = http.cookiejar.CookieJar()
self.cookiesHandler = urllib.request.HTTPCookieProcessor(self.cookiesJar)
self.redirectHandler = urllib.request.HTTPRedirectHandler()
self.opener = urllib.request.build_opener(self.cookiesHandler, self.redirectHandler)
# ---------------------------------------------------------------------------------------------------------------------------------------------------------
def dumpCookies(self):
for cookie in self.cookiesJar:
print(cookie.name + "=" + cookie.value)
# ---------------------------------------------------------------------------------------------------------------------------------------------------------
def getSession(self):
request = urllib.request.Request("https://www.testflightapp.com/login/")
        response = self.opener.open(request)
# ---------------------------------------------------------------------------------------------------------------------------------------------------------
def login(self, username, password):
self.getSession()
data = {
"csrfmiddlewaretoken" : next(cookie.value for cookie in self.cookiesJar if cookie.name == "csrftoken"),
"username" : username,
"password" : password
}
postData = urllib.parse.urlencode(data).encode('ascii')
request = urllib.request.Request("https://www.testflightapp.com/login/", postData, {}, "www.testflightapp.com/login/")
        response = self.opener.open(request)
return True
# ---------------------------------------------------------------------------------------------------------------------------------------------------------
class PagesCollectionParser(html.parser.HTMLParser):
        def __init__(self, appId):
            super().__init__()
            self.maxPageNumber = 0
            # Use the requested application id rather than a hard-coded one.
            self.prefix = "/dashboard/applications/%d/builds/?page=" % appId
def handle_starttag(self, tag, attrs):
if tag != "a":
return
for k, v in attrs:
if k != "href" or not v.startswith(self.prefix):
continue
self.maxPageNumber = max(self.maxPageNumber, int(v[len(self.prefix):]))
def handle_endtag(self, tag):
return
def handle_data(self, data):
return
def getBuilds(self, appId):
request = urllib.request.Request("https://www.testflightapp.com/dashboard/applications/%d/builds/" % (appId))
        response = self.opener.open(request)
        htmlContent = response.read().decode('ascii')
        parser = self.PagesCollectionParser(appId)
parser.feed(htmlContent)
print("Found %d pages with builds" % (parser.maxPageNumber))
builds = []
for pageIndex in range(parser.maxPageNumber):
buildsOnPage = self.getBuildsFromPage(appId, pageIndex)
builds = builds + buildsOnPage
return builds
# ---------------------------------------------------------------------------------------------------------------------------------------------------------
class BuildsOnPageParser(html.parser.HTMLParser):
def __init__(self):
super().__init__()
self.builds = []
self.prefix = "/dashboard/builds/crashes/"
def handle_starttag(self, tag, attrs):
if tag != "a":
return
for k, v in attrs:
if k != "href" or not v.startswith(self.prefix):
continue
self.builds.append(int(v[len(self.prefix):]))
def handle_endtag(self, tag):
return
def handle_data(self, data):
return
def getBuildsFromPage(self, appId, pageIndex):
request = urllib.request.Request("https://www.testflightapp.com/dashboard/applications/%d/builds/?page=%d" % (appId, pageIndex + 1))
        response = self.opener.open(request)
htmlContent = response.read().decode('ascii')
parser = self.BuildsOnPageParser()
parser.feed(htmlContent)
print("Found %d builds on page %d" % (len(parser.builds), pageIndex + 1))
return parser.builds
# ---------------------------------------------------------------------------------------------------------------------------------------------------------
def deleteBuild(self, buildId):
print("Deleting build #%d..." % (buildId))
page = "https://www.testflightapp.com/dashboard/builds/delete/%d/" % (buildId)
data = {
"csrfmiddlewaretoken" : next(cookie.value for cookie in self.cookiesJar if cookie.name == "csrftoken")
}
postData = urllib.parse.urlencode(data).encode('ascii')
request = urllib.request.Request(page, postData, {}, page)
        response = self.opener.open(request)
# ---------------------------------------------------------------------------------------------------------------------------------------------------------
def cleanup(self, username, password, appId, buildsToKeep):
ok = self.login(username, password)
if ok == False:
return False
builds = self.getBuilds(appId)
buildsToDelete = builds[buildsToKeep:]
print("Going to delete oldest %d builds from %d..." % (len(buildsToDelete), len(builds)))
for buildId in buildsToDelete:
self.deleteBuild(buildId)
return True
# =============================================================================================================================================================
# =============================================================================================================================================================
# =============================================================================================================================================================
if __name__=='__main__':
username = input('Username: ')
password = getpass.getpass("Password: ")
appId = int(input("Application ID: "))
buildsToKeep = int(input("Number of builds to keep: "))
janitor = TestflightJanitor()
sys.exit(0 if janitor.cleanup(username, password, appId, buildsToKeep) else -1)
|
{
"content_hash": "e3860ae828650be888ece59064a35b28",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 159,
"avg_line_length": 47.695035460992905,
"alnum_prop": 0.4422304832713755,
"repo_name": "Zahnstocher/OsmAnd-ios",
"id": "27f7b14adbd0dc7c37237518da163a7d4857363b",
"size": "6767",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cleanup-testflight.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1848"
},
{
"name": "C++",
"bytes": "8575"
},
{
"name": "Objective-C",
"bytes": "1054792"
},
{
"name": "Objective-C++",
"bytes": "3017458"
},
{
"name": "Python",
"bytes": "23237"
},
{
"name": "Ruby",
"bytes": "3183"
},
{
"name": "Shell",
"bytes": "9380"
}
],
"symlink_target": ""
}
|
import os, time, codecs, yaml, requests
from optparse import OptionParser
from BeautifulSoup import BeautifulSoup
from random import choice
parser = OptionParser(usage='usage: %prog [options] output')
parser.add_option('-v', action='count', dest='verbosity', default=0, help='increase output verbosity')
parser.add_option('-q', '--quiet', action='store_true', dest='quiet', help='hide all output')
parser.add_option('-c', '--config', dest='config', default='config.yaml', type='string', help='YAML configuration file (default: config.yaml)')
parser.add_option('-o', '--offset', dest='offset', default=100, type='int', help='number of records to fetch per page (default: 100)')
parser.add_option('-l', '--limit', dest='limit', default=200000, type='int', help='number of records to fetch per search (default: 200,000)')
parser.add_option('-p', '--pause', dest='pause', default=5, type='int', help='number of seconds to wait between pagination requests (default: 5)')
(options, args) = parser.parse_args()
uniq_list = []
def load_config(filename):
''' Loads and validates the configuration file data '''
fh = open(filename)
    config = yaml.safe_load(fh)  # safe_load avoids constructing arbitrary Python objects
fh.close()
if 'api' not in config:
parser.error('%s is missing the "api" tree root' % filename)
if 'endpoint' not in config['api']:
parser.error('%s is missing the "api/endpoint" subtree' % filename)
if 'search' not in config['api']:
parser.error('%s is missing the "api/search" subtree' % filename)
return config
def headers(args):
''' Adds the custom User-Agent to the request headers '''
if args.get('headers') is None:
args['headers'] = dict()
args['headers'].update({ 'User-Agent':'msnfetch/1.1 (+http://support.tlmdservices.com/)' })
return args
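# Illustrative effect of the hook above (URL is hypothetical): a call like
#   requests.get('http://example.com', hooks=dict(args=headers))
# goes out with the custom msnfetch User-Agent merged into its headers.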
def search(searchurl, searchdata, output):
page = 0
records = 0
hooks = dict(args=headers)
if options.verbosity >= 3 and not options.quiet:
print '[%s] DEBUG: executing %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), searchdata)
while records < options.limit:
if options.verbosity >= 2 and not options.quiet:
print '[%s] INFO: fetching page %d' % (time.strftime('%Y-%m-%d %H:%M:%S'), page+1)
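        # 'ind' is the record offset for this page: page number times page size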
searchdata['ind'] = (page * options.offset)
pagerequest = requests.get(searchurl, hooks=hooks, params=searchdata)
if options.verbosity >= 3 and not options.quiet:
print '[%s] DEBUG: %s (%d)' % (time.strftime('%Y-%m-%d %H:%M:%S'), pagerequest.url, pagerequest.status_code)
pagesoup = BeautifulSoup(pagerequest.content)
pagevideos = pagesoup('video')
if len(pagevideos) > 0:
for pagevideo in pagevideos:
uuid = pagevideo('uuid')[0]
title = pagevideo('title')[0]
refid = uuid['ref']
                if refid not in uniq_list:
output.write(u'%s\t%s\t%s\n' % (uuid.contents[0], refid, title.contents[0]))
uniq_list.append(refid)
records += 1
if records == options.limit:
if options.verbosity >= 2 and not options.quiet:
print '[%s] INFO: max number of records reached' % (time.strftime('%Y-%m-%d %H:%M:%S'))
return records
if options.verbosity >= 2 and not options.quiet:
print '[%s] INFO: sleeping for %d seconds' % (time.strftime('%Y-%m-%d %H:%M:%S'), options.pause)
time.sleep(options.pause)
page += 1
else:
if options.verbosity >= 1 and not options.quiet:
print '[%s] NOTICE: no more records found' % (time.strftime('%Y-%m-%d %H:%M:%S'))
return records
return records
def main():
if len(args) < 1:
parser.error('you must specify the output file.')
filename = args[0]
if not os.path.exists(options.config):
parser.error('the configuration file %s does not exist.' % options.config)
config = load_config(options.config)
basedict = {
'sf': 'ActiveStartDate', 'sd': -1, 'ps': options.offset
}
records = 0
if len(config['api']['search']) > 0:
fp = codecs.open(filename, 'w', 'utf-8')
for searchdata in config['api']['search']:
endpoint = choice(config['api']['endpoint'])
searchurl = endpoint.rstrip('/') + '/' + searchdata['method']
searchdict = dict(basedict.items() + searchdata['params'].items())
records += search(searchurl, searchdict, fp)
if options.verbosity >= 2 and not options.quiet:
print '[%s] INFO: sleeping for %d seconds' % (time.strftime('%Y-%m-%d %H:%M:%S'), options.pause)
time.sleep(options.pause)
if options.verbosity >= 1 and not options.quiet:
print '[%s] NOTICE: downloaded %d records' % (time.strftime('%Y-%m-%d %H:%M:%S'), records)
fp.close()
else:
if not options.quiet:
print '[%s] WARNING: there are no search entries in the configuration file' % (time.strftime('%Y-%m-%d %H:%M:%S'))
if __name__ == '__main__':
main()
|
{
"content_hash": "3cf827699394438ff381de3a7275b3ef",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 145,
"avg_line_length": 48.62264150943396,
"alnum_prop": 0.5941016686069073,
"repo_name": "telemundo/msn-crawler",
"id": "c2d590d09c4a405e836a7abaec673923db77030c",
"size": "5201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "module.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/destructible/shared_destructible_item_barrel.iff"
result.attribute_template_id = -1
result.stfName("container_name","debris")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "0e184e4d7a8540151bede5f9fc66ce4a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 83,
"avg_line_length": 24.076923076923077,
"alnum_prop": 0.7060702875399361,
"repo_name": "anhstudios/swganh",
"id": "53bc1dd994338628d2394040d14777c613a15b43",
"size": "458",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/static/destructible/shared_destructible_item_barrel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""
Test Program Creator role
"""
from integration.ggrc import TestCase
from ggrc.models import get_model
from ggrc.models import all_models
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import Generator
from integration.ggrc.generator import ObjectGenerator
class TestCreator(TestCase):
""" TestCreator """
def setUp(self):
super(TestCreator, self).setUp()
self.generator = Generator()
self.api = Api()
self.object_generator = ObjectGenerator()
self.init_users()
def init_users(self):
""" Init users needed by the test cases """
users = [("creator", "Creator"), ("admin", "Administrator")]
self.users = {}
for (name, role) in users:
_, user = self.object_generator.generate_person(
data={"name": name}, user_role=role)
self.users[name] = user
def test_admin_page_access(self):
for role, code in (("creator", 403), ("admin", 200)):
self.api.set_user(self.users[role])
self.assertEqual(self.api.tc.get("/admin").status_code, code)
def test_creator_can_crud(self):
""" Test Basic create/read,update/delete operations """
self.api.set_user(self.users["creator"])
all_errors = []
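    # Per-model flow below: POST to create, GET as a non-owner (expect 403 and
    # an empty collection), become an owner via ObjectOwner, then GET again.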
base_models = set([
"Control", "DataAsset", "Contract",
"Policy", "Regulation", "Standard", "Document", "Facility",
"Market", "Objective", "OrgGroup", "Vendor", "Product",
"Clause", "System", "Process", "Project", "AccessGroup"
])
for model_singular in base_models:
try:
model = get_model(model_singular)
table_singular = model._inflector.table_singular
table_plural = model._inflector.table_plural
# Test POST creation
response = self.api.post(model, {
table_singular: {
"title": model_singular,
"context": None,
"reference_url": "ref",
"contact": {
"type": "Person",
"id": self.users["creator"].id,
},
},
})
if response.status_code != 201:
all_errors.append("{} post creation failed {} {}".format(
model_singular, response.status, response.data))
continue
# Test GET when not owner
obj_id = response.json.get(table_singular).get("id")
response = self.api.get(model, obj_id)
        if response.status_code != 403:  # we are not owners yet
all_errors.append(
"{} can retrieve object if not owner".format(model_singular))
continue
response = self.api.get_collection(model, obj_id)
collection = response.json.get(
"{}_collection".format(table_plural)).get(table_plural)
if len(collection) != 0:
all_errors.append(
"{} can retrieve object if not owner (collection)"
.format(model_singular))
continue
# Become an owner
response = self.api.post(all_models.ObjectOwner, {"object_owner": {
"person": {
"id": self.users['creator'].id,
"type": "Person",
}, "ownable": {
"type": model_singular,
"id": obj_id
}, "context": None}})
if response.status_code != 201:
all_errors.append("{} can't create owner {}.".format(
model_singular, response.status))
continue
# Test GET when owner
response = self.api.get(model, obj_id)
if response.status_code != 200:
all_errors.append("{} can't GET object {}".format(
model_singular, response.status))
continue
# Test GET collection when owner
response = self.api.get_collection(model, obj_id)
collection = response.json.get(
"{}_collection".format(table_plural)).get(table_plural)
if len(collection) == 0:
all_errors.append(
"{} cannot retrieve object even if owner (collection)"
.format(model_singular))
continue
      except Exception:
all_errors.append("{} exception thrown".format(model_singular))
raise
self.assertEqual(all_errors, [])
def test_creator_search(self):
"""Test if creator can see the correct object while using the search api"""
self.api.set_user(self.users['admin'])
self.api.post(all_models.Regulation, {
"regulation": {"title": "Admin regulation", "context": None},
})
self.api.set_user(self.users['creator'])
response = self.api.post(all_models.Policy, {
"policy": {"title": "Creator Policy", "context": None},
})
obj_id = response.json.get("policy").get("id")
self.api.post(all_models.ObjectOwner, {"object_owner": {
"person": {
"id": self.users['creator'].id,
"type": "Person",
}, "ownable": {
"type": "Policy",
"id": obj_id,
}, "context": None}})
response, _ = self.api.search("Regulation,Policy")
entries = response.json["results"]["entries"]
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0]["type"], "Policy")
response, _ = self.api.search("Regulation,Policy", counts=True)
self.assertEqual(response.json["results"]["counts"]["Policy"], 1)
self.assertEqual(
response.json["results"]["counts"].get("Regulation"), None)
def _get_count(self, obj):
""" Return the number of counts for the given object from search """
response, _ = self.api.search(obj, counts=True)
return response.json["results"]["counts"].get(obj)
def test_creator_should_see_users(self):
""" Test if creator can see all the users in the system """
self.api.set_user(self.users['admin'])
admin_count = self._get_count("Person")
self.api.set_user(self.users['creator'])
creator_count = self._get_count("Person")
self.assertEqual(admin_count, creator_count)
def test_creator_cannot_be_owner(self):
"""Test if creator cannot become owner of the object he has not created"""
self.api.set_user(self.users['admin'])
_, obj = self.generator.generate(all_models.Regulation, "regulation", {
"regulation": {"title": "Test regulation", "context": None},
})
self.api.set_user(self.users['creator'])
response = self.api.post(all_models.ObjectOwner, {"object_owner": {
"person": {
"id": self.users['creator'].id,
"type": "Person",
}, "ownable": {
"type": "Regulation",
"id": obj.id,
}, "context": None}})
self.assertEqual(response.status_code, 403)
def test_relationships_access(self):
"""Check if creator cannot access relationship objects"""
self.api.set_user(self.users['admin'])
_, obj_0 = self.generator.generate(all_models.Regulation, "regulation", {
"regulation": {"title": "Test regulation", "context": None},
})
_, obj_1 = self.generator.generate(all_models.Regulation, "regulation", {
"regulation": {"title": "Test regulation 2", "context": None},
})
response, rel = self.generator.generate(
all_models.Relationship, "relationship", {
"relationship": {"source": {
"id": obj_0.id,
"type": "Regulation"
}, "destination": {
"id": obj_1.id,
"type": "Regulation"
}, "context": None},
}
)
relationship_id = rel.id
self.assertEqual(response.status_code, 201)
self.api.set_user(self.users['creator'])
response = self.api.get_collection(all_models.Relationship,
relationship_id)
self.assertEqual(response.status_code, 200)
num = len(response.json["relationships_collection"]["relationships"])
self.assertEqual(num, 0)
def test_revision_access(self):
"""Check if creator can access the right revision objects."""
def gen(title):
return self.generator.generate(all_models.Section, "section", {
"section": {"title": title, "context": None},
})[1]
def check(obj, expected):
"""Check that how many revisions of an object current user can see."""
response = self.api.get_query(
all_models.Revision,
"resource_type={}&resource_id={}".format(obj.type, obj.id)
)
self.assertEqual(response.status_code, 200)
self.assertEqual(
len(response.json['revisions_collection']['revisions']),
expected
)
self.api.set_user(self.users["admin"])
obj_1 = gen("Test Section 1")
obj_2 = gen("Test Section 2")
self.api.post(all_models.ObjectOwner, {"object_owner": {
"person": {
"id": self.users['creator'].id,
"type": "Person",
}, "ownable": {
"type": "Section",
"id": obj_2.id,
}, "context": None}})
self.api.set_user(self.users["creator"])
check(obj_1, 0)
check(obj_2, 2)
|
{
"content_hash": "486501cc0c8654fd1779b1cb56f9a051",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 79,
"avg_line_length": 37.17768595041322,
"alnum_prop": 0.580971434922752,
"repo_name": "plamut/ggrc-core",
"id": "ffd78d84375b84fcbf2ce8c86426200c74668ba2",
"size": "9110",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test/integration/ggrc_basic_permissions/test_creator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "229800"
},
{
"name": "HTML",
"bytes": "1060475"
},
{
"name": "JavaScript",
"bytes": "1951072"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2839040"
},
{
"name": "Shell",
"bytes": "31100"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Fisher'] , ['Lag1Trend'] , ['Seasonal_Hour'] , ['MLP'] );
|
{
"content_hash": "8cefae5f671d3232333156c7eb4a8e01",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 80,
"avg_line_length": 38.25,
"alnum_prop": 0.6993464052287581,
"repo_name": "antoinecarme/pyaf",
"id": "6ace72d07f35cccb9f6cd16635d1e8e1f5b4117b",
"size": "153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_Lag1Trend_Seasonal_Hour_MLP.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import pytest
def generate_image(xsize, ysize):
from scitbx.array_family import flex
image = flex.random_double(xsize * ysize)
image.reshape(flex.grid(ysize, xsize))
return image
def generate_mask(xsize, ysize):
from scitbx.array_family import flex
mask = flex.random_bool(xsize * ysize, 0.9)
mask.reshape(flex.grid(ysize, xsize))
return mask
def test_filter():
from numpy import median
from dials.algorithms.image.filter import median_filter
xsize = 200
ysize = 300
kernel = (3, 3)
image = generate_image(xsize, ysize)
result = median_filter(image, kernel)
eps = 1e-7
for j in range(kernel[0], ysize - kernel[0]):
for i in range(kernel[1], xsize - kernel[1]):
j0 = j - kernel[0]
j1 = j + kernel[0] + 1
i0 = i - kernel[1]
i1 = i + kernel[1] + 1
pixels = image[j0:j1, i0:i1]
value = median(pixels.as_numpy_array())
assert result[j, i] == pytest.approx(value, abs=eps)
def test_masked_filter():
from numpy import median
from dials.algorithms.image.filter import median_filter
xsize = 200
ysize = 300
kernel = (3, 3)
image = generate_image(xsize, ysize)
mask = generate_mask(xsize, ysize)
result = median_filter(image, mask, kernel)
eps = 1e-7
for j in range(kernel[0], ysize - kernel[0]):
for i in range(kernel[1], xsize - kernel[1]):
if mask[j, i]:
j0 = j - kernel[0]
j1 = j + kernel[0] + 1
i0 = i - kernel[1]
i1 = i + kernel[1] + 1
pixels = image[j0:j1, i0:i1]
pmask = mask[j0:j1, i0:i1]
pixels = pixels.as_1d().select(pmask.as_1d())
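                # numpy.median averages the two middle values of an even-sized
                # sample, while the masked median filter returns the upper of
                # the two; mirror that choice so the comparison is exact.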
if len(pixels) & 1:
value = median(pixels.as_numpy_array())
else:
pixels = sorted(pixels)
value = pixels[len(pixels) // 2]
assert result[j, i] == pytest.approx(value, abs=eps)
|
{
"content_hash": "67ecb4d3ab367a03a9f03b7782f5289c",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 68,
"avg_line_length": 27.350649350649352,
"alnum_prop": 0.5470085470085471,
"repo_name": "dials/dials",
"id": "45d2fca7581538bef3f73cc24bb1853bd987fe0e",
"size": "2106",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/algorithms/image/filter/test_median.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "379"
},
{
"name": "C++",
"bytes": "1758129"
},
{
"name": "CMake",
"bytes": "34388"
},
{
"name": "Dockerfile",
"bytes": "329"
},
{
"name": "Gherkin",
"bytes": "400"
},
{
"name": "HTML",
"bytes": "25033"
},
{
"name": "Makefile",
"bytes": "76"
},
{
"name": "Python",
"bytes": "6147100"
},
{
"name": "Shell",
"bytes": "6419"
}
],
"symlink_target": ""
}
|
from create import create
from delete import delete
from list import list
from usage import usage
__all__ = [
'create',
'delete',
'list',
'usage'
]
|
{
"content_hash": "80314d0c3a184da1fe92e036b827f0aa",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 25,
"avg_line_length": 15,
"alnum_prop": 0.6424242424242425,
"repo_name": "absalon-james/randomload",
"id": "2264e22a0e674fb53a100c90c4b0d5caaca43eaf",
"size": "165",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "randomload/actions/cinder/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26779"
}
],
"symlink_target": ""
}
|
"""distutils.command.bdist_wininst
Implements the Distutils 'bdist_wininst' command: create a windows installer
exe-program."""
import sys, os
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import create_tree, remove_tree
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_wininst(Command):
description = "create an executable installer for MS Windows"
user_options = [('bdist-dir=', None,
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('target-version=', None,
"require a specific python version" +
" on the target system"),
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
"do not compile .py to .pyo (optimized)"
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('bitmap=', 'b',
"bitmap to use for the installer instead of python-powered logo"),
('title=', 't',
"title to display on the installer background instead of default"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
"basename of installation script to be run after"
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
"any files are installed. This script need not be in the "
"distribution"),
('user-access-control=', None,
"specify Vista's UAC handling - 'none'/default=no "
"handling, 'auto'=use UAC if target Python installed for "
"all users, 'force'=always use UAC"),
]
boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
'skip-build']
def initialize_options(self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.no_target_compile = 0
self.no_target_optimize = 0
self.target_version = None
self.dist_dir = None
self.bitmap = None
self.title = None
self.skip_build = None
self.install_script = None
self.pre_install_script = None
self.user_access_control = None
def finalize_options(self):
self.set_undefined_options('bdist', ('skip_build', 'skip_build'))
if self.bdist_dir is None:
if self.skip_build and self.plat_name:
# If build is skipped and plat_name is overridden, bdist will
# not see the correct 'plat_name' - so set that up manually.
bdist = self.distribution.get_command_obj('bdist')
bdist.plat_name = self.plat_name
# next the command will be initialized using that name
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'wininst')
if not self.target_version:
self.target_version = ""
if not self.skip_build and self.distribution.has_ext_modules():
short_version = get_python_version()
if self.target_version and self.target_version != short_version:
raise DistutilsOptionError(
"target version can only be %s, or the '--skip-build'" \
" option must be specified" % (short_version,))
self.target_version = short_version
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'),
)
if self.install_script:
for script in self.distribution.scripts:
if self.install_script == os.path.basename(script):
break
else:
raise DistutilsOptionError(
"install_script '%s' not found in scripts"
% self.install_script)
def run(self):
if (sys.platform != "win32" and
(self.distribution.has_ext_modules() or
self.distribution.has_c_libraries())):
raise DistutilsPlatformError \
("distribution contains extensions and/or C libraries; "
"must be compiled on a Windows 32 platform")
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
install.plat_name = self.plat_name
install_lib = self.reinitialize_command('install_lib')
# we do not want to include pyc or pyo files
install_lib.compile = 0
install_lib.optimize = 0
if self.distribution.has_ext_modules():
# If we are building an installer for a Python version other
# than the one we are currently running, then we need to ensure
# our build_lib reflects the other Python version rather than ours.
# Note that for target_version!=sys.version, we must have skipped the
# build step, so there is no issue with enforcing the build of this
# version.
target_version = self.target_version
if not target_version:
assert self.skip_build, "Should have already checked this"
target_version = sys.version[0:3]
plat_specifier = ".%s-%s" % (self.plat_name, target_version)
build = self.get_finalized_command('build')
build.build_lib = os.path.join(build.build_base,
'lib' + plat_specifier)
# Use a custom scheme for the zip-file, because we have to decide
# at installation time which scheme to use.
for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
value = key.upper()
if key == 'headers':
value = value + '/Include/$dist_name'
setattr(install,
'install_' + key,
value)
log.info("installing to %s", self.bdist_dir)
install.ensure_finalized()
# avoid warning of 'install_lib' about installing
# into a directory not in sys.path
sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
install.run()
del sys.path[0]
# And make an archive relative to the root of the
# pseudo-installation tree.
from tempfile import mktemp
archive_basename = mktemp()
fullname = self.distribution.get_fullname()
arcname = self.make_archive(archive_basename, "zip",
root_dir=self.bdist_dir)
# create an exe containing the zip-file
self.create_exe(arcname, fullname, self.bitmap)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_wininst', pyversion,
self.get_installer_filename(fullname)))
# remove the zip-file again
log.debug("removing temporary file '%s'", arcname)
os.remove(arcname)
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
def get_inidata(self):
# Return data describing the installation.
lines = []
metadata = self.distribution.metadata
# Write the [metadata] section.
lines.append("[metadata]")
# 'info' will be displayed in the installer's dialog box,
# describing the items to be installed.
info = (metadata.long_description or '') + '\n'
# Escape newline characters
def escape(s):
return s.replace("\n", "\\n")
for name in ["author", "author_email", "description", "maintainer",
"maintainer_email", "name", "url", "version"]:
data = getattr(metadata, name, "")
if data:
info = info + ("\n %s: %s" % \
(name.capitalize(), escape(data)))
lines.append("%s=%s" % (name, escape(data)))
# The [setup] section contains entries controlling
# the installer runtime.
lines.append("\n[Setup]")
if self.install_script:
lines.append("install_script=%s" % self.install_script)
lines.append("info=%s" % escape(info))
lines.append("target_compile=%d" % (not self.no_target_compile))
lines.append("target_optimize=%d" % (not self.no_target_optimize))
if self.target_version:
lines.append("target_version=%s" % self.target_version)
if self.user_access_control:
lines.append("user_access_control=%s" % self.user_access_control)
title = self.title or self.distribution.get_fullname()
lines.append("title=%s" % escape(title))
import time
import distutils
build_info = "Built %s with distutils-%s" % \
(time.ctime(time.time()), distutils.__version__)
lines.append("build_info=%s" % build_info)
return "\n".join(lines)
def create_exe(self, arcname, fullname, bitmap=None):
import struct
self.mkpath(self.dist_dir)
cfgdata = self.get_inidata()
installer_name = self.get_installer_filename(fullname)
self.announce("creating %s" % installer_name)
if bitmap:
bitmapdata = open(bitmap, "rb").read()
bitmaplen = len(bitmapdata)
else:
bitmaplen = 0
file = open(installer_name, "wb")
file.write(self.get_exe_bytes())
if bitmap:
file.write(bitmapdata)
# Convert cfgdata from unicode to ascii, mbcs encoded
if isinstance(cfgdata, str):
cfgdata = cfgdata.encode("mbcs")
# Append the pre-install script
cfgdata = cfgdata + b"\0"
if self.pre_install_script:
# We need to normalize newlines, so we open in text mode and
# convert back to bytes. "latin-1" simply avoids any possible
# failures.
with open(self.pre_install_script, "r",
encoding="latin-1") as script:
script_data = script.read().encode("latin-1")
cfgdata = cfgdata + script_data + b"\n\0"
else:
# empty pre-install script
cfgdata = cfgdata + b"\0"
file.write(cfgdata)
# The 'magic number' 0x1234567B is used to make sure that the
# binary layout of 'cfgdata' is what the wininst.exe binary
# expects. If the layout changes, increment that number, make
# the corresponding changes to the wininst.exe sources, and
# recompile them.
header = struct.pack("<iii",
0x1234567B, # tag
len(cfgdata), # length
bitmaplen, # number of bytes in bitmap
)
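        # "<iii" packs three little-endian 32-bit ints, so this trailing
        # header is always exactly 12 bytes.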
file.write(header)
file.write(open(arcname, "rb").read())
def get_installer_filename(self, fullname):
# Factored out to allow overriding in subclasses
if self.target_version:
# if we create an installer for a specific python version,
# it's better to include this in the name
installer_name = os.path.join(self.dist_dir,
"%s.%s-py%s.exe" %
(fullname, self.plat_name, self.target_version))
else:
installer_name = os.path.join(self.dist_dir,
"%s.%s.exe" % (fullname, self.plat_name))
return installer_name
def get_exe_bytes(self):
# If a target-version other than the current version has been
# specified, then using the MSVC version from *this* build is no good.
# Without actually finding and executing the target version and parsing
# its sys.version, we just hard-code our knowledge of old versions.
# NOTE: Possible alternative is to allow "--target-version" to
# specify a Python executable rather than a simple version string.
# We can then execute this program to obtain any info we need, such
# as the real sys.version string for the build.
cur_version = get_python_version()
# If the target version is *later* than us, then we assume they
# use what we use
# string compares seem wrong, but are what sysconfig.py itself uses
if self.target_version and self.target_version < cur_version:
if self.target_version < "2.4":
bv = 6.0
elif self.target_version == "2.4":
bv = 7.1
elif self.target_version == "2.5":
bv = 8.0
elif self.target_version <= "3.2":
bv = 9.0
elif self.target_version <= "3.4":
bv = 10.0
else:
bv = 14.0
else:
# for current version - use authoritative check.
try:
from msvcrt import CRT_ASSEMBLY_VERSION
except ImportError:
# cross-building, so assume the latest version
bv = 14.0
else:
bv = float('.'.join(CRT_ASSEMBLY_VERSION.split('.', 2)[:2]))
# wininst-x.y.exe is in the same directory as this file
directory = os.path.dirname(__file__)
# we must use a wininst-x.y.exe built with the same C compiler
# used for python. XXX What about mingw, borland, and so on?
# if plat_name starts with "win" but is not "win32"
# we want to strip "win" and leave the rest (e.g. -amd64)
# for all other cases, we don't want any suffix
if self.plat_name != 'win32' and self.plat_name[:3] == 'win':
sfix = self.plat_name[3:]
else:
sfix = ''
filename = os.path.join(directory, "wininst-%.1f%s.exe" % (bv, sfix))
f = open(filename, "rb")
try:
return f.read()
finally:
f.close()
|
{
"content_hash": "374cb2b1eda96d3bf21a91c0829a3318",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 91,
"avg_line_length": 43.21883656509695,
"alnum_prop": 0.5351237020894757,
"repo_name": "Suwmlee/XX-Net",
"id": "f419da98bcb37abffc91414aba68441b38322205",
"size": "15602",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "Python3/lib/distutils/command/bdist_wininst.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "200"
},
{
"name": "C",
"bytes": "33097"
},
{
"name": "CSS",
"bytes": "86345"
},
{
"name": "HTML",
"bytes": "141382"
},
{
"name": "JavaScript",
"bytes": "345991"
},
{
"name": "PHP",
"bytes": "10671"
},
{
"name": "Python",
"bytes": "17312939"
},
{
"name": "Shell",
"bytes": "4647"
},
{
"name": "Visual Basic",
"bytes": "382"
}
],
"symlink_target": ""
}
|
import uuid, time, hashlib, shutil, random, os, sys
from M2Crypto import Rand, RSA, BIO, EVP
from twisted.internet import defer
from zeitcoindb import hashtable
from twisted.web.client import getPage
from twisted.internet import reactor
ADDRESS='127.0.0.1'
FNAME='zeitcoin'
PORT=1234
class utility:
def __init__(self,filename,address,port):
self.filename = filename
self.address = address
self.port = port
def generateguid(self):
guid=str(uuid.uuid4())
guid=guid.replace("-","")
#print 'Getuuid: = %s' % (guid)
return guid
def getdistance(self,peer1,peer2):
		# Use Kademlia's distance function; it suits peer lookup better than
		# alternatives such as Chord, Pastry and Tapestry.
		# Kademlia defines the distance as [distance = peer1 XOR peer2]
p1=long(peer1,16)
p2=long(peer2,16)
distance=p1^p2
return distance
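		# Illustrative example (made-up ids): for the hex ids 'c' and 'a',
		#   getdistance('c', 'a') == 0b1100 ^ 0b1010 == 0b0110 == 6
		# so ids sharing a long common prefix are "close" under this metric.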
def backupwallet(self,backupname):
shutil.copy2('zeitcoin.db', backupname)
return
	@defer.deferredGenerator
	def checkpeer(self, guid, address, port):
		# note: overlaps with the db-level checkguid, so this is partly redundant
ht=hashtable()
time1=self.gettimestamp()
flag=0
dbpool=ht.tconnectdb(self.filename)
#conn,c=ht.connectdb(FNAME)
wfd = defer.waitForDeferred(ht.tcheckguid(dbpool,guid))
yield wfd
data1 = wfd.getResult()
wfd = defer.waitForDeferred(ht.tcheckpeer(dbpool,guid,address,port))
yield wfd
data2 = wfd.getResult()
if (data1==True and data2==True):
wfd = defer.waitForDeferred(ht.tupdatetime(dbpool,guid))
yield wfd
elif (data1==False and data2==False):
wfd = defer.waitForDeferred(ht.taddht(dbpool,guid,address,port,flag,time1))
yield wfd
else:
			wfd = defer.waitForDeferred(ht.tupdatepeer(dbpool,guid,address,port))
yield wfd
ht.tclosedb(dbpool)
return #result
@defer.deferredGenerator
def messagefail(self,message, guid):
print message
ht=hashtable()
dbpool=ht.tconnectdb(self.filename)
wfd = defer.waitForDeferred(ht.tdeleteht(dbpool,guid))
yield wfd
ht.tclosedb(dbpool)
return
@defer.deferredGenerator
def updatepeer(self,guid,address,port):
ht=hashtable()
time1=self.gettimestamp()
dbpool=ht.tconnectdb(self.filename)
wfd = defer.waitForDeferred(ht.tcheckguid(dbpool,guid))
yield wfd
check1 = wfd.getResult()
if (check1==True):
# Check to see if peer exist in hastable if so update flag and timestamp
print "the peer already exist in the hasttable, updating flag and timestamp"
wfd = defer.waitForDeferred(ht.tupdatepeer(dbpool,guid,address,port))
yield wfd
else:
			# otherwise check whether the hashtable is full (more than 128 peers)
print "the peer is not in the hashtable"
wfd = defer.waitForDeferred(ht.tcheckfullht(dbpool))
yield wfd
check2 = wfd.getResult()
if (check2==False):
print "The hash table is not full"
# if not than add the new peer to the hash table
wfd = defer.waitForDeferred(ht.taddht(dbpool,guid,address,port,'1',time1))
yield wfd
else:
# if so check for inactive guid (peers)
wfd = defer.waitForDeferred(ht.tgetflag(dbpool))
yield wfd
guid1 = wfd.getResult()
if (guid1 != '0'):
print "deleting an inactive peer with the oldest timestamp"
					# delete the inactive guid with the earliest timestamp and add the new peer
wfd = defer.waitForDeferred(ht.tdeleteht(dbpool,guid))
yield wfd
wfd = defer.waitForDeferred(ht.taddht(dbpool,guid,address,port,'1',time1))
yield wfd
else:
					# if there is no inactive peer, check whether the new peer is closer
					# than the farthest one; if so replace the farthest, otherwise drop it
print "There is no inactive peer"
wfd = defer.waitForDeferred(ht.tgetallguidht(dbpool))
yield wfd
peerlist = wfd.getResult()
					self.farestpeer(guid, peerlist)
# have to think about this
ht.tclosedb(dbpool)
return
	@defer.deferredGenerator
	def updateflag(self, guid):
# update the flag in the hash table to 0 = inactive which will be replaced by new entries to the table
ht=hashtable()
flag=0
dbpool=ht.tconnectdb(self.filename)
		wfd = defer.waitForDeferred(ht.tupdateflag(dbpool,guid,flag))
yield wfd
ht.tclosedb(dbpool)
return
def closestpeer(self, peerid, peerlist):
		# Given a peerid, find the peer closest in distance to it from a list of
		# other peer ids. This is used to locate a particular peer: search the
		# routing table for the closest peer and, if the target is not found,
		# search that peer's routing table, and so on until the peer is found.
		closestdistance = float('inf')  # larger than any possible XOR distance
closestpeer=peerid
for peer in peerlist:
#print "peer = "+str(peer)
d1=self.getdistance(peerid,peer)
#print "d1 = "+str(d1)
if (d1<closestdistance):
closestdistance=d1
closestpeer=peer
#print "cd = "+str(closestdistance)
#print "cp = "+str(peer)
#print "closest distance is "+str(closestdistance)
		return closestpeer, closestdistance
def farestpeer(self,peerid, peerlist):
# Will determine the farthest peer in a given peer hash table.
farestdistance=0
farestpeer=peerid
for peer in peerlist:
#print "peer = "+str(peer)
d1=self.getdistance(peerid,peer)
#print "d1 = "+str(d1)
if (d1>farestdistance):
farestdistance=d1
farestpeer=peer
#print "cd = "+str(closestdistance)
#print "cp = "+str(peer)
#print "farest distance is "+str(farestdistance)
		return farestpeer, farestdistance
def gettimestamp(self):
		# Coarse wall-clock timestamp (seconds since the epoch)
time1=time.time()
return time1
def getwebpage(self,url):
d = getPage(url)
d.addCallback(self.getwebpage_callback)
return d
def getwebpage_callback(self,html):
h=str(html)
print "html - ",h
return h
@defer.deferredGenerator
	def postipaddress(self, address, guid):
		# guid must be supplied by the caller; this class does not store one
		ipaddress = address + "?guid=" + str(guid) + "&address=" + str(self.address) + "&port=" + str(self.port)
wfd = defer.waitForDeferred(self.getwebpage(ipaddress))
yield wfd
pageresult = wfd.getResult()
return
@defer.deferredGenerator
def getipaddress(self):
pageresult="0"
wfd = defer.waitForDeferred(self.getwebpage("http://91.198.22.70/"))
yield wfd
pageresult = wfd.getResult()
if (pageresult !="0"):
int1=pageresult.find("Address:")+9
str1=pageresult[int1:]
int2=str1.find("<")
str2=str1[:int2]
print "ipaddress - ",str2
else:
wfd = defer.waitForDeferred(self.getwebpage("http://www.myipaddress.com/"))
yield wfd
pageresult = wfd.getResult()
if (pageresult !="0"):
int1=pageresult.find("Your computer's IP address is:")+60
str1=pageresult[int1:]
int2=str1.find("<")
str2=str1[:int2]
print "ipaddress - ",str2
else:
wfd = defer.waitForDeferred(self.getwebpage("http://ipinfo.info/index.php"))
yield wfd
pageresult = wfd.getResult()
if (pageresult !="0"):
int1=pageresult.find("My public IP Address:")+266
str1=pageresult[int1:]
int2=str1.find("<")
str2=str1[:int2]
print "ipaddress - ",str2
else:
print "[error] - internet connection seems to be down"
return
class encyption:
	KEY_LENGTH = 1024  # note: 1024-bit RSA is considered weak by modern standards
	def blank_callback(self):
		# no-op progress callback: replaces the dashes M2Crypto prints by
		# default during key generation
		return
def generatewalletkeys(self,fname):
privpem=fname+"-private.pem"
pubpem=fname+"-public.pem"
# Random seed
Rand.rand_seed (os.urandom (self.KEY_LENGTH))
# Generate key pair
key = RSA.gen_key (self.KEY_LENGTH, 65537)
#Save private key
key.save_key (privpem, None)
#Save public key
key.save_pub_key (pubpem)
print "Wallet keys has been generated"
return
def generatekeys(self):
# Random seed
Rand.rand_seed (os.urandom (self.KEY_LENGTH))
# Generate key pair
key = RSA.gen_key (self.KEY_LENGTH, 65537)
# Create memory buffers
pri_mem = BIO.MemoryBuffer()
pub_mem = BIO.MemoryBuffer()
# Save keys to buffers
key.save_key_bio(pri_mem, None)
key.save_pub_key_bio(pub_mem)
# Get keys
public_key = pub_mem.getvalue()
private_key = pri_mem.getvalue()
return public_key, private_key
def getpublickey(self,fname):
pubpem=fname+"-public.pem"
pubkey = RSA.load_pub_key (pubpem)
pub_mem = BIO.MemoryBuffer()
pubkey.save_pub_key_bio(pub_mem)
public_key = pub_mem.getvalue()
print "public_key=",public_key
return public_key
def getpubkey(self,fname):
pubpem=fname+"-public.pem"
pubkey = RSA.load_pub_key (pubpem)
return pubkey
def getprivatekey(self,fname):
privpem=fname+"-private.pem"
prikey = RSA.load_key (privpem)
return prikey
def encyptmessage(self,message,pubkey):
#encrypt the message using that public key
#Only the private key can decrypt a message encrypted using the public key
CipherText = pubkey.public_encrypt (message, RSA.pkcs1_oaep_padding)
#print CipherText.encode ('base64')
print "CipherText - ",CipherText.encode ('base64')
return CipherText.encode ('base64')
def signmessage(self,CipherText,prikey):
# Generate a signature
#MsgDigest = M2Crypto.EVP.MessageDigest ('sha1')
MsgDigest = EVP.MessageDigest ('sha1')
MsgDigest.update (CipherText)
Signature = prikey.sign_rsassa_pss (MsgDigest.digest ())
#print Signature.encode ('base64')
return Signature
def verifymessage(self,CipherText,Signature,fname):
pubpem=fname+"-public.pem"
# Load the public key
		VerifyRSA = RSA.load_pub_key(pubpem)
# Verify the signature
		MsgDigest = EVP.MessageDigest('sha1')
MsgDigest.update (CipherText)
		if VerifyRSA.verify_rsassa_pss(MsgDigest.digest(), Signature) == 1:
			print "Signature verified: the message is authentic.\n"
			verify = True
		else:
			print "Signature verification FAILED!\n"
			verify = False
return verify
def decyptmessage(self,CipherText,fname):
privpem=fname+"-private.pem"
# load the private key
		ReadRSA = RSA.load_key(privpem)
# decrypt the message using that private key
# If you use the wrong private key to try to decrypt the message it generates an exception
try:
			PlainText = ReadRSA.private_decrypt(CipherText, RSA.pkcs1_oaep_padding)
except:
print "Error: wrong key?"
PlainText = ""
if PlainText!="":
# print the result of the decryption
print "Message decrypted by Bob:"
print PlainText
return PlainText
def signastring(self,message,fname):
privpem=fname+"-private.pem"
#Generate a signature for a string
#Use the private key
		SignEVP = EVP.load_key(privpem)
#Begin signing
SignEVP.sign_init ()
#Tell it to sign our string
SignEVP.sign_update (message)
#Get the final result
StringSignature = SignEVP.sign_final ()
#Print the final result
print "Bob's signature for the string:"
print StringSignature.encode ('base64')
return StringSignature
def verifyastring(self,StringSignature,message,fname):
pubpem=fname+"-public.pem"
#Verify the string was signed by the right person
		PubKey = RSA.load_pub_key(pubpem)
#Initialize
		VerifyEVP = EVP.PKey()
#Assign the public key to our VerifyEVP
VerifyEVP.assign_rsa (PubKey)
#Begin verification
VerifyEVP.verify_init ()
		#Tell it to verify our string; if it is not identical to the one that was signed, verification will fail
VerifyEVP.verify_update (message)
#Was the string signed by the right person?
if VerifyEVP.verify_final (StringSignature) == 1:
print "The string was successfully verified."
verify = True
else:
print "The string was NOT verified!"
verify = False
return verify
class logfile:
def openlogw(self):
fo = open("./test/log.txt", "a")
return fo
def openlogr(self):
fo = open("./test/log.txt", "r")
return fo
def readlogline(self,fo):
line = fo.readline()
return line
def writelog(self,fo,str1):
localtime = time.asctime( time.localtime(time.time()) )
fo.write(str(localtime)+" "+str1+"\n");
return
def closelog(self,fo):
fo.close()
return
def printinfo(self,str1):
fo=self.openlogw()
self.writelog(fo,str1)
self.closelog(fo)
return
def main():
global FNAME,ADDRESS,PORT
print "Testing the zeitcoin utility classes...."
ut=utility(FNAME,ADDRESS,PORT)
en=encyption()
guid=ut.generateguid()
print "guid - ",guid
pubkey,privkey=en.generatekeys()
print "public key - ",pubkey
print "private key - ",privkey
print "Getting webpage http://91.198.22.70/ "
#ut.getwebpage("http://91.198.22.70/")
ut.getipaddress()
print "Finish testing the zeitcoin utility classes"
reactor.run()
sys.exit(0)
if __name__ == "__main__" : main()
|
{
"content_hash": "221991704f8b471fe7f08b36828c073d",
"timestamp": "",
"source": "github",
"line_count": 416,
"max_line_length": 106,
"avg_line_length": 29.997596153846153,
"alnum_prop": 0.7067072682105938,
"repo_name": "mmgrant73/zeitcoin",
"id": "f763aea951998a20bf354e48c18cda28593201c6",
"size": "12636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zeitcoinutility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73609"
}
],
"symlink_target": ""
}
|
"""Read and cache directory listings.
The listdir() routine returns a sorted list of the files in a directory,
using a cache to avoid reading the directory more often than necessary.
The annotate() routine appends slashes to directories."""
import os
cache = {}
def listdir(path):
"""List directory contents, using cache."""
try:
cached_mtime, list = cache[path]
del cache[path]
except KeyError:
cached_mtime, list = -1, []
try:
mtime = os.stat(path)[8]
except os.error:
return []
if mtime <> cached_mtime:
try:
list = os.listdir(path)
except os.error:
return []
list.sort()
cache[path] = mtime, list
return list
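# Example (illustrative): a repeated call re-stats the directory but only
# re-lists it when the mtime has changed:
#   listdir('/tmp'); listdir('/tmp')  # second call avoids os.listdir()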
opendir = listdir # XXX backward compatibility
def annotate(head, list):
"""Add '/' suffixes to directories."""
for i in range(len(list)):
if os.path.isdir(os.path.join(head, list[i])):
list[i] = list[i] + '/'
|
{
"content_hash": "47081dcdd83a27936e9b54e343576a8b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 72,
"avg_line_length": 26.405405405405407,
"alnum_prop": 0.6028659160696008,
"repo_name": "MalloyPower/parsing-python",
"id": "a35e16d1bc6ad5366e9c84c04fb47c1f0ca98f3c",
"size": "977",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.0/Lib/dircache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
"""
MAX31855_test.py
Alexander Hiam - 12/2012
Example program for PyBBIO's MAX31855 library.
Reads the temperature from the MAX31855 thermocouple
amplifier using software SPI.
This example program is in the public domain.
"""
from bbio import *
from bbio.libraries.MAX31855 import *
# Set variables for the pins connected to the ADC:
data_pin = GPIO1_15 # P8.15
clk_pin = GPIO1_14 # P8.16
cs_pin = GPIO0_27 # P8.17
# Create an instance of the MAX31855 class:
thermocouple = MAX31855(data_pin, clk_pin, cs_pin)
def setup():
# Nothing to do here, the MAX31855 class sets pin modes
pass
def loop():
temp = thermocouple.readTempC()
if (not temp):
# The MAX31855 reported an error, print it:
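    # (note: a temperature of exactly 0.0 C is also falsy, so this check
    # would report it as an error)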
print thermocouple.error
else:
print "Temp: %0.2f C" % temp;
delay(1000)
run(setup, loop)
|
{
"content_hash": "697fc88a7c5be7327948be894081aa6b",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 57,
"avg_line_length": 22.86111111111111,
"alnum_prop": 0.7035236938031592,
"repo_name": "ims-tyler/PyBBIO",
"id": "8666897f79e8e674a74023c72ea73c404e9b5091",
"size": "823",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/MAX31855_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "94254"
},
{
"name": "CSS",
"bytes": "4049"
},
{
"name": "HTML",
"bytes": "852"
},
{
"name": "JavaScript",
"bytes": "1042"
},
{
"name": "Python",
"bytes": "175184"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
}
|