__author__ = "Anton Panasenko"
__copyright__ = "Copyright 2013, Qubell.com"
__license__ = "Apache"
__email__ = "apanasenko@qubell.com"
import unittest
import yaml
import logging as log
import re
from functools import wraps
from qubell.api.globals import *
from qubell.api.private.service import COBALT_SECURE_STORE_TYPE, WORKFLOW_SERVICE_TYPE, CLOUD_ACCOUNT_TYPE
import logging
import os
import types
import requests
logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(logging.ERROR)
def format_as_api(data):
"""
Accepts {'default':{},}
returns [{'name':'default',}]
"""
result = []
if isinstance(data, dict):
for name, value in data.items():
            value.update({'name': name})
result.append(value)
return result
else:
return data
def values(names):
"""
    Method decorator that injects return values into method parameters.
    It searches the instance's returnValues recursively for each desired name;
    for convenience, a single-element list is unwrapped to its value.
:param names: dict of "value-name": "method-parameter-name"
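
    Illustrative usage (a sketch; the application and return-value names are
    hypothetical):

        @instance(byApplication="my-app")
        @values({"app-output.host": "host"})
        def test_host_is_set(self, instance, host=None):
            self.assertTrue(host)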
"""
def wrapper(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
            if len(args) > 1:
                instance = args[1]
            else:
                instance = kwargs['instance']
def findReturnValues(rvalues):
for k, v in rvalues.iteritems():
if isinstance(v, dict):
findReturnValues(v) #go deep, to find desired name
if k in names.keys():
if isinstance(v,list) and len(v)==1:
kwargs.update({names[k]: v[0]})
else:
kwargs.update({names[k]: v})
findReturnValues(instance.returnValues)
            # ensure all names were set
missing_params = [k for k, v in names.items() if v not in kwargs]
if missing_params:
raise AttributeError("Parameters {0} for '{1}' were not found".format(missing_params, func.__name__), missing_params)
func(*args, **kwargs)
return wrapped_func
return wrapper
def workflow(name, parameters=None, timeout=10):
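    """
    Method decorator that runs the named workflow on the injected instance
    before the test body executes, failing the test if the instance does not
    become ready within `timeout` minutes.

    Illustrative usage (a sketch; the application and workflow names are
    hypothetical):

        @instance(byApplication="my-app")
        @workflow("reconfigure", parameters={'key': 'value'}, timeout=15)
        def test_after_reconfigure(self, instance):
            ...
    """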
if not parameters:
parameters = dict()
def wrapper(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
self = args[0]
instance = args[1]
assert instance.run_workflow(name, parameters)
if not instance.ready(timeout):
                self.fail(
                    "Instance %s did not become ready for workflow '%s' with parameters %s within timeout %s" % (
                        instance.instanceId, name, parameters, timeout
                    )
                )
func(*args, **kwargs)
return wrapped_func
return wrapper
def environment(envdata):
"""
    Class decorator that allows running tests in a sandbox against different Qubell environments.
    Each test method in the suite is converted to <test_name>_on_environment_<environment_name>
    :param envdata: dict of environments
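
    Illustrative usage (a sketch; environment names are hypothetical):

        @environment({"default": {}, "custom": {"policies": []}})
        class MyTest(BaseTestCase):
            def test_ping(self, instance):
                ...
        # MyTest now carries test_ping_on_environment_<env> variants.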
"""
    # assert isinstance(envdata, dict), "@environment decorator should take 'dict' with environments"
def copy(func, name=None):
return types.FunctionType(func.func_code, func.func_globals, name=name,
argdefs=func.func_defaults,
closure=func.func_closure)
def wraps_class(clazz):
if "environments" in clazz.__dict__:
log.warn("Class {0} environment attribute is overridden".format(clazz.__name__))
params = format_as_api(envdata)
clazz.environments = params
methods = [method
for _, method in clazz.__dict__.items()
if isinstance(method, types.FunctionType) and method.func_name.startswith("test")]
for env in params:
if env['name'] != DEFAULT_ENV_NAME():
                env['name'] += '_for_%s' % clazz.__name__  # Each test class should have its own set of envs.
for method in methods:
delattr(clazz, method.func_name)
log.info("Test '{0}' multiplied per environment in {1}".format(method.func_name, clazz.__name__))
for env in params:
new_name = method.func_name + "_on_environment_" + env['name']
setattr(clazz, new_name, copy(method, new_name))
return clazz
return wraps_class
# noinspection PyPep8Naming
def instance(byApplication):
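    """
    Method decorator that resolves the launched instance of the given
    application (in the environment this test variant runs against) and passes
    it to the test as an extra positional argument.

    Illustrative usage (a sketch; the application name is hypothetical):

        @instance(byApplication="my-app")
        def test_app(self, instance):
            self.assertTrue(instance.ready())
    """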
def wrapper(func):
def get_environment_name(self, f):
separator = "_on_environment_"
if len(f.__name__.split(separator)) > 1:
env = f.__name__.split(separator)[1]
elif "_testMethodName" in self.__dict__ and len(self._testMethodName.split(separator)) > 1:
env = self._testMethodName.split(separator)[1]
else:
env = DEFAULT_ENV_NAME()
return env
@wraps(func)
def wrapped_func(*args, **kwargs):
self = args[0]
env = get_environment_name(self, func)
def find_by_application_name(app):
for inst in self.instances:
if inst.application.name == app and inst.environment.name == env:
return inst
return None
func(*args + (find_by_application_name(byApplication),), **kwargs)
return wrapped_func
return wrapper
class BaseTestCase(unittest.TestCase):
platform = None
parameters = None
sandbox = None
environments = None
@classmethod
def environment(cls, organization):
provider_config = {'configuration.provider': cls.parameters['provider_type'],
'configuration.legacy-regions': cls.parameters['provider_region'],
'configuration.endpoint-url': '',
'configuration.legacy-security-group': '',
'configuration.identity': cls.parameters['provider_identity'],
'configuration.credential': cls.parameters['provider_credential']}
        # Old-style component tests declared the provider name as 'test-provider'.
        # We cannot add that provider to an environment where another provider is already set.
        if cls.parameters['provider_name'] == 'test-provider' or not cls.parameters['provider_name']:
prov = PROVIDER['provider_name']
else:
prov = cls.parameters['provider_name']
# Default add-on for every env
addon = {"services":
[{"name": DEFAULT_CREDENTIAL_SERVICE()},
{"name": DEFAULT_WORKFLOW_SERVICE()},
{"name": prov}
]}
servs = [{"type": COBALT_SECURE_STORE_TYPE, "name": DEFAULT_CREDENTIAL_SERVICE()},
{"type": WORKFLOW_SERVICE_TYPE, "name": DEFAULT_WORKFLOW_SERVICE()},
{"type": CLOUD_ACCOUNT_TYPE, "name": prov, "parameters": provider_config}]
insts = []
# Add provider, keystore, workflow to every env.
envs = cls.environments or [{"name": DEFAULT_ENV_NAME()},]
for env in envs:
env.update(addon)
return {
"organization": {"name": organization},
"services": servs,
"instances": insts,
"environments": envs}
@classmethod
def timeout(cls):
return 15
@classmethod
def setUpClass(cls):
super(BaseTestCase, cls).setUpClass()
if cls.parameters['organization']:
cls.prepare(cls.parameters['organization'], cls.timeout())
else:
cls.prepare(cls.__name__, cls.timeout())
@classmethod
def tearDownClass(cls):
if os.getenv("QUBELL_DEBUG", None) and not('false' in os.getenv("QUBELL_DEBUG", None)):
log.info("QUBELL_DEBUG is ON\n DO NOT clean sandbox")
else:
cls.clean()
super(BaseTestCase, cls).tearDownClass()
@classmethod
def prepare(cls, organization, timeout=30):
""" Create sandboxed test environment
"""
log.info("\n\n\n--------------- Preparing sandbox... ---------------")
cls.sandbox = SandBox(cls.platform, cls.environment(organization))
cls.organization = cls.sandbox.make()
cls.regular_instances = []
cls.service_instances = []
def launch_in_env(app, env):
environment = cls.organization.environments[env['name']]
application = cls.organization.applications[app['name']]
parameters = app.get('parameters', {})
settings = app.get('settings', {})
instance = cls.organization.create_instance(application=application,
environment=environment,
parameters=parameters,
**settings)
if app.get('add_as_service', False):
environment.add_service(instance)
cls.sandbox.sandbox["instances"].append({
"id": instance.instanceId,
"name": instance.name,
})
return instance
def check_instances(instances):
for instance in instances:
if not instance.running(timeout=timeout):
error = instance.error.strip()
# TODO: if instance fails to start during tests, add proper unittest log
if os.getenv("QUBELL_DEBUG", None) and not('false' in os.getenv("QUBELL_DEBUG", None)):
pass
else:
cls.clean()
assert not error, "Instance %s didn't launch properly and has error '%s'" % (instance.instanceId, error)
                assert False, "Instance %s is not ready after %s minutes; stopping on timeout" % (instance.instanceId, timeout)
        # If 'meta' is set on the test class, restore the applications it describes first.
# TODO: all this stuff needs refactoring.
applications = []
if cls.__dict__.get('meta'):
meta_raw = requests.get(url=cls.__dict__.get('meta'))
meta = yaml.safe_load(meta_raw.content)
#application = meta['kit']['name']
for app in meta['kit']['applications']:
applications.append({
'name': app['name'],
'url': app['manifest']})
cls.organization.restore({'applications':applications})
# launch service instances first
for app in cls.sandbox['applications']:
for env in cls.sandbox['environments']:
if app.get('launch', True) and app.get('add_as_service', False):
log.info("Sandbox: starting service in app: %s, env: %s" % (app['name'], env['name']))
cls.service_instances.append(launch_in_env(app, env))
check_instances(cls.service_instances)
# then launch non-service instances
for app in cls.sandbox['applications']:
for env in cls.sandbox['environments']:
if app.get('launch', True) and not app.get('add_as_service', False):
log.info("Sandbox: starting instance in app: %s, env: %s" % (app['name'], env['name']))
cls.regular_instances.append(launch_in_env(app, env))
check_instances(cls.regular_instances)
cls.instances = cls.service_instances + cls.regular_instances
log.info("\n--------------- Sandbox prepared ---------------\n\n")
@classmethod
def clean(cls, timeout=10):
log.info("\n--------------- Cleaning sandbox ---------------")
def destroy_instances(instances):
for instance in instances:
instance.destroy()
if not instance.destroyed(timeout):
                log.error(
                    "Instance was not destroyed properly {0}: {1}".format(instance.instanceId, instance.name))
destroy_instances(cls.regular_instances)
destroy_instances(cls.service_instances)
log.info("\n--------------- Sandbox cleaned ---------------\n")
# noinspection PyPep8Naming
def findByApplicationName(self, name):
for instance in self.instances:
if instance.application.name == name:
return instance
class SandBox(object):
def __init__(self, platform, sandbox):
self.sandbox = sandbox
self.platform = platform
self.organization = self.platform.organization(name=self.sandbox["organization"]["name"])
self.sandbox['instances'] = sandbox.get('instances', [])
@staticmethod
def load_yaml(platform, yaml_file):
return SandBox(platform, yaml.safe_load(yaml_file))
def make(self):
self.organization.restore(self.sandbox)
return self.organization
def clean(self):
# TODO: need cleaning mechanism
pass
    def __check_environment_name(self, name):
        # Sanitize: strip characters that are not valid in environment names.
        return re.sub("[^a-zA-Z0-9_]", "", name)
def __getitem__(self, name):
if name in self.sandbox:
return self.sandbox[name]
else:
return None
|
{
"content_hash": "7e58e2c50a2b8281efd8917c83c2e9a2",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 137,
"avg_line_length": 37.93785310734463,
"alnum_prop": 0.5577066269545793,
"repo_name": "SergeyKemaev/contrib-python-qubell-client",
"id": "2a96e160b11f88585f52b8f233d2bd0817c97d9a",
"size": "14027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qubell/api/private/testing/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "207671"
},
{
"name": "Shell",
"bytes": "144"
}
],
"symlink_target": ""
}
|
import random
from templates.text import TextTemplate
def process(input, entities=None):
greetings = [
'Have a good day, sir.',
'Wonderful, I think it\'s time for my evening nap...',
'Bye to you as well, sir.',
'It was my pleasure talking to you.',
'Oh, please do not go!',
'It\'s sad to see you leave.',
'Farewell! I hope I will see you soon.',
'Sir, I think I need to sleep now...',
]
if entities is not None:
if 'sender' in entities and 'first_name' in entities['sender']:
sender_name = entities['sender']['first_name']
greetings = [greeting.replace('sir', sender_name) for greeting in greetings]
output = {
'input': input,
'output': TextTemplate(random.choice(greetings)).get_message(),
'success': True
}
return output
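# Illustrative call (a sketch; the exact output is a TextTemplate message
# built from a randomly chosen farewell):
#   process('Bye', {'sender': {'first_name': 'Sam'}})
#   # -> {'input': 'Bye', 'output': <message with 'sir' replaced by 'Sam'>, 'success': True}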
|
{
"content_hash": "d31bc24bce50ea6a5352b5f3feeba9f4",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 88,
"avg_line_length": 33.46153846153846,
"alnum_prop": 0.5862068965517241,
"repo_name": "swapagarwal/JARVIS-on-Messenger",
"id": "2b2b5865239e5fd0311348e0fd17b69decd7bebc",
"size": "870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/src/bye.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63552"
}
],
"symlink_target": ""
}
|
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
import shutil
class TestSwiftBridgingHeaderHeadermap(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
TestBase.setUp(self)
@skipUnlessDarwin
@swiftTest
def test(self):
        # Remove the module cache to force a rebuild and ensure we hit the
        # rebuild problem.
mod_cache = self.getBuildArtifact("my-clang-modules-cache")
if os.path.isdir(mod_cache):
shutil.rmtree(mod_cache)
self.runCmd('settings set symbols.clang-modules-cache-path "%s"'
% mod_cache)
self.runCmd('settings set frame-format ""')
self.build()
exe_name = "a.out"
exe = self.getBuildArtifact(exe_name)
# Create the target
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.registerSharedLibrariesWithTarget(target, ['dylib'])
lldbutil.run_to_source_breakpoint(self, "break here",
lldb.SBFileSpec('dylib.swift'))
self.expect("fr v -d run-target -- a",
substrs=['(dylib.C<a.Wrapper>.Something)', "hello"])
self.assertTrue(os.path.isdir(mod_cache), "module cache exists")
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lldb.SBDebugger.Terminate)
unittest2.main()
|
{
"content_hash": "9bc87ed1b8fb9aa5e229f6a40b924ed7",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 81,
"avg_line_length": 31.857142857142858,
"alnum_prop": 0.6322869955156951,
"repo_name": "apple/swift-lldb",
"id": "e5e5bbb05e77aa10f4aa6edd7267358cbe310d15",
"size": "2022",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "packages/Python/lldbsuite/test/lang/swift/clangimporter/bridging_header_headermap/TestSwiftBridgingHeaderHeadermap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "130449"
},
{
"name": "C",
"bytes": "198536"
},
{
"name": "C++",
"bytes": "27687071"
},
{
"name": "CMake",
"bytes": "172176"
},
{
"name": "DTrace",
"bytes": "334"
},
{
"name": "LLVM",
"bytes": "6106"
},
{
"name": "Makefile",
"bytes": "106804"
},
{
"name": "Objective-C",
"bytes": "106821"
},
{
"name": "Objective-C++",
"bytes": "25658"
},
{
"name": "Perl",
"bytes": "72175"
},
{
"name": "Python",
"bytes": "4680483"
},
{
"name": "Shell",
"bytes": "6573"
},
{
"name": "Swift",
"bytes": "260786"
},
{
"name": "Vim script",
"bytes": "8434"
}
],
"symlink_target": ""
}
|
import tornado.httpserver
import tornado.testing
import tornado.web
from run_service import make_app
from tests.handlers.handler_test_case import HandlerTestCase
class MockWeatherHandler(tornado.web.RequestHandler):
def get(self):
self.finish({
"query": {
"count": 1,
"created": "2017-11-09T17:30:06Z",
"lang": "en-US",
"results": {
"channel": {
"item": {
"condition": {
"code": "26",
"date": "Thu, 09 Nov 2017 10:00 AM CST",
"temp": "52",
"text": "Cloudy"
}
}
}
}
}
})
class TestMaggieHandler(HandlerTestCase):
def get_app(self):
mock_app = tornado.web.Application([("/weather", MockWeatherHandler)])
socket, self.mock_server_port = tornado.testing.bind_unused_port()
self.mock_server = tornado.httpserver.HTTPServer(mock_app)
self.mock_server.add_sockets([socket])
return make_app({
"weather_uri": f"http://localhost:{self.mock_server_port}/weather"
})
@tornado.testing.gen_test
async def test_get_returns_current_conditions_from_weather_api(self):
response = await self.fetch("/api/v1/maggie")
self.assertEqual(200, response.code)
self.assertEqual(
b"Currently 52 degrees and Cloudy in Austin, TX",
response.body)
|
{
"content_hash": "9e8a9faeb0bb2a63b740f27ba776ee76",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 32.51020408163265,
"alnum_prop": 0.4896421845574388,
"repo_name": "tjensen/PyTexas2017",
"id": "b9da42e5fec1609f765c0664690b1ae6f7ea12d5",
"size": "1593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/handlers/test_maggie_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29810"
}
],
"symlink_target": ""
}
|
"""
.. module: security_monkey.common.utils.utils
:platform: Unix
:synopsis: Utility methods pasted and bastardized from all over the place. Can probably be removed completely.
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
from security_monkey import app, mail
from flask_mail import Message
import boto
import traceback
prims = [int, str, unicode, bool, float, type(None)]
def sub_list(l):
r = []
for i in l:
if type(i) in prims:
r.append(i)
elif type(i) is list:
r.append(sub_list(i))
elif type(i) is dict:
r.append(sub_dict(i))
else:
print "Unknown Type: {}".format(type(i))
r = sorted(r)
return r
def sub_dict(d):
r = {}
for k in d:
if type(d[k]) in prims:
r[k] = d[k]
elif type(d[k]) is list:
r[k] = sub_list(d[k])
elif type(d[k]) is dict:
r[k] = sub_dict(d[k])
else:
print "Unknown Type: {}".format(type(d[k]))
return r
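# Illustrative example (hypothetical data): these helpers normalize nested
# structures for comparison, e.g.
#   sub_dict({'b': [3, 1, 2], 'a': {'x': 1}})  ->  {'b': [1, 2, 3], 'a': {'x': 1}}
# Values of unknown types are printed and dropped.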
def send_email(subject=None, recipients=[], html=""):
"""
Given a message, will send that message over SES or SMTP, depending upon how the app is configured.
"""
plain_txt_email = "Please view in a mail client that supports HTML."
if app.config.get('EMAILS_USE_SMTP'):
try:
with app.app_context():
msg = Message(subject, recipients=recipients)
msg.body = plain_txt_email
msg.html = html
mail.send(msg)
app.logger.debug("Emailed {} - {} ".format(recipients, subject))
except Exception, e:
m = "Failed to send failure message with subject: {}\n{} {}".format(subject, Exception, e)
app.logger.debug(m)
app.logger.warn(traceback.format_exc())
else:
try:
ses_region = app.config.get('SES_REGION', 'us-east-1')
ses = boto.ses.connect_to_region(ses_region)
except Exception, e:
m = "Failed to connect to ses using boto. Check your boto credentials. {} {}".format(Exception, e)
app.logger.debug(m)
app.logger.warn(traceback.format_exc())
return
for email in recipients:
try:
ses.send_email(app.config.get('MAIL_DEFAULT_SENDER'), subject, html, email, format="html")
app.logger.debug("Emailed {} - {} ".format(email, subject))
except Exception, e:
m = "Failed to send failure message with subject: {}\n{} {}".format(subject, Exception, e)
app.logger.debug(m)
|
{
"content_hash": "1dfc7c36a7bd1ab6714dce73802627c6",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 114,
"avg_line_length": 32.373493975903614,
"alnum_prop": 0.5597320431708225,
"repo_name": "monkeysecurity/security_monkey",
"id": "92b887939082c2a4d549332e8d12063173ae062c",
"size": "3304",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "security_monkey/common/utils/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22086"
},
{
"name": "Dart",
"bytes": "81616"
},
{
"name": "HTML",
"bytes": "76595"
},
{
"name": "JavaScript",
"bytes": "8629"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "441430"
},
{
"name": "Shell",
"bytes": "16916"
}
],
"symlink_target": ""
}
|
"""This module contains support for various built-in output mechanisms.
Here, a base OutputToFile class is implemented to provide simple output to
a file via the pickle serialization mechanism. It can be subclassed to implement
alternative serialization schemes, see json_factory.py and mfg_inspector.py for
examples.
"""
import base64
import contextlib
try:
  import cPickle as pickle
except ImportError:
  import pickle
import os
import shutil
import tempfile
from openhtf import util
from openhtf.util import data
import six
# TODO(wallacbe): Switch to util
class Atomic(object):
"""Class that does atomic write in a contextual manner."""
def __init__(self, filename):
self.filename = filename
self.temp = tempfile.NamedTemporaryFile(delete=False)
def write(self, write_data):
if hasattr(write_data, 'decode'):
return self.temp.write(write_data)
return self.temp.write(write_data.encode())
def close(self):
self.temp.close()
shutil.move(self.temp.name, self.filename)
class OutputToFile(object):
"""Output the given TestRecord to a file.
Instances of this class are intended to be used as an output callback
(see Test.add_output_callbacks) to output TestRecord results to a file.
This base implementation outputs the TestRecord by serializing it via
the pickle module. Subclasses may change this by overriding the
serialize_test_record() method. Additionally, subclasses may implement
more complex file naming mechanisms by overriding the open_file() method.
  Args:
    filename_pattern: Pattern for output filenames; may be a format string
        (formatted with fields from the converted test record), a callable,
        or a file-like object (see open_output_file).
"""
def __init__(self, filename_pattern):
self.filename_pattern = filename_pattern
@staticmethod
def serialize_test_record(test_record):
"""Override method to alter how test records are serialized to file data."""
return pickle.dumps(test_record, -1)
@staticmethod
def open_file(filename):
"""Override method to alter file open behavior or file types."""
return Atomic(filename)
@contextlib.contextmanager
def open_output_file(self, test_record):
"""Open file based on pattern."""
# Ignore keys for the log filename to not convert larger data structures.
record_dict = data.convert_to_base_types(
test_record, ignore_keys=('code_info', 'phases', 'log_records'))
pattern = self.filename_pattern
if isinstance(pattern, six.string_types) or callable(pattern):
output_file = self.open_file(util.format_string(pattern, record_dict))
try:
yield output_file
finally:
output_file.close()
elif hasattr(self.filename_pattern, 'write'):
yield self.filename_pattern
else:
raise ValueError(
'filename_pattern must be string, callable, or File-like object')
def __call__(self, test_record):
with self.open_output_file(test_record) as outfile:
outfile.write(self.serialize_test_record(test_record))
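# Illustrative usage (a sketch; the pattern and the openhtf.Test instance
# `test` are hypothetical, fields come from the converted test record):
#   test.add_output_callbacks(
#       OutputToFile('./records/{dut_id}.{start_time_millis}.pickle'))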
|
{
"content_hash": "a4bba2bc4f1fb8b75b1ee5feb9634f11",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 80,
"avg_line_length": 32.34444444444444,
"alnum_prop": 0.7238062521470285,
"repo_name": "ShaperTools/openhtf",
"id": "d120600a305ead3df4c157da86200e15223ba9e8",
"size": "3505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openhtf/output/callbacks/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17668"
},
{
"name": "HTML",
"bytes": "16790"
},
{
"name": "JavaScript",
"bytes": "10032"
},
{
"name": "Python",
"bytes": "865340"
},
{
"name": "Shell",
"bytes": "96"
},
{
"name": "TypeScript",
"bytes": "118082"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
from django.utils.timezone import now
from pos.models.shift import Shift
from pos.models.stock import Order
from pos.models.user import User
from rest_framework import serializers
class ShiftSerializer(serializers.ModelSerializer):
cash = serializers.SerializerMethodField()
credit = serializers.SerializerMethodField()
card = serializers.SerializerMethodField()
vipps = serializers.SerializerMethodField()
prepaid = serializers.SerializerMethodField()
mobilepay = serializers.SerializerMethodField()
izettle = serializers.SerializerMethodField()
undo = serializers.SerializerMethodField()
shift_name = serializers.SerializerMethodField()
class Meta:
model = Shift
        fields = '__all__'
        read_only_fields = ('end',)
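
    # Payment method codes, inferred from the getters below:
    # 0=cash, 1=credit, 2=card, 3=vipps, 4=prepaid, 5=mobilepay, 6=izettle, 7=undo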
def accumulate_sum(self, obj, payment_method):
if obj.end:
orders = Order.objects.filter(
date__gte=obj.start).filter(date__lte=obj.end).filter(authenticated_user=obj.authenticated_user)
else:
orders = Order.objects.filter(
date__gte=obj.start).filter(authenticated_user=obj.authenticated_user)
return sum([order.sum for order in orders if order.payment_method == payment_method])
def get_shift_name(self, obj):
local_start = obj.start + timedelta(hours=2)
return obj.authenticated_user.username + ' - Started: ' + local_start.strftime('%A %H:%M:%S')
def get_cash(self, obj):
return self.accumulate_sum(obj, 0)
def get_credit(self, obj):
return self.accumulate_sum(obj, 1)
def get_card(self, obj):
return self.accumulate_sum(obj, 2)
def get_vipps(self, obj):
return self.accumulate_sum(obj, 3)
def get_prepaid(self, obj):
return self.accumulate_sum(obj, 4)
def get_mobilepay(self, obj):
return self.accumulate_sum(obj, 5)
def get_izettle(self, obj):
return self.accumulate_sum(obj, 6)
def get_undo(self, obj):
return self.accumulate_sum(obj, 7)
class NewShiftSerializer(serializers.Serializer):
card = serializers.CharField(required=True)
def create(self, validated_data, request):
card = validated_data.get('card')
user = User.objects.get(card__iexact=card)
open_shifts = Shift.objects.filter(
authenticated_user=request.user).filter(end__isnull=True)
for shift in open_shifts:
shift.end = now()
shift.save()
new_shift = Shift(authenticated_user=request.user, user=user)
new_shift.save()
return new_shift
|
{
"content_hash": "0dcb8ca9bef3041a51affb46f5785fbd",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 112,
"avg_line_length": 31.392857142857142,
"alnum_prop": 0.664012135001896,
"repo_name": "nuxis/p0sX-server",
"id": "9060db36d22df8f873a15d26e47ae92855b8d7fa",
"size": "2637",
"binary": false,
"copies": "1",
"ref": "refs/heads/staging",
"path": "p0sx/pos/serializers/shift.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "351"
},
{
"name": "Makefile",
"bytes": "663"
},
{
"name": "Python",
"bytes": "126660"
},
{
"name": "Shell",
"bytes": "955"
},
{
"name": "Standard ML",
"bytes": "1597"
}
],
"symlink_target": ""
}
|
"""Extract and save accuracy to 'stats/accuracy.json'.
The accuracy is extracted from the most recent eventfile.
"""
import glob
import json
import os.path
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
FLAGS = flags.FLAGS
TAG = 'accuracy'
def summary_dict(accuracies):
return {
'last%02d' % x: np.median(accuracies[-x:]) for x in [1, 10, 20, 50]
}
def main(argv):
if len(argv) > 2:
raise app.UsageError('Too many command-line arguments.')
folder = argv[1]
matches = sorted(glob.glob(os.path.join(folder, 'tf/events.out.tfevents.*')))
assert matches, 'No events files found'
tags = set()
accuracies = []
for event_file in matches:
for e in tf.train.summary_iterator(event_file):
for v in e.summary.value:
if v.tag == TAG:
accuracies.append(v.simple_value)
break
elif not accuracies:
tags.add(v.tag)
assert accuracies, 'No "accuracy" tag found. Found tags = %s' % tags
target_dir = os.path.join(folder, 'stats')
target_file = os.path.join(target_dir, 'accuracy.json')
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
with open(target_file, 'w') as f:
json.dump(summary_dict(accuracies), f, sort_keys=True, indent=4)
print('Saved: %s' % target_file)
if __name__ == '__main__':
app.run(main)
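# Illustrative invocation (a sketch; the folder layout is inferred from the
# code above):
#   python extract_accuracy.py experiments/my_run
# reads experiments/my_run/tf/events.out.tfevents.* and writes
# experiments/my_run/stats/accuracy.json with last01/last10/last20/last50 medians.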
|
{
"content_hash": "00dad062f76af09610fd5ec95dc48c22",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 81,
"avg_line_length": 27.49056603773585,
"alnum_prop": 0.6115305422100206,
"repo_name": "google-research/mixmatch",
"id": "db443900faba9fc36cfec4846ac7870ebb1d4c64",
"size": "2056",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/extract_accuracy.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "143832"
},
{
"name": "Shell",
"bytes": "10533"
}
],
"symlink_target": ""
}
|
# [START automl_v1beta1_generated_AutoMl_DeployModel_sync]
from google.cloud import automl_v1beta1
def sample_deploy_model():
# Create a client
client = automl_v1beta1.AutoMlClient()
# Initialize request argument(s)
request = automl_v1beta1.DeployModelRequest(
name="name_value",
)
# Make the request
operation = client.deploy_model(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END automl_v1beta1_generated_AutoMl_DeployModel_sync]
|
{
"content_hash": "c2113d842d6df8797d6bfbda22870276",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 56,
"avg_line_length": 22.695652173913043,
"alnum_prop": 0.6934865900383141,
"repo_name": "googleapis/python-automl",
"id": "7d39d1da7af8bf18eec16dd84c48f214523a2cca",
"size": "1899",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2347989"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
}
|
from cpress import *
|
{
"content_hash": "a9d68e90c370d900e6f20323b0cbacdd",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 20,
"avg_line_length": 20,
"alnum_prop": 0.8,
"repo_name": "solusipse/cpress",
"id": "265b66c1c500a339c2e5926776f14a7fcd5fc787",
"size": "20",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "16798"
},
{
"name": "Go",
"bytes": "9236"
},
{
"name": "Python",
"bytes": "7493"
}
],
"symlink_target": ""
}
|
"""Tests for unspect_utils module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import wraps
import imp
import types
import weakref
import six
from tensorflow.python import lib
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
def decorator(f):
return f
def function_decorator():
def dec(f):
return f
return dec
def wrapping_decorator():
def dec(f):
def replacement(*_):
return None
@wraps(f)
def wrapper(*args, **kwargs):
return replacement(*args, **kwargs)
return wrapper
return dec
class TestClass(object):
def member_function(self):
pass
@decorator
def decorated_member(self):
pass
@function_decorator()
def fn_decorated_member(self):
pass
@wrapping_decorator()
def wrap_decorated_member(self):
pass
@staticmethod
def static_method():
pass
@classmethod
def class_method(cls):
pass
def free_function():
pass
def factory():
return free_function
def free_factory():
def local_function():
pass
return local_function
class InspectUtilsTest(test.TestCase):
def test_getnamespace_globals(self):
ns = inspect_utils.getnamespace(factory)
self.assertEqual(ns['free_function'], free_function)
def test_getnamespace_hermetic(self):
# Intentionally hiding the global function to make sure we don't overwrite
# it in the global namespace.
free_function = object() # pylint:disable=redefined-outer-name
def test_fn():
return free_function
ns = inspect_utils.getnamespace(test_fn)
globs = six.get_function_globals(test_fn)
self.assertTrue(ns['free_function'] is free_function)
self.assertFalse(globs['free_function'] is free_function)
def test_getnamespace_locals(self):
def called_fn():
return 0
closed_over_list = []
closed_over_primitive = 1
def local_fn():
closed_over_list.append(1)
local_var = 1
return called_fn() + local_var + closed_over_primitive
ns = inspect_utils.getnamespace(local_fn)
self.assertEqual(ns['called_fn'], called_fn)
self.assertEqual(ns['closed_over_list'], closed_over_list)
self.assertEqual(ns['closed_over_primitive'], closed_over_primitive)
self.assertTrue('local_var' not in ns)
def test_getqualifiedname(self):
foo = object()
qux = imp.new_module('quxmodule')
bar = imp.new_module('barmodule')
baz = object()
bar.baz = baz
ns = {
'foo': foo,
'bar': bar,
'qux': qux,
}
self.assertIsNone(inspect_utils.getqualifiedname(ns, inspect_utils))
self.assertEqual(inspect_utils.getqualifiedname(ns, foo), 'foo')
self.assertEqual(inspect_utils.getqualifiedname(ns, bar), 'bar')
self.assertEqual(inspect_utils.getqualifiedname(ns, baz), 'bar.baz')
def test_getqualifiedname_finds_via_parent_module(self):
# TODO(mdan): This test is vulnerable to change in the lib module.
# A better way to forge modules should be found.
self.assertEqual(
inspect_utils.getqualifiedname(
lib.__dict__, lib.io.file_io.FileIO, max_depth=1),
'io.file_io.FileIO')
def test_getmethodclass(self):
self.assertEqual(
inspect_utils.getmethodclass(free_function), None)
self.assertEqual(
inspect_utils.getmethodclass(free_factory()), None)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.member_function),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.fn_decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.wrap_decorated_member),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.static_method),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(TestClass.class_method),
TestClass)
test_obj = TestClass()
self.assertEqual(
inspect_utils.getmethodclass(test_obj.member_function),
test_obj)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.decorated_member),
test_obj)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.fn_decorated_member),
test_obj)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.wrap_decorated_member),
test_obj)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.static_method),
TestClass)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.class_method),
TestClass)
def test_getmethodclass_locals(self):
def local_function():
pass
class LocalClass(object):
def member_function(self):
pass
@decorator
def decorated_member(self):
pass
@function_decorator()
def fn_decorated_member(self):
pass
@wrapping_decorator()
def wrap_decorated_member(self):
pass
self.assertEqual(
inspect_utils.getmethodclass(local_function), None)
self.assertEqual(
inspect_utils.getmethodclass(LocalClass.member_function),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(LocalClass.decorated_member),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(LocalClass.fn_decorated_member),
LocalClass)
self.assertEqual(
inspect_utils.getmethodclass(LocalClass.wrap_decorated_member),
LocalClass)
test_obj = LocalClass()
self.assertEqual(
inspect_utils.getmethodclass(test_obj.member_function),
test_obj)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.decorated_member),
test_obj)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.fn_decorated_member),
test_obj)
self.assertEqual(
inspect_utils.getmethodclass(test_obj.wrap_decorated_member),
test_obj)
def test_getmethodclass_callables(self):
class TestCallable(object):
def __call__(self):
pass
c = TestCallable()
self.assertEqual(inspect_utils.getmethodclass(c), TestCallable)
def test_getmethodclass_weakref_mechanism(self):
test_obj = TestClass()
class WeakrefWrapper(object):
def __init__(self):
self.ag_self_weakref__ = weakref.ref(test_obj)
def test_fn(self):
return self
bound_method = types.MethodType(test_fn, WeakrefWrapper())
self.assertEqual(inspect_utils.getmethodclass(bound_method), test_obj)
def test_getmethodclass_no_bool_conversion(self):
tensor = constant_op.constant([1])
self.assertEqual(inspect_utils.getmethodclass(tensor.get_shape), tensor)
def test_getdefiningclass(self):
class Superclass(object):
def foo(self):
pass
def bar(self):
pass
@classmethod
def class_method(cls):
pass
class Subclass(Superclass):
def foo(self):
pass
def baz(self):
pass
self.assertTrue(
inspect_utils.getdefiningclass(Subclass.foo, Subclass) is Subclass)
self.assertTrue(
inspect_utils.getdefiningclass(Subclass.bar, Subclass) is Superclass)
self.assertTrue(
inspect_utils.getdefiningclass(Subclass.baz, Subclass) is Subclass)
self.assertTrue(
inspect_utils.getdefiningclass(Subclass.class_method, Subclass) is
Superclass)
def test_isbuiltin(self):
self.assertTrue(inspect_utils.isbuiltin(range))
self.assertTrue(inspect_utils.isbuiltin(float))
self.assertTrue(inspect_utils.isbuiltin(int))
self.assertTrue(inspect_utils.isbuiltin(len))
self.assertFalse(inspect_utils.isbuiltin(function_decorator))
def test_super_wrapper_for_dynamic_attrs(self):
a = object()
b = object()
class Base(object):
def __init__(self):
self.a = a
class Subclass(Base):
def __init__(self):
super(Subclass, self).__init__()
self.b = b
base = Base()
sub = Subclass()
sub_super = super(Subclass, sub)
sub_super_wrapped = inspect_utils.SuperWrapperForDynamicAttrs(sub_super)
self.assertIs(base.a, a)
self.assertIs(sub.a, a)
self.assertFalse(hasattr(sub_super, 'a'))
self.assertIs(sub_super_wrapped.a, a)
# TODO(mdan): Is this side effect harmful? Can it be avoided?
# Note that `b` was set in `Subclass.__init__`.
self.assertIs(sub_super_wrapped.b, b)
if __name__ == '__main__':
test.main()
|
{
"content_hash": "7bb14f26f1949e714af0aa28a3c84bed",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 78,
"avg_line_length": 25.255072463768116,
"alnum_prop": 0.6695741994720532,
"repo_name": "brchiu/tensorflow",
"id": "622e3bafc0ab3d7dd8876cbbbee45f8055c48056",
"size": "9402",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/autograph/pyct/inspect_utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "473950"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "51674376"
},
{
"name": "CMake",
"bytes": "199085"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285435"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "875500"
},
{
"name": "Jupyter Notebook",
"bytes": "2623054"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "63390"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41718475"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "490100"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
'''
SYNBIOCHEM-DB (c) University of Manchester 2015
SYNBIOCHEM-DB is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=too-many-public-methods
import unittest
from synbiochem.biochem4j import taxonomy
class TestTaxonomy(unittest.TestCase):
'''Test class for taxonomy.'''
def test_get_synonyms_by_id(self):
'''Tests get_synonyms_by_id method.'''
synonyms = taxonomy.get_synonyms_by_id('511145')
self.assertTrue('Escherichia coli strain MG1655' in synonyms)
def test_get_children_by_id(self):
'''Tests get_children_by_id method.'''
children = taxonomy.get_children_by_id('83333')
self.assertTrue('511145' in [child['taxonomy'] for child in children])
def test_get_parent_by_id(self):
'''Tests get_parent_by_id method.'''
self.assertEqual(taxonomy.get_parent_by_id('511145')['taxonomy'],
'83333')
def test_get_parent_by_name(self):
'''Tests taxonomy method.'''
name = 'Escherichia coli str. K-12 substr. MG1655'
self.assertEqual(taxonomy.get_parent_by_name(name)['taxonomy'],
'83333')
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
{
"content_hash": "0c53e6e7866017728595f1b0c1b6eb20",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 78,
"avg_line_length": 30.795454545454547,
"alnum_prop": 0.6369003690036901,
"repo_name": "synbiochem/synbiochem-py",
"id": "8333f3bb383c02964ff282b06b4874e0715d5603",
"size": "1355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synbiochem/biochem4j/test/test_taxonomy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "121177"
},
{
"name": "Shell",
"bytes": "37"
}
],
"symlink_target": ""
}
|
"""
This is the single point of entry to generate the sample configuration
file for Arestor.
"""
import collections
from arestor.config import base as conf_base
from arestor.config import factory as conf_factory
def get_options():
"""Collect all the options info from the other modules."""
options = collections.defaultdict(list)
for opt_class in conf_factory.get_options():
if not issubclass(opt_class, conf_base.Options):
continue
config_options = opt_class(None)
options[config_options.group_name].extend(config_options.list())
return [(key, value) for key, value in options.items()]
|
{
"content_hash": "fdf6eddd0f252baa9ad11aaff94d4599",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 72,
"avg_line_length": 32.15,
"alnum_prop": 0.7076205287713841,
"repo_name": "alexcoman/arestor",
"id": "1785b5ea9de0f290f2c89d61ac2fe1de2aa9cc85",
"size": "1259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arestor/config/options.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "47319"
}
],
"symlink_target": ""
}
|
import datetime
import hashlib
import os
from app import app,webscrape
import chardet
def process_source(token,uploadedfile=None,weburl=None):
sourcefilename = os.path.join(app.config['UPLOAD_FOLDER'],''.join(['source',token,'.txt']))
if uploadedfile:
process_uploaded_txt_file(uploadedfile=uploadedfile,targetfilename=sourcefilename)
if weburl:
text = webscrape.gettextfromurl(weburl)
if text and len(text) < app.config['MAX_WEB_CONTENT_LENGTH']:
with open(sourcefilename, "a", encoding='utf-8') as f:
f.write(text)
    # Guard against the case where no source file was ever written.
    if os.path.exists(sourcefilename) and os.stat(sourcefilename).st_size:
        return sourcefilename
    else:
        return None
def process_uploaded_txt_file(uploadedfile,targetfilename):
if allowed_file_txt(uploadedfile.filename) and uploadedfile.content_length < app.config['MAX_FILE_CONTENT_LENGTH']:
uploadedfile.save(targetfilename)
with open(targetfilename, "r", encoding=getencoding(targetfilename)) as f:
text = f.read()
with open(targetfilename, "w", encoding='utf-8') as f:
f.write(text)
return True
def allowed_file_txt(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS_TXT']
def getencoding(filename):
    with open(filename, 'rb') as f:
        rawdata = f.read()
    encoding = chardet.detect(rawdata)['encoding']
    return encoding if encoding else 'UTF-8'
def gettoken():
return str(hashlib.sha1(str(datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')).encode('utf-8')).hexdigest())
def allowed_file_img(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS_IMG']
def maintenance():
checkfolder(app.config['UPLOAD_FOLDER'])
checkfolder(app.config['OUTPUT_FOLDER'])
return True
def checkfolder(folderpath=None):
if os.path.isdir(folderpath):
cutoff = datetime.datetime.now()-datetime.timedelta(minutes=10)
for file in os.listdir(folderpath):
if datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(folderpath,file))) < cutoff:
os.remove(os.path.join(folderpath,file))
else:
os.makedirs(folderpath)
return True
|
{
"content_hash": "c1e7a49a162d8cb1712012874ae7f568",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 119,
"avg_line_length": 32.791666666666664,
"alnum_prop": 0.6416772554002541,
"repo_name": "OlegPyatakov/tagclouds",
"id": "27dea6a476849149b3d09dc7f237fc9f7867fd69",
"size": "2361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/app/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9348"
},
{
"name": "Python",
"bytes": "16199"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class RuleDataSource(Model):
"""The resource from which the rule collects its data.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: RuleMetricDataSource, RuleManagementEventDataSource
:param resource_uri: the resource identifier of the resource the rule
monitors. **NOTE**: this property cannot be updated for an existing rule.
:type resource_uri: str
:param odatatype: Constant filled by server.
:type odatatype: str
"""
_validation = {
'odatatype': {'required': True},
}
_attribute_map = {
'resource_uri': {'key': 'resourceUri', 'type': 'str'},
'odatatype': {'key': 'odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odatatype': {'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource': 'RuleMetricDataSource', 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource': 'RuleManagementEventDataSource'}
}
def __init__(self, resource_uri=None):
self.resource_uri = resource_uri
self.odatatype = None
|
{
"content_hash": "0a931d8dce6042da16fd83478068ec22",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 221,
"avg_line_length": 35.4375,
"alnum_prop": 0.6798941798941799,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "9dbcb1e1a5b754a1c19f7273755fde62bef8745f",
"size": "1608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-monitor/azure/mgmt/monitor/models/rule_data_source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
from sqlnice import *
from tablenice import *
from columnnice import *
from functions import *
|
{
"content_hash": "ce2b966bc1a280a8f3ae47304bbfb1ea",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 24,
"avg_line_length": 23.5,
"alnum_prop": 0.7978723404255319,
"repo_name": "TRBaldim/sqlNice",
"id": "2a58185e75aabaa93ae7ff36905d26ab524f0e07",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqlNice/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "40411"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/furniture/modern/shared_coffee_table_modern_style_01.iff"
result.attribute_template_id = 6
result.stfName("frn_n","frn_coffee_table")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "9d32a334061a6b0a0c877cf04104c883",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 93,
"avg_line_length": 25,
"alnum_prop": 0.7046153846153846,
"repo_name": "anhstudios/swganh",
"id": "58c06d28d6ca77daa57376dff6369926e8a85af2",
"size": "470",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/furniture/modern/shared_coffee_table_modern_style_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
def FancyDivide(list_of_numbers, index):
denom = list_of_numbers[index]
return [SimpleDivide(item, denom)
for item in list_of_numbers]
def SimpleDivide(item, denom):
try:
return item / denom
    except ZeroDivisionError:
        return 0
|
{
"content_hash": "e3ee2d4e7af174b28bf4610221649a57",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 43,
"avg_line_length": 23.09090909090909,
"alnum_prop": 0.6259842519685039,
"repo_name": "xala3pa/Introduction-to-Computer-Science-and-Programming-with-python",
"id": "dc8cbaba44b94ac1494e19c97d486dccdda43193",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "week4/Problems/L8PROBLEM3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "220675"
}
],
"symlink_target": ""
}
|
from yapsy.IPlugin import IPlugin
import datetime
import pymssql
import re
class PluginModelo(IPlugin):
'''
    Returns the beneficiary provider for checks:
    - Name
    - Sex
    - Birth date
    - Inactivation dates
'''
name = "PROVIDER BENEFICIARIO"
def executa(self, objeto):
print('''
#
# Executando: %s
#
''' % self.name)
        # for every beneficiary present in the claim (guia)
carteiras_e = objeto.root.xpath("//ans:numeroCarteira", namespaces=objeto.nsmap)
carteiras = [e.text for e in carteiras_e]
beneficiarios_unicos = set([ "'%s'" % re.sub(r"\D", "", i) for i in carteiras])
provider = {}
query = '''
Select
right(replicate('0',17)+cast(codigo as varchar(17)),17) as codigo,
left(right(replicate('0',17)+cast(codigo as varchar(17)),17), 4) as operadora,
pessoa.Nome as nome,
pessoa.Sexo as sexo,
beneficiario.tipo,
pessoa.DataNascimento as nascimento
from beneficiario
inner join Pessoa on Beneficiario.Pessoa=Pessoa.AutoId
where Codigo in (
%s
)
''' % ",".join(beneficiarios_unicos)
        try:
            provider_conf = objeto.provider_conf['cardio']
        except (AttributeError, KeyError):
            print("INFO: Cardio provider not found")
            return False
servidor = provider_conf['servidor']
usuario = provider_conf['usuario']
banco = provider_conf['banco']
senha = provider_conf['senha']
conn = pymssql.connect(servidor, usuario, senha, banco, as_dict=True)
print("conectando a banco...")
cursor = conn.cursor()
print(query)
cursor.execute(query)
rows = cursor.fetchall()
for row in rows:
            # for each beneficiary
if row['tipo'] != 9:
local = True
else:
local = False
dados = {
"nome" : row['nome'],
"sexo" : row['sexo'],
"nascimento" : row['nascimento'],
"operadora" : row['operadora'],
"codigo" : row['codigo'],
"local": local
}
            # update the provider
provider[row['codigo']] = dados
        # register in the global provider registry
objeto.registra_provider('beneficiario', provider)
|
{
"content_hash": "de6f051feef16456d8897af2661b8f79",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 88,
"avg_line_length": 32.17333333333333,
"alnum_prop": 0.5337753833402403,
"repo_name": "dudanogueira/tiss",
"id": "e6172f6cbb434548af8c234d8a7685319dcd2097",
"size": "2439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tiss/extensoes/providers/cardio_beneficiario/cardio_beneficiario.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41937"
}
],
"symlink_target": ""
}
|
"""
Tools for different regimes procedure estimations
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, \
Daniel Arribas-Bel darribas@asu.edu, \
Pedro V. Amaral pedro.amaral@asu.edu"
import numpy as np
import pysal
import scipy.sparse as SP
import itertools as iter
from scipy.stats import f, chisqprob
import numpy.linalg as la
class Chow:
'''
    Chow test of coefficient stability across regimes. The test is a
    particular case of the Wald statistic in which the constraints are set up
    according to the spatial or other type of regime structure
...
Arguments
=========
reg : regression object
Regression object from PySAL.spreg which is assumed to have the
following attributes:
* betas : coefficient estimates
* vm : variance covariance matrix of betas
* kr : Number of variables varying across regimes
* kryd : Number of endogenous variables varying across regimes
* kf : Number of variables fixed (global) across regimes
* nr : Number of regimes
Attributes
==========
joint : tuple
Pair of Wald statistic and p-value for the setup of global
                  regime stability, that is, all betas are the same across
regimes.
regi : array
kr x 2 array with Wald statistic (col 0) and its p-value (col 1)
for each beta that varies across regimes. The restrictions
                  are set up to test for the global stability (all regimes have the
same parameter) of the beta.
Examples
========
>>> import numpy as np
>>> import pysal
>>> from ols_regimes import OLS_Regimes
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y_var = 'CRIME'
>>> y = np.array([db.by_col(y_var)]).reshape(49,1)
>>> x_var = ['INC','HOVAL']
>>> x = np.array([db.by_col(name) for name in x_var]).T
>>> r_var = 'NSA'
>>> regimes = db.by_col(r_var)
>>> olsr = OLS_Regimes(y, x, regimes, constant_regi='many', nonspat_diag=False, spat_diag=False, name_y=y_var, name_x=x_var, name_ds='columbus', name_regimes=r_var)
>>> print olsr.name_x_r #x_var
['CONSTANT', 'INC', 'HOVAL']
>>> print olsr.chow.regi
[[ 0.01020844 0.91952121]
[ 0.46024939 0.49750745]
[ 0.55477371 0.45637369]]
>>> print 'Joint test:'
Joint test:
>>> print olsr.chow.joint
(0.6339319928978806, 0.8886223520178802)
'''
def __init__(self, reg):
kr, kf, kryd, nr, betas, vm = reg.kr, reg.kf, reg.kryd, reg.nr, reg.betas, reg.vm
if betas.shape[0] != vm.shape[0]:
if kf>0:
betas = betas[0:vm.shape[0],:]
kf = kf-1
else:
brange = []
for i in range(nr):
brange.extend(range(i*(kr+1),i*(kr+1)+kr))
betas = betas[brange,:]
r_global = []
regi = np.zeros((reg.kr, 2))
for vari in np.arange(kr):
r_vari = buildR1var(vari, kr, kf, kryd, nr)
r_global.append(r_vari)
q = np.zeros((r_vari.shape[0], 1))
regi[vari, :] = wald_test(betas, r_vari, q, vm)
r_global = np.vstack(tuple(r_global))
q = np.zeros((r_global.shape[0], 1))
joint = wald_test(betas, r_global, q, vm)
self.joint = joint
self.regi = regi
class Wald:
'''
Chi sq. Wald statistic to test for restriction of coefficients.
Implementation following Greene [1]_ eq. (17-24), p. 488
...
Arguments
=========
reg : regression object
Regression object from PySAL.spreg
r : array
                  Array of dimension Rxk (R being the number of restrictions) with the constraint setup.
q : array
Rx1 array with constants in the constraint setup. See Greene
[1]_ for reference.
Attributes
==========
w : float
Wald statistic
pvalue : float
P value for Wald statistic calculated as a Chi sq. distribution
with R degrees of freedom
References
==========
    .. [1] W. Greene. 2003. Econometric Analysis (Fifth Edition). Prentice Hall, Upper
Saddle River.
'''
def __init__(self, reg, r, q=None):
        if q is None:
            q = np.zeros((r.shape[0], 1))
self.w, self.pvalue = wald_test(reg.betas, r, q, reg.vm)
class Regimes_Frame:
'''
Setup framework to work with regimes. Basically it involves:
* Dealing with the constant in a regimes world
* Creating a sparse representation of X
* Generating a list of names of X taking into account regimes
...
Arguments
=========
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi: [False, 'one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* False: no constant term is appended in any way
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime (default)
cols2regi : list, 'all'
Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all' (default), all the variables vary by regime.
names : None, list of strings
Names of independent variables for use in output
Returns
=======
x : csr sparse matrix
Sparse matrix containing X variables properly aligned for
regimes regression. 'xsp' is of dimension (n, k*r) where 'r'
is the number of different regimes
                  The structure of the alignment is X1r1 X2r1 ... X1r2 X2r2 ...
names : None, list of strings
Names of independent variables for use in output
conveniently arranged by regimes. The structure of the name
is "regimeName_-_varName"
kr : int
Number of variables/columns to be "regimized" or subject
to change by regime. These will result in one parameter
estimate by regime for each variable (i.e. nr parameters per
variable)
kf : int
Number of variables/columns to be considered fixed or
global across regimes and hence only obtain one parameter
estimate
nr : int
Number of different regimes in the 'regimes' list
Appends to self
===============
'''
def __init__(self, x, regimes, constant_regi, cols2regi, names=None, yend=False):
if cols2regi == 'all':
cols2regi = [True] * x.shape[1]
else:
if yend:
cols2regi = cols2regi[-x.shape[1]:]
else:
cols2regi = cols2regi[0:x.shape[1]]
if constant_regi:
x = np.hstack((np.ones((x.shape[0], 1)), x))
if constant_regi == 'one':
cols2regi.insert(0, False)
elif constant_regi == 'many':
cols2regi.insert(0, True)
else:
            raise Exception, "Invalid argument (%s) passed for 'constant_regi'. Please specify a valid term." % str(constant_regi)
try:
x = regimeX_setup(x, regimes, cols2regi, self.regimes_set, constant=constant_regi)
except AttributeError:
self.regimes_set = list(set(regimes))
self.regimes_set.sort()
x = regimeX_setup(x, regimes, cols2regi, self.regimes_set, constant=constant_regi)
kr = len(np.where(np.array(cols2regi)==True)[0])
if yend:
self.kr += kr
self.kf += len(cols2regi) - kr
self.kryd = kr
else:
self.kr = kr
self.kf = len(cols2regi) - self.kr
self.kryd = 0
self.nr = len(set(regimes))
if names:
names = set_name_x_regimes(names, regimes, constant_regi, cols2regi, self.regimes_set)
return (x, names)
def wald_test(betas, r, q, vm):
'''
Chi sq. Wald statistic to test for restriction of coefficients.
Implementation following Greene [1]_ eq. (17-24), p. 488
...
Arguments
=========
betas : array
kx1 array with coefficient estimates
r : array
               Array of dimension Rxk (R being the number of restrictions) with the constraint setup.
q : array
Rx1 array with constants in the constraint setup. See Greene
[1]_ for reference.
vm : array
kxk variance-covariance matrix of coefficient estimates
Returns
=======
w : float
Wald statistic
pvalue : float
P value for Wald statistic calculated as a Chi sq. distribution
with R degrees of freedom
References
==========
.. [1] W. Greene. 2003. Econometric Analysis (Fifth Edtion). Prentice Hall, Upper
Saddle River.
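    Examples
    ========
    A minimal sanity check (assuming numpy is importable as 'np' at module
    level): with two coefficients and the single restriction b1 = b2, the
    statistic reduces to (b1 - b2)**2 / (v11 + v22 - 2*v12).

    >>> betas = np.array([[2.], [3.]])
    >>> r = np.array([[1., -1.]])
    >>> q = np.array([[0.]])
    >>> vm = np.eye(2)
    >>> w, pvalue = wald_test(betas, r, q, vm)
    >>> print round(w, 4)
    0.5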
'''
rbq = np.dot(r, betas) - q
rvri = la.inv(np.dot(r, np.dot(vm, r.T)))
w = np.dot(rbq.T, np.dot(rvri, rbq))[0][0]
df = r.shape[0]
pvalue = chisqprob(w, df)
return w, pvalue
def buildR(kr, kf, nr):
'''
Build R matrix to globally test for spatial heterogeneity across regimes.
The constraint setup reflects the null that every beta is the
same across regimes
...
Arguments
=========
kr : int
Number of variables that vary across regimes ("regimized")
kf : int
Number of variables that do not vary across regimes ("fixed" or
global)
nr : int
Number of regimes
Returns
=======
R : array
Array with the constraint setup to test stability across
regimes of one variable
'''
    # kryd is fixed at 0 here, assuming no endogenous regime variables enter
    # the global test; extend the mapped lists if that assumption changes.
    return np.vstack(tuple(map(buildR1var, np.arange(kr), [kr]*kr, [kf]*kr, [0]*kr, [nr]*kr)))
def buildR1var(vari, kr, kf, kryd, nr):
'''
Build R matrix to test for spatial heterogeneity across regimes in one
variable. The constraint setup reflects the null that the betas for
variable 'vari' are the same across regimes
...
Arguments
=========
vari : int
Position of the variable to be tested (order in the sequence of
variables per regime)
kr : int
Number of variables that vary across regimes ("regimized")
kf : int
Number of variables that do not vary across regimes ("fixed" or
global)
nr : int
Number of regimes
Returns
=======
R : array
Array with the constraint setup to test stability across
regimes of one variable
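    Examples
    ========
    Illustrative sketch: with kr=2 regime-varying variables, kf=1 global
    variable, kryd=0 and nr=2 regimes, testing variable 0 yields a single
    restriction equating its two regime betas (the trailing zero column
    corresponds to the global variable).

    >>> print buildR1var(0, 2, 1, 0, 2)
    [[ 1  0 -1  0  0]]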
'''
ncols = (kr * nr)
nrows = nr - 1
r = np.zeros((nrows, ncols), dtype=int)
rbeg = 0
krexog = kr - kryd
if vari < krexog:
kr_j = krexog
cbeg = vari
else:
kr_j = kryd
cbeg = krexog*(nr-1) + vari
r[rbeg: rbeg+nrows , cbeg] = 1
for j in np.arange(nrows):
r[rbeg+j, kr_j + cbeg + j*kr_j] = -1
return np.hstack( (r, np.zeros((nrows, kf), dtype=int)) )
def regimeX_setup(x, regimes, cols2regi, regimes_set, constant=False):
'''
Flexible full setup of a regime structure
NOTE: constant term, if desired in the model, should be included in the x
already
...
Arguments
=========
x : np.array
Dense array of dimension (n, k) with values for all observations
IMPORTANT: constant term (if desired in the model) should be
included
regimes : list
list of n values with the mapping of each observation to a
regime. Assumed to be aligned with 'x'.
cols2regi : list
List of k booleans indicating whether each column should be
considered as different per regime (True) or held constant
across regimes (False)
regimes_set : list
List of ordered regimes tags
constant : [False, 'one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* False: no constant term is appended in any way
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime
Returns
=======
xsp : csr sparse matrix
Sparse matrix containing the full setup for a regimes model
as specified in the arguments passed
NOTE: columns are reordered so first are all the regime
columns then all the global columns (this makes it much more
efficient)
Structure of the output matrix (assuming X1, X2 to vary
across regimes and constant term, X3 and X4 to be global):
X1r1, X2r1, ... , X1r2, X2r2, ... , constant, X3, X4
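    Examples
    ========
    A small sketch: two observations in regimes 'a' and 'b', where the first
    column varies by regime and the second is held global.

    >>> x = np.array([[1., 2.], [3., 4.]])
    >>> print regimeX_setup(x, ['a', 'b'], [True, False], ['a', 'b']).todense()
    [[ 1.  0.  2.]
     [ 0.  3.  4.]]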
'''
cols2regi = np.array(cols2regi)
if set(cols2regi) == set([True]):
xsp = x2xsp(x, regimes, regimes_set)
elif set(cols2regi) == set([False]):
xsp = SP.csr_matrix(x)
else:
not_regi = x[:, np.where(cols2regi==False)[0]]
regi_subset = x[:, np.where(cols2regi)[0]]
regi_subset = x2xsp(regi_subset, regimes, regimes_set)
xsp = SP.hstack( (regi_subset, SP.csr_matrix(not_regi)) , format='csr')
return xsp
def set_name_x_regimes(name_x, regimes, constant_regi, cols2regi, regimes_set):
'''
Generate the set of variable names in a regimes setup, according to the
order of the betas
NOTE: constant term, if desired in the model, should be included in the x
already
...
Arguments
=========
name_x : list/None
If passed, list of strings with the names of the
variables aligned with the original dense array x
IMPORTANT: constant term (if desired in the model) should be
included
regimes : list
list of n values with the mapping of each observation to a
regime. Assumed to be aligned with 'x'.
constant_regi : [False, 'one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* False: no constant term is appended in any way
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime
cols2regi : list
List of k booleans indicating whether each column should be
considered as different per regime (True) or held constant
across regimes (False)
regimes_set : list
List of ordered regimes tags
Returns
=======
name_x_regi
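    Examples
    ========
    Illustrative sketch with one regime-varying and one global variable:

    >>> set_name_x_regimes(['INC', 'HOVAL'], ['a', 'b'], False, [True, False], ['a', 'b'])
    ['a_INC', 'b_INC', '_Global_HOVAL']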
'''
k = len(cols2regi)
if constant_regi:
k -= 1
if not name_x:
name_x = ['var_'+str(i+1) for i in range(k)]
if constant_regi:
name_x.insert(0, 'CONSTANT')
nxa = np.array(name_x)
c2ra = np.array(cols2regi)
vars_regi = nxa[np.where(c2ra==True)]
vars_glob = nxa[np.where(c2ra==False)]
name_x_regi = []
for r in regimes_set:
rl = ['%s_%s'%(str(r), i) for i in vars_regi]
name_x_regi.extend(rl)
name_x_regi.extend(['_Global_%s'%i for i in vars_glob])
return name_x_regi
def w_regimes(w, regimes, regimes_set, transform=True, get_ids=None, min_n=None):
'''
Subsets W matrix according to regimes
...
Arguments
=========
w : pysal W object
Spatial weights object
regimes : list
list of n values with the mapping of each observation to a
regime. Assumed to be aligned with 'x'.
regimes_set : list
List of ordered regimes tags
transform : boolean
If True (default), apply the transformation of 'w' to each subset
get_ids : boolean
If True, also return the observation ids for each regime
min_n : int
Minimum number of observations required per regime; an exception
is raised if a regime falls below it
Returns
=======
w_regi_i : dictionary
Dictionary containing the subsets of W according to regimes:
{r1: w1, r2: w2, ..., rR: wR}
get_ids : dictionary
Observation ids per regime if requested, else None
warn : string
Warning about islands created by the subsetting, if any; else None
'''
regi_ids = dict((r, list(np.where(np.array(regimes) == r)[0])) for r in regimes_set)
w_ids = dict((r, map(w.id_order.__getitem__, regi_ids[r])) for r in regimes_set)
w_regi_i = {}
warn = "\n"
for r in regimes_set:
w_regi_i[r] = pysal.weights.w_subset(w, w_ids[r])
if min_n:
if w_regi_i[r].n < min_n:
raise Exception, "There are less observations than variables in regime %s." %r
if transform:
w_regi_i[r].transform = w.get_transform()
if w_regi_i[r].islands:
warn += "Warning: The regimes operation resulted in islands for regime %s.\n" %r
if get_ids:
get_ids = regi_ids
if len(warn)<3:
warn = None
return w_regi_i, get_ids, warn
def w_regimes_union(w, w_regi_i, regimes_set):
'''
Combines the subsets of the W matrix according to regimes
...
Arguments
=========
w : pysal W object
Spatial weights object
w_regi_i : dictionary
Dictionary containing the subsets of W according to regimes: {r1: w1, r2: w2, ..., rR: wR}
regimes_set : list
List of ordered regimes tags
Returns
=======
w_regi : pysal W object
Spatial weights object containing the union of the subsets of W
'''
w_regi = pysal.weights.w_union(w_regi_i[regimes_set[0]], w_regi_i[regimes_set[1]])
if len(regimes_set)>2:
for i in range(len(regimes_set))[2:]:
w_regi = pysal.weights.w_union(w_regi, w_regi_i[regimes_set[i]])
w_regi = pysal.weights.remap_ids(w_regi, dict((i,i) for i in w_regi.id_order),w.id_order)
w_regi.transform = w.get_transform()
return w_regi
def x2xsp(x, regimes, regimes_set):
'''
Convert X matrix with regimes into a sparse X matrix that accounts for the
regimes
...
Arguments
=========
x : np.array
Dense array of dimension (n, k) with values for all observations
regimes : list
list of n values with the mapping of each observation to a
regime. Assumed to be aligned with 'x'.
regimes_set : list
List of ordered regimes tags
Returns
=======
xsp : csr sparse matrix
Sparse matrix containing X variables properly aligned for
regimes regression. 'xsp' is of dimension (n, k*r) where 'r'
is the number of different regimes
The structure of the alignment is X1r1 X2r1 ... X1r2 X2r2 ...
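    Examples
    ========
    A small sketch: two observations, two variables, regimes 'a' and 'b'.

    >>> x = np.array([[1., 2.], [3., 4.]])
    >>> print x2xsp(x, ['a', 'b'], ['a', 'b']).todense()
    [[ 1.  2.  0.  0.]
     [ 0.  0.  3.  4.]]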
'''
n, k = x.shape
data = x.flatten()
R = len(regimes_set)
regime_by_row = np.array([[r] * k for r in list(regimes_set)]).flatten() #X1r1 X2r1 ... X1r2 X2r2 ...
row_map = dict((r, np.where(regime_by_row == r)[0]) for r in regimes_set)
indices = np.array([row_map[row] for row in regimes]).flatten()
indptr = np.zeros((n+1, ), dtype=int)
indptr[:-1] = list(np.arange(n) * k)
indptr[-1] = n*k
return SP.csr_matrix((data, indices, indptr))
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
import numpy as np
import pysal
from ols_regimes import OLS_Regimes
db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
y_var = 'CRIME'
y = np.array([db.by_col(y_var)]).reshape(49,1)
x_var = ['INC','HOVAL']
x = np.array([db.by_col(name) for name in x_var]).T
r_var = 'NSA'
regimes = db.by_col(r_var)
w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
w.transform = 'r'
olsr = OLS_Regimes(y, x, regimes, w=w, constant_regi='many', nonspat_diag=False, spat_diag=False, name_y=y_var, name_x=x_var, name_ds='columbus', name_regimes=r_var, name_w='columbus.gal')
print olsr.summary
|
{
"content_hash": "c95047b9bfe19367127ab1835c9b7b63",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 192,
"avg_line_length": 37.66258741258741,
"alnum_prop": 0.5497841526249826,
"repo_name": "pombreda/pysal",
"id": "bbf45a2c6b3bd5d836dbfddc989b18c15a49b0e9",
"size": "21543",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pysal/spreg/regimes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "10152"
},
{
"name": "JavaScript",
"bytes": "1188"
},
{
"name": "Makefile",
"bytes": "254"
},
{
"name": "Python",
"bytes": "2060153"
},
{
"name": "Shell",
"bytes": "11128"
},
{
"name": "XSLT",
"bytes": "17216"
}
],
"symlink_target": ""
}
|
from classes import wunderpy_wrapper
from classes import grocerylist
from classes import grocerystore
wp = wunderpy_wrapper.wunderpy_wrapper('../data/tokens.csv')
obj = wp.get_task_positions_obj(wp.WUNDERLIST_GROCERY)
grocery_store = grocerystore.groceryStore('../data/store_order_zehrs.csv', '../data/ingredient_categories.csv') # use the default zehrs store; good enough
groceries = grocerylist.groceryList(wp.WUNDERLIST_GROCERY, wp)
groceries.get_tasks()
groceries.get_category_for_element(groceries.grocery_list[0], grocery_store)
groceries.get_categories(grocery_store)
groceries.reorder_list(wp)
# wp.update_list_order(groceries.wunderlist_order_obj)
# TODO check reloading of a list when you enter the right sheet
# TODO sort by cat order value, not cat id.
print('done')
|
{
"content_hash": "15a6c3687085661fdbe4c90efccf6bef",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 154,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.7869897959183674,
"repo_name": "briancousins/RecipeBook",
"id": "fda9c7db46182b5e50d8cdae4bc4d814f49e0e1f",
"size": "784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_grocerylist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31087"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('schools', '0005_auto_20160407_0115'),
]
operations = [
migrations.RemoveField(
model_name='school',
name='picture',
),
]
|
{
"content_hash": "31c5908077516a2a9d7a82d429078f76",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 47,
"avg_line_length": 18.88235294117647,
"alnum_prop": 0.5856697819314641,
"repo_name": "AnimeshSinha1309/WebsiteEdunet",
"id": "4a6f4468e01ec7ccd4bae728e8717c38a12436e4",
"size": "393",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "WebsiteEdunet/schools/migrations/0006_remove_school_picture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1055"
},
{
"name": "CSS",
"bytes": "73628"
},
{
"name": "HTML",
"bytes": "166803"
},
{
"name": "JavaScript",
"bytes": "129963"
},
{
"name": "PowerShell",
"bytes": "1482"
},
{
"name": "Python",
"bytes": "5360033"
},
{
"name": "Tcl",
"bytes": "24396"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0050_auto_20170826_0316'),
]
operations = [
migrations.AlterField(
model_name='userreviewlist',
name='comment',
field=models.CharField(blank=True, default='', max_length=200, null=True),
),
]
|
{
"content_hash": "9269eb1470c9d6214b0f2ae816876822",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 86,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.6047619047619047,
"repo_name": "internship2016/sovolo",
"id": "8e897dc61a4aae26e59829db9e0f2f404e66f036",
"size": "493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/user/migrations/0051_auto_20170827_1615.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56092"
},
{
"name": "HTML",
"bytes": "132262"
},
{
"name": "JavaScript",
"bytes": "107993"
},
{
"name": "Python",
"bytes": "255017"
}
],
"symlink_target": ""
}
|
'''
HRF Functions
=============
Various Hemodynamic Response Functions (HRFs) implemented by NiPy
Copyright (c) 2006-2017, NIPY Developers
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the NIPY Developers nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
__all__ = ['spm_hrf',
'glover_hrf',
'spm_time_derivative',
'glover_time_derivative',
'spm_dispersion_derivative']
# only gamma and numpy are actually used below
from scipy.stats import gamma
import numpy as np
def _gamma_difference_hrf(tr, oversampling=16, time_length=32., onset=0.,
delay=6, undershoot=16., dispersion=1.,
u_dispersion=1., ratio=0.167):
""" Compute an hrf as the difference of two gamma functions
Parameters
----------
tr: float, scan repeat time, in seconds
oversampling: int, temporal oversampling factor, optional
time_length: float, hrf kernel length, in seconds
onset: float, onset of the hrf
delay: float, time to peak of the first gamma, in seconds
undershoot: float, time to peak of the undershoot gamma, in seconds
dispersion: float, dispersion of the first gamma
u_dispersion: float, dispersion of the undershoot gamma
ratio: float, relative weight of the undershoot
Returns
-------
hrf: array of shape(length / tr * oversampling, float),
hrf sampling on the oversampled time grid
"""
dt = tr / oversampling
    # the number of samples passed to linspace must be an integer
    time_stamps = np.linspace(0, time_length, int(float(time_length) / dt))
time_stamps -= onset / dt
hrf = gamma.pdf(time_stamps, delay / dispersion, dt / dispersion) - \
ratio * gamma.pdf(
time_stamps, undershoot / u_dispersion, dt / u_dispersion)
hrf /= hrf.sum()
return hrf
def spm_hrf(tr, oversampling=16, time_length=32., onset=0.):
""" Implementation of the SPM hrf model.
Args:
tr: float, scan repeat time, in seconds
oversampling: int, temporal oversampling factor, optional
time_length: float, hrf kernel length, in seconds
onset: float, onset of the response
Returns:
hrf: array of shape(length / tr * oversampling, float),
hrf sampling on the oversampled time grid
"""
return _gamma_difference_hrf(tr, oversampling, time_length, onset)
def glover_hrf(tr, oversampling=16, time_length=32., onset=0.):
""" Implementation of the Glover hrf model.
Args:
tr: float, scan repeat time, in seconds
oversampling: int, temporal oversampling factor, optional
time_length: float, hrf kernel length, in seconds
onset: float, onset of the response
Returns:
hrf: array of shape(length / tr * oversampling, float),
hrf sampling on the oversampled time grid
"""
return _gamma_difference_hrf(tr, oversampling, time_length, onset,
delay=6, undershoot=12., dispersion=.9,
u_dispersion=.9, ratio=.35)
def spm_time_derivative(tr, oversampling=16, time_length=32., onset=0.):
""" Implementation of the SPM time derivative hrf (dhrf) model.
Args:
tr: float, scan repeat time, in seconds
oversampling: int, temporal oversampling factor, optional
time_length: float, hrf kernel length, in seconds
onset: float, onset of the response
Returns:
dhrf: array of shape(length / tr, float),
dhrf sampling on the provided grid
"""
do = .1
dhrf = 1. / do * (spm_hrf(tr, oversampling, time_length, onset + do) -
spm_hrf(tr, oversampling, time_length, onset))
return dhrf
def glover_time_derivative(tr, oversampling=16, time_length=32., onset=0.):
"""Implementation of the flover time derivative hrf (dhrf) model.
Args:
tr: float, scan repeat time, in seconds
oversampling: int, temporal oversampling factor, optional
time_length: float, hrf kernel length, in seconds
onset: float, onset of the response
Returns:
dhrf: array of shape(length / tr, float),
dhrf sampling on the provided grid
"""
do = .1
dhrf = 1. / do * (glover_hrf(tr, oversampling, time_length, onset + do) -
glover_hrf(tr, oversampling, time_length, onset))
return dhrf
def spm_dispersion_derivative(tr, oversampling=16, time_length=32., onset=0.):
"""Implementation of the SPM dispersion derivative hrf model.
Args:
tr: float, scan repeat time, in seconds
oversampling: int, temporal oversampling factor, optional
time_length: float, hrf kernel length, in seconds
onset: float, onset of the response
Returns:
dhrf: array of shape(length / tr * oversampling, float),
dhrf sampling on the oversampled time grid
"""
dd = .01
dhrf = 1. / dd * (_gamma_difference_hrf(tr, oversampling, time_length,
onset, dispersion=1. + dd) -
spm_hrf(tr, oversampling, time_length, onset))
return dhrf
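# Minimal usage sketch (not part of the original module): sample the SPM and
# Glover HRFs at a hypothetical TR of 2 s and report where each peaks. The
# canonical SPM response peaks roughly 5 s after onset.
if __name__ == '__main__':
    tr = 2.
    dt = tr / 16  # default oversampling of 16
    for label, kernel in (('SPM', spm_hrf(tr)), ('Glover', glover_hrf(tr))):
        print('%s HRF peaks at %.2f s' % (label, np.argmax(kernel) * dt))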
|
{
"content_hash": "4fef8ece0eddca46532d02a7ac37e9c9",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 78,
"avg_line_length": 35.824858757062145,
"alnum_prop": 0.6633023182463333,
"repo_name": "elvandy/nltools",
"id": "175167a494634ad8a095846f9ab41f8d73b275af",
"size": "6341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nltools/external/hrf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "267341"
}
],
"symlink_target": ""
}
|
import sys
import os
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner
LONG = "Icons"
SHORT = "icn"
OBJECT = "NOTUSED"
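# LONG/SHORT name the toolbox header being scanned (Icons.h) and the prefix
# of the generated module (icngen.py); OBJECT is a placeholder that never
# matches a real type, so every entry point is emitted as a plain Function
# rather than a Method.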
def main():
input = LONG + ".h"
output = SHORT + "gen.py"
defsoutput = TOOLBOXDIR + LONG + ".py"
scanner = MyScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
print "=== Testing definitions output code ==="
execfile(defsoutput, {}, {})
print "=== Done scanning and generating, now importing the generated code... ==="
exec "import " + SHORT + "support"
print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner):
def destination(self, type, name, arglist):
classname = "Function"
listname = "functions"
if arglist:
t, n, m = arglist[0]
# This is non-functional today
if t == OBJECT and m == "InMode":
classname = "Method"
listname = "methods"
return classname, listname
def makeblacklistnames(self):
return [
"GetIconCacheData",
"SetIconCacheData",
# Constants with funny definitions
"kSelectorAllHugeData",
"kSelectorAllAvailableData",
"svAllAvailableData",
# Something in a comment accidentally seen as a const definition
"err",
# OS8 only
'IconServicesTerminate',
# Lazy, right now.
"GetIconRefFromFileInfo"
]
def makeblacklisttypes(self):
return [
"IconActionUPP",
"IconGetterUPP",
"CFragInitBlockPtr",
"CGRect_ptr",
]
def makerepairinstructions(self):
return [
]
def writeinitialdefs(self):
self.defsfile.write("def FOUR_CHAR_CODE(x): return x\n")
self.defsfile.write("from Carbon.Files import *\n")
if __name__ == "__main__":
main()
|
{
"content_hash": "124486adc10b959a010919c1b0580da1",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 85,
"avg_line_length": 29.557142857142857,
"alnum_prop": 0.5451909134847752,
"repo_name": "TathagataChakraborti/resource-conflicts",
"id": "bdc3b84413e7c7fa8709d34878ff63ce25bb946d",
"size": "2144",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "PLANROB-2015/seq-sat-lama/Python-2.5.2/Mac/Modules/icn/icnscan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "Batchfile",
"bytes": "9764"
},
{
"name": "C",
"bytes": "14253103"
},
{
"name": "C++",
"bytes": "754817"
},
{
"name": "CSS",
"bytes": "9779"
},
{
"name": "DIGITAL Command Language",
"bytes": "13234"
},
{
"name": "Emacs Lisp",
"bytes": "174752"
},
{
"name": "Groff",
"bytes": "43625"
},
{
"name": "HTML",
"bytes": "418642"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Makefile",
"bytes": "392287"
},
{
"name": "Matlab",
"bytes": "918"
},
{
"name": "Objective-C",
"bytes": "28604"
},
{
"name": "Perl",
"bytes": "163937"
},
{
"name": "Prolog",
"bytes": "66"
},
{
"name": "Python",
"bytes": "38769203"
},
{
"name": "R",
"bytes": "2349"
},
{
"name": "SAS",
"bytes": "57249"
},
{
"name": "Shell",
"bytes": "173594"
},
{
"name": "TeX",
"bytes": "5169842"
},
{
"name": "VimL",
"bytes": "9563"
},
{
"name": "Visual Basic",
"bytes": "1443"
}
],
"symlink_target": ""
}
|
import behave
from behave import step
from commons.rest_utils import RestUtils
from commons.constants import TENANT_DOC, TENANT_OWNER, TENANT_VERSION, TENANT_WSIZE, TENANT_DEFAULT_DOC
from commons.configuration import HEADERS, TENANT_ID
import commons.utils as Utils
api_utils = RestUtils()
behave.use_step_matcher("re")
@step(u'the tenant "([^"]*)"')
def set_tenant_id(context, tenant_id):
context.tenant_id = tenant_id
@step(u'a created tenant')
def set_default_tenant(context):
    # Set the default tenant_id (taken from configuration) on the context
context.tenant_id = TENANT_ID
@step(u'I retrieve the tenant information')
def retrieve_tenant_information(context):
context.req = api_utils.retrieve_information(tenant_id=context.tenant_id, headers=context.headers)
@step(u'I get the following information')
def check_tenant_information(context):
assert context.req.ok, 'Invalid HTTP status code. Status Code obtained is: {}'.format(context.req.status_code)
response = Utils.assert_json_format(context.req)
for expected_result in context.table.rows:
assert response[TENANT_DOC] == TENANT_DEFAULT_DOC, 'Expected {} is: {} \n Obtained {} is: ' \
'{}'.format(TENANT_DOC, TENANT_DEFAULT_DOC,
TENANT_DOC, response[TENANT_DOC])
assert response[TENANT_OWNER] == expected_result[TENANT_OWNER], 'Expected {} is: {} \n Obtained {} is: ' \
'{}'.format(TENANT_OWNER,
expected_result[TENANT_OWNER],
TENANT_OWNER,
response[TENANT_OWNER])
assert TENANT_VERSION in response, 'API Version not found in the response'
assert TENANT_WSIZE in response, 'WindowSize value not found in the API response'
@step(u'I obtain an "([^"]*)" and the "([^"]*)"')
def assert_error_response(context, error_code, fault_element):
Utils.assert_error_code_error(response=context.req, expected_error_code=error_code,
expected_fault_element=fault_element)
@step(u'an incorrect token with value "([^"]*)"')
def set_incorrect_token(context, token):
    # Set an incorrect header to obtain an unauthorized error
context.headers = Utils.create_header(token=token)
@step(u'I update the window size to "(?P<window_size>.*)"')
def update_window_size(context, window_size):
try:
context.window_size = int(window_size)
except ValueError:
print("Window Size can not be converted to integer")
context.window_size = window_size
context.req = api_utils.update_window_size(tenant_id=context.tenant_id, window_size=context.window_size,
headers=context.headers)
@step(u'the window size is updated in Policy Manager with value "([^"]*)"')
def assert_window_size(context, window_size):
assert context.req.ok, str(context.req.status_code) + context.req.content
response = Utils.assert_json_format(context.req)
assert str(response[TENANT_WSIZE]) == window_size
context.req = api_utils.retrieve_information(tenant_id=context.tenant_id, headers=context.headers)
response = Utils.assert_json_format(context.req)
assert str(response[TENANT_WSIZE]) == window_size
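# Illustrative feature snippet (hypothetical scenario and column names) that
# these step definitions would match:
#
#   Scenario: Get tenant information
#     Given a created tenant
#     When I retrieve the tenant information
#     Then I get the following information
#       | owner  |
#       | fiware |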
|
{
"content_hash": "966bcdd33dfdd5dc6cee5367a409616e",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 114,
"avg_line_length": 38.641304347826086,
"alnum_prop": 0.6149085794655414,
"repo_name": "Fiware/cloud.Cloto",
"id": "a8fbb00b2cc8899e2a01be30252c5ad2a9665443",
"size": "4404",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "fiware_cloto/cloto/tests/acceptance/component/features/steps/tenant_information.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "50363"
},
{
"name": "Gherkin",
"bytes": "66746"
},
{
"name": "Python",
"bytes": "324830"
},
{
"name": "Shell",
"bytes": "9496"
}
],
"symlink_target": ""
}
|
from __future__ import print_function  # print() inside lambdas below needs this on Python 2
import os, sys, time, threading, traceback
import struct, math, json
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GObject
DBusGMainLoop(set_as_default=True)
ble = dbus.Interface(dbus.SystemBus().get_object("com.devicehive.bluetooth", "/com/devicehive/bluetooth"), "com.devicehive.bluetooth")
cloud = dbus.Interface(dbus.SystemBus().get_object("com.devicehive.cloud", "/com/devicehive/cloud"), "com.devicehive.cloud")
sensors = {}
DEFAULT_PRIORITY = 100
def accelerometer_handler(mac, uuid, message):
data = bytearray.fromhex(message)
data = struct.unpack('<3b', data)
x = data[0] / 64.0
y = data[1] / 64.0
z = data[2] / -64.0
absolute = math.sqrt(x*x + y*y + z*z)
return {'x': x, 'y': y, 'z': z, 'abs': absolute}
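# Quick sanity check (illustrative, not part of the original script): the
# payload is one signed byte per axis in 1/64 g units, so the hex string
# '004000' decodes to x=0.0, y=1.0, z=-0.0 with abs=1.0.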
def accelerometer_CC2650_handler(mac, uuid, message):
data = bytearray.fromhex(message)
data = struct.unpack('<3h', data[6:12])
x = data[0] * 2.0 / 32768.0
y = data[1] * 2.0 / 32768.0
z = data[2] * 2.0 / 32768.0
absolute = math.sqrt(x*x + y*y + z*z)
return {'x': x, 'y': y, 'z': z, 'abs': absolute}
init = {
'SensorTag' : {
'F000AA1104514000b000000000000000' : {
'notification' : 'accelerometer',
'write': [('F000AA1204514000b000000000000000', '01'),
('F000AA1304514000b000000000000000', 'A0')],
'handler': accelerometer_handler
},
},
'CC2650 SensorTag' : {
'F000AA8104514000b000000000000000' : {
'notification' : 'accelerometer',
'write': [('F000AA8204514000b000000000000000', '3800'),
('F000AA8304514000b000000000000000', 'A0')],
'handler': accelerometer_CC2650_handler
},
}
}
def device_discovered(mac, name, rssi):
if 'SensorTag' not in name: return
print("Discovered %s (%s) %s" % (mac, name, rssi))
if mac in sensors and sensors[mac][1]: return
sensors[mac] = (name, False)
ble.Connect(mac, False, ignore_reply = True)
def device_connected(mac):
if mac not in sensors: return
print("Connected: %s (%s)" % (sensors[mac][0], mac))
name = sensors[mac][0]
sensors[mac] = (name, True)
if name not in init:
print("Could not find init config for %s" % name)
return
for char, config in init[name].items():
print("Configuring: %s (%s) - %s" % (name, mac, char))
for charname, value in config['write']:
ble.GattWrite(mac, charname, value, ignore_reply = True)
ble.GattNotifications(mac, char, True, ignore_reply = True)
def device_disconnected(mac):
if mac not in sensors: return
print("Disconnected: %s (%s)" % (sensors[mac][0], mac))
del sensors[mac]
def notification_received(mac, uuid, message):
if mac not in sensors: return # unknown device
name = sensors[mac][0]
if name not in init or uuid not in init[name]: return # no handlers for this notification
handler = init[name][uuid]['handler']
result = handler(mac, uuid, message)
notification = init[name][uuid]['notification']
print("MAC: %s, UUID: %s => %s" % (mac, uuid, result))
cloud.SendNotification(notification, json.dumps({
'SensorTag': mac,
'Value': result
}), DEFAULT_PRIORITY
, error_handler=lambda err: print(err)
, reply_handler=lambda *args: None)
ble.connect_to_signal("PeripheralDiscovered", device_discovered)
ble.connect_to_signal("PeripheralConnected", device_connected)
ble.connect_to_signal("PeripheralDisconnected", device_disconnected)
ble.connect_to_signal("NotificationReceived", notification_received)
exiting = threading.Event()
def worker():
while not exiting.is_set():
ble.ScanStart()
exiting.wait(5)
ble.ScanStop()
exiting.wait(10)
def main():
# init d-bus
GObject.threads_init()
dbus.mainloop.glib.threads_init()
# start mainloop
loop = GObject.MainLoop()
worker_thread = threading.Thread(target=worker,)
worker_thread.start()
try:
loop.run()
except (KeyboardInterrupt, SystemExit):
exiting.set()
loop.quit()
worker_thread.join()
if __name__ == "__main__":
main()
|
{
"content_hash": "9dea985b1aec619e6195e5517c40ef86",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 134,
"avg_line_length": 30.47142857142857,
"alnum_prop": 0.6223628691983122,
"repo_name": "devicehive/IoT-framework",
"id": "62a193ee5e14aaed517504bd34346c669f3486a1",
"size": "4286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/sensortag-cloud.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "26477"
},
{
"name": "C++",
"bytes": "2143"
},
{
"name": "Go",
"bytes": "91380"
},
{
"name": "JavaScript",
"bytes": "8912"
},
{
"name": "Python",
"bytes": "17513"
},
{
"name": "Shell",
"bytes": "20104"
}
],
"symlink_target": ""
}
|
import numpy as np
import lms_code.lib.rep2 as rep2
import lms_code.plots.plot_all as lms_plot
from lms_code.lib.volume_mesh import VolumeMesh
import sys
sys.setrecursionlimit(50000)
def get_leftmost(mesh):
leftmost_pt = np.array([1e9, 1e9])
for e in mesh:
for v in [e.vertex1, e.vertex2]:
if v.loc[0] < leftmost_pt[0]:
leftmost_pt = v.loc
return leftmost_pt
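# Illustrative use (with hypothetical stand-ins for the mesh types): any edge
# objects exposing vertex1/vertex2 attributes whose 'loc' is a 2-element
# array will work, e.g.
#   Vertex = namedtuple('Vertex', 'loc'); Edge = namedtuple('Edge', 'vertex1 vertex2')
#   e = Edge(Vertex(np.array([0., 1.])), Vertex(np.array([2., 3.])))
#   get_leftmost([e])  # -> array([ 0., 1.])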
def build_decoupled_boundary(mesh, int_params):
leftmost_pt = get_leftmost(mesh)
left_of_that = [int_params['min_x'], leftmost_pt[1]]
the_edge = [0, 1]
boundary = [[leftmost_pt, left_of_that], [the_edge]]
return boundary
def mesh_interior(bem_soln, int_params):
bem_mesh = bem_soln['combined_mesh']
region = ((int_params['min_x'], int_params['max_x']),
(int_params['min_y'], int_params['max_y'] + 1))
decoupled = build_decoupled_boundary(bem_soln['fault_mesh'], int_params)
vm = VolumeMesh(bem_mesh,
bem_soln['fault_mesh'],
region,
refine_length = int_params['edge_length_threshold'],
refine_area = int_params['tri_area_threshold'],
near_edge_factor = int_params['near_edge_refine'],
extra_edges = [decoupled]
)
return vm
def main():
model = 'all_details'
bem_soln = rep2.load('bem_' + model)
int_params = lms_plot.interior_params()
vm = mesh_interior(bem_soln, int_params)
int_eval = vm.get_evaluator()
rep2.save('interior_mesh_' + model, int_eval)
if __name__ == "__main__":
main()
|
{
"content_hash": "05029069f75b3ea67128d98601803938",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 76,
"avg_line_length": 32.42,
"alnum_prop": 0.5854410857495373,
"repo_name": "tbenthompson/LMS_public",
"id": "baed9059eb312b9acac4ce335ea3d750df04a4c0",
"size": "1621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lms_code/analysis/mesh_interior.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "3914"
},
{
"name": "Python",
"bytes": "153738"
},
{
"name": "Shell",
"bytes": "195"
}
],
"symlink_target": ""
}
|
"""
Simple man page writer for reStructuredText.
Man pages (short for "manual pages") contain system documentation on unix-like
systems. The pages are grouped in numbered sections:
1 executable programs and shell commands
2 system calls
3 library functions
4 special files
5 file formats
6 games
7 miscellaneous
8 system administration
Man pages are written in *troff*, a text file formatting system.
See http://www.tldp.org/HOWTO/Man-Page for a start.
Man pages have no subsections, only parts.
Standard parts
NAME ,
SYNOPSIS ,
DESCRIPTION ,
OPTIONS ,
FILES ,
SEE ALSO ,
BUGS ,
and
AUTHOR .
A unix-like system keeps an index of the DESCRIPTIONs, which is accessible
by the commands whatis and apropos.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import time
import re
import docutils
from docutils import nodes, utils, writers, languages
import roman
FIELD_LIST_INDENT = 7
DEFINITION_LIST_INDENT = 7
OPTION_LIST_INDENT = 7
BLOCKQOUTE_INDENT = 3.5
# Define two macros so man/roff can calculate the
# indent/unindent margins by itself
MACRO_DEF = (r""".
.nr rst2man-indent-level 0
.
.de1 rstReportMargin
\\$1 \\n[an-margin]
level \\n[rst2man-indent-level]
level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
-
\\n[rst2man-indent0]
\\n[rst2man-indent1]
\\n[rst2man-indent2]
..
.de1 INDENT
.\" .rstReportMargin pre:
. RS \\$1
. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
. nr rst2man-indent-level +1
.\" .rstReportMargin post:
..
.de UNINDENT
. RE
.\" indent \\n[an-margin]
.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
.nr rst2man-indent-level -1
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
""")
class Writer(writers.Writer):
    supported = ('manpage',)
"""Formats this writer supports."""
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = Translator
def translate(self):
visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.output = visitor.astext()
class Table:
def __init__(self):
self._rows = []
self._options = ['center', ]
self._tab_char = '\t'
self._coldefs = []
def new_row(self):
self._rows.append([])
def append_separator(self, separator):
"""Append the separator for table head."""
self._rows.append([separator])
def append_cell(self, cell_lines):
"""cell_lines is an array of lines"""
start = 0
if len(cell_lines)>0 and cell_lines[0] == '.sp\n':
start = 1
self._rows[-1].append(cell_lines[start:])
if len(self._coldefs) < len(self._rows[-1]):
self._coldefs.append('l')
def _minimize_cell(self, cell_lines):
"""Remove leading and trailing blank and ``.sp`` lines"""
while (cell_lines and cell_lines[0] in ('\n', '.sp\n')):
del cell_lines[0]
while (cell_lines and cell_lines[-1] in ('\n', '.sp\n')):
del cell_lines[-1]
def as_list(self):
text = ['.TS\n']
text.append(' '.join(self._options) + ';\n')
text.append('|%s|.\n' % ('|'.join(self._coldefs)))
for row in self._rows:
# row = array of cells. cell = array of lines.
text.append('_\n') # line above
text.append('T{\n')
for i in range(len(row)):
cell = row[i]
self._minimize_cell(cell)
text.extend(cell)
if not text[-1].endswith('\n'):
text[-1] += '\n'
if i < len(row)-1:
text.append('T}'+self._tab_char+'T{\n')
else:
text.append('T}\n')
text.append('_\n')
text.append('.TE\n')
return text
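    # Illustrative output (sketch, not executed here) for one row with two
    # cells, where <TAB> stands for the tab separator:
    #   .TS
    #   center;
    #   |l|l|.
    #   _
    #   T{
    #   first cell
    #   T}<TAB>T{
    #   second cell
    #   T}
    #   _
    #   .TE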
class Translator(nodes.NodeVisitor):
""""""
words_and_spaces = re.compile(r'\S+| +|\n')
    document_start = """Man page generated from reStructuredText."""
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
self.settings = settings = document.settings
lcode = settings.language_code
self.language = languages.get_language(lcode)
self.head = []
self.body = []
self.foot = []
self.section_level = 0
self.context = []
self.topic_class = ''
self.colspecs = []
self.compact_p = 1
self.compact_simple = None
# the list style "*" bullet or "#" numbered
self._list_char = []
        # writing the header .TH and .SH NAME is postponed until after
        # docinfo.
self._docinfo = {
"title" : "", "title_upper": "",
"subtitle" : "",
"manual_section" : "", "manual_group" : "",
"author" : [],
"date" : "",
"copyright" : "",
"version" : "",
}
self._docinfo_keys = [] # a list to keep the sequence as in source.
self._docinfo_names = {} # to get name from text not normalized.
self._in_docinfo = None
self._active_table = None
self._in_literal = False
self.header_written = 0
self._line_block = 0
self.authors = []
self.section_level = 0
self._indent = [0]
# central definition of simple processing rules
# what to output on : visit, depart
# Do not use paragraph requests ``.PP`` because these set indentation.
# use ``.sp``. Remove superfluous ``.sp`` in ``astext``.
#
# Fonts are put on a stack, the top one is used.
# ``.ft P`` or ``\\fP`` pop from stack.
# ``B`` bold, ``I`` italic, ``R`` roman should be available.
# Hopefully ``C`` courier too.
self.defs = {
'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
'definition_list_item' : ('.TP', ''),
'field_name' : ('.TP\n.B ', '\n'),
'literal' : ('\\fC', '\\fP'),
'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),
'option_list_item' : ('.TP\n', ''),
'reference' : (r'\fI\%', r'\fP'),
'emphasis': ('\\fI', '\\fP'),
'strong' : ('\\fB', '\\fP'),
'term' : ('\n.B ', '\n'),
'title_reference' : ('\\fI', '\\fP'),
'topic-title' : ('.SS ', ),
'sidebar-title' : ('.SS ', ),
'problematic' : ('\n.nf\n', '\n.fi\n'),
}
        # NOTE don't specify the newline before a dot-command, but ensure
# it is there.
def comment_begin(self, text):
"""Return commented version of the passed text WITHOUT end of
line/comment."""
prefix = '.\\" '
out_text = ''.join(
[(prefix + in_line + '\n')
for in_line in text.split('\n')])
return out_text
def comment(self, text):
"""Return commented version of the passed text."""
return self.comment_begin(text)+'.\n'
def ensure_eol(self):
"""Ensure the last line in body is terminated by new line."""
if self.body[-1][-1] != '\n':
self.body.append('\n')
def astext(self):
"""Return the final formatted document as a string."""
if not self.header_written:
# ensure we get a ".TH" as viewers require it.
self.head.append(self.header())
# filter body
for i in xrange(len(self.body)-1,0,-1):
# remove superfluous vertical gaps.
if self.body[i] == '.sp\n':
if self.body[i-1][:4] in ('.BI ','.IP '):
self.body[i] = '.\n'
elif (self.body[i-1][:3] == '.B ' and
self.body[i-2][:4] == '.TP\n'):
self.body[i] = '.\n'
elif (self.body[i-1] == '\n' and
self.body[i-2][0] != '.' and
(self.body[i-3][:7] == '.TP\n.B '
or self.body[i-3][:4] == '\n.B ')
):
self.body[i] = '.\n'
return ''.join(self.head + self.body + self.foot)
def deunicode(self, text):
text = text.replace(u'\xa0', '\\ ')
text = text.replace(u'\u2020', '\\(dg')
return text
def visit_Text(self, node):
text = node.astext()
text = text.replace('\\','\\e')
replace_pairs = [
(u'-', ur'\-'),
(u'\'', ur'\(aq'),
(u'´', ur'\''),
(u'`', ur'\(ga'),
]
for (in_char, out_markup) in replace_pairs:
text = text.replace(in_char, out_markup)
# unicode
text = self.deunicode(text)
if self._in_literal:
# prevent interpretation of "." at line start
if text[0] == '.':
text = '\\&' + text
text = text.replace('\n.', '\n\\&.')
self.body.append(text)
def depart_Text(self, node):
pass
def list_start(self, node):
class enum_char:
enum_style = {
'bullet' : '\\(bu',
'emdash' : '\\(em',
}
def __init__(self, style):
self._style = style
if node.has_key('start'):
self._cnt = node['start'] - 1
else:
self._cnt = 0
self._indent = 2
if style == 'arabic':
                    # indentation depends on the number of children
# and start value.
self._indent = len(str(len(node.children)))
self._indent += len(str(self._cnt)) + 1
elif style == 'loweralpha':
self._cnt += ord('a') - 1
self._indent = 3
elif style == 'upperalpha':
self._cnt += ord('A') - 1
self._indent = 3
elif style.endswith('roman'):
self._indent = 5
def next(self):
if self._style == 'bullet':
return self.enum_style[self._style]
elif self._style == 'emdash':
return self.enum_style[self._style]
self._cnt += 1
# TODO add prefix postfix
if self._style == 'arabic':
return "%d." % self._cnt
elif self._style in ('loweralpha', 'upperalpha'):
return "%c." % self._cnt
elif self._style.endswith('roman'):
res = roman.toRoman(self._cnt) + '.'
if self._style.startswith('upper'):
return res.upper()
return res.lower()
else:
return "%d." % self._cnt
def get_width(self):
return self._indent
def __repr__(self):
return 'enum_style-%s' % list(self._style)
if node.has_key('enumtype'):
self._list_char.append(enum_char(node['enumtype']))
else:
self._list_char.append(enum_char('bullet'))
if len(self._list_char) > 1:
# indent nested lists
self.indent(self._list_char[-2].get_width())
else:
self.indent(self._list_char[-1].get_width())
def list_end(self):
self.dedent()
self._list_char.pop()
def header(self):
tmpl = (".TH %(title_upper)s %(manual_section)s"
" \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
".SH NAME\n"
"%(title)s \- %(subtitle)s\n")
return tmpl % self._docinfo
def append_header(self):
"""append header with .TH and .SH NAME"""
# NOTE before everything
# .TH title_upper section date source manual
if self.header_written:
return
self.body.append(self.header())
self.body.append(MACRO_DEF)
self.header_written = 1
def visit_address(self, node):
self.visit_docinfo_item(node, 'address')
def depart_address(self, node):
pass
def visit_admonition(self, node, name=None):
if name:
self.body.append('.IP %s\n' %
self.language.labels.get(name, name))
def depart_admonition(self, node):
self.body.append('.RE\n')
def visit_attention(self, node):
self.visit_admonition(node, 'attention')
depart_attention = depart_admonition
def visit_docinfo_item(self, node, name):
if name == 'author':
self._docinfo[name].append(node.astext())
else:
self._docinfo[name] = node.astext()
self._docinfo_keys.append(name)
raise nodes.SkipNode
def depart_docinfo_item(self, node):
pass
def visit_author(self, node):
self.visit_docinfo_item(node, 'author')
depart_author = depart_docinfo_item
def visit_authors(self, node):
# _author is called anyway.
pass
def depart_authors(self, node):
pass
def visit_block_quote(self, node):
        # BUG/HACK: indent always uses the _last_ indentation,
# thus we need two of them.
self.indent(BLOCKQOUTE_INDENT)
self.indent(0)
def depart_block_quote(self, node):
self.dedent()
self.dedent()
def visit_bullet_list(self, node):
self.list_start(node)
def depart_bullet_list(self, node):
self.list_end()
def visit_caption(self, node):
pass
def depart_caption(self, node):
pass
def visit_caution(self, node):
self.visit_admonition(node, 'caution')
depart_caution = depart_admonition
def visit_citation(self, node):
num,text = node.astext().split(None,1)
num = num.strip()
self.body.append('.IP [%s] 5\n' % num)
def depart_citation(self, node):
pass
def visit_citation_reference(self, node):
self.body.append('['+node.astext()+']')
raise nodes.SkipNode
def visit_classifier(self, node):
pass
def depart_classifier(self, node):
pass
def visit_colspec(self, node):
self.colspecs.append(node)
def depart_colspec(self, node):
pass
def write_colspecs(self):
self.body.append("%s.\n" % ('L '*len(self.colspecs)))
def visit_comment(self, node,
sub=re.compile('-(?=-)').sub):
self.body.append(self.comment(node.astext()))
raise nodes.SkipNode
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact')
depart_contact = depart_docinfo_item
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def visit_danger(self, node):
self.visit_admonition(node, 'danger')
depart_danger = depart_admonition
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
pass
def depart_definition(self, node):
pass
def visit_definition_list(self, node):
self.indent(DEFINITION_LIST_INDENT)
def depart_definition_list(self, node):
self.dedent()
def visit_definition_list_item(self, node):
self.body.append(self.defs['definition_list_item'][0])
def depart_definition_list_item(self, node):
self.body.append(self.defs['definition_list_item'][1])
def visit_description(self, node):
pass
def depart_description(self, node):
pass
def visit_docinfo(self, node):
self._in_docinfo = 1
def depart_docinfo(self, node):
self._in_docinfo = None
# NOTE nothing should be written before this
self.append_header()
def visit_doctest_block(self, node):
self.body.append(self.defs['literal_block'][0])
self._in_literal = True
def depart_doctest_block(self, node):
self._in_literal = False
self.body.append(self.defs['literal_block'][1])
def visit_document(self, node):
# no blank line between comment and header.
self.body.append(self.comment(self.document_start).rstrip()+'\n')
        # writing the header is postponed
self.header_written = 0
def depart_document(self, node):
if self._docinfo['author']:
self.body.append('.SH AUTHOR\n%s\n'
% ', '.join(self._docinfo['author']))
skip = ('author', 'copyright', 'date',
'manual_group', 'manual_section',
'subtitle',
'title', 'title_upper', 'version')
for name in self._docinfo_keys:
if name == 'address':
self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % (
self.language.labels.get(name, name),
self.defs['indent'][0] % 0,
self.defs['indent'][0] % BLOCKQOUTE_INDENT,
self._docinfo[name],
self.defs['indent'][1],
self.defs['indent'][1],
) )
elif not name in skip:
if name in self._docinfo_names:
label = self._docinfo_names[name]
else:
label = self.language.labels.get(name, name)
self.body.append("\n%s: %s\n" % (label, self._docinfo[name]) )
if self._docinfo['copyright']:
self.body.append('.SH COPYRIGHT\n%s\n'
% self._docinfo['copyright'])
self.body.append( self.comment(
'Generated by docutils manpage writer.\n' ) )
def visit_emphasis(self, node):
self.body.append(self.defs['emphasis'][0])
def depart_emphasis(self, node):
self.body.append(self.defs['emphasis'][1])
def visit_entry(self, node):
# a cell in a table row
if 'morerows' in node:
self.document.reporter.warning('"table row spanning" not supported',
base_node=node)
if 'morecols' in node:
self.document.reporter.warning(
'"table cell spanning" not supported', base_node=node)
self.context.append(len(self.body))
def depart_entry(self, node):
start = self.context.pop()
self._active_table.append_cell(self.body[start:])
del self.body[start:]
def visit_enumerated_list(self, node):
self.list_start(node)
def depart_enumerated_list(self, node):
self.list_end()
def visit_error(self, node):
self.visit_admonition(node, 'error')
depart_error = depart_admonition
def visit_field(self, node):
pass
def depart_field(self, node):
pass
def visit_field_body(self, node):
if self._in_docinfo:
name_normalized = self._field_name.lower().replace(" ","_")
self._docinfo_names[name_normalized] = self._field_name
self.visit_docinfo_item(node, name_normalized)
raise nodes.SkipNode
def depart_field_body(self, node):
pass
def visit_field_list(self, node):
self.indent(FIELD_LIST_INDENT)
def depart_field_list(self, node):
self.dedent()
def visit_field_name(self, node):
if self._in_docinfo:
self._field_name = node.astext()
raise nodes.SkipNode
else:
self.body.append(self.defs['field_name'][0])
def depart_field_name(self, node):
self.body.append(self.defs['field_name'][1])
def visit_figure(self, node):
self.indent(2.5)
self.indent(0)
def depart_figure(self, node):
self.dedent()
self.dedent()
def visit_footer(self, node):
self.document.reporter.warning('"footer" not supported',
base_node=node)
def depart_footer(self, node):
pass
def visit_footnote(self, node):
num,text = node.astext().split(None,1)
num = num.strip()
self.body.append('.IP [%s] 5\n' % self.deunicode(num))
def depart_footnote(self, node):
pass
def footnote_backrefs(self, node):
self.document.reporter.warning('"footnote_backrefs" not supported',
base_node=node)
def visit_footnote_reference(self, node):
self.body.append('['+self.deunicode(node.astext())+']')
raise nodes.SkipNode
def depart_footnote_reference(self, node):
pass
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
raise NotImplementedError, node.astext()
def depart_header(self, node):
pass
def visit_hint(self, node):
self.visit_admonition(node, 'hint')
depart_hint = depart_admonition
def visit_subscript(self, node):
self.body.append('\\s-2\\d')
def depart_subscript(self, node):
self.body.append('\\u\\s0')
def visit_superscript(self, node):
self.body.append('\\s-2\\u')
def depart_superscript(self, node):
self.body.append('\\d\\s0')
def visit_attribution(self, node):
self.body.append('\\(em ')
def depart_attribution(self, node):
self.body.append('\n')
def visit_image(self, node):
self.document.reporter.warning('"image" not supported',
base_node=node)
text = []
if 'alt' in node.attributes:
text.append(node.attributes['alt'])
if 'uri' in node.attributes:
text.append(node.attributes['uri'])
self.body.append('[image: %s]\n' % ('/'.join(text)))
raise nodes.SkipNode
def visit_important(self, node):
self.visit_admonition(node, 'important')
depart_important = depart_admonition
def visit_label(self, node):
# footnote and citation
if (isinstance(node.parent, nodes.footnote)
or isinstance(node.parent, nodes.citation)):
raise nodes.SkipNode
self.document.reporter.warning('"unsupported "label"',
base_node=node)
self.body.append('[')
def depart_label(self, node):
self.body.append(']\n')
def visit_legend(self, node):
pass
def depart_legend(self, node):
pass
# WHAT should we use .INDENT, .UNINDENT ?
def visit_line_block(self, node):
self._line_block += 1
if self._line_block == 1:
self.body.append('.nf\n')
else:
self.body.append('.in +2\n')
def depart_line_block(self, node):
self._line_block -= 1
if self._line_block == 0:
self.body.append('.fi\n')
self.body.append('.sp\n')
else:
self.body.append('.in -2\n')
def visit_line(self, node):
pass
def depart_line(self, node):
self.body.append('\n')
def visit_list_item(self, node):
# man 7 man argues to use ".IP" instead of ".TP"
self.body.append('.IP %s %d\n' % (
self._list_char[-1].next(),
self._list_char[-1].get_width(),) )
def depart_list_item(self, node):
pass
def visit_literal(self, node):
self.body.append(self.defs['literal'][0])
def depart_literal(self, node):
self.body.append(self.defs['literal'][1])
def visit_literal_block(self, node):
self.body.append(self.defs['literal_block'][0])
self._in_literal = True
def depart_literal_block(self, node):
self._in_literal = False
self.body.append(self.defs['literal_block'][1])
def visit_meta(self, node):
raise NotImplementedError, node.astext()
def depart_meta(self, node):
pass
def visit_note(self, node):
self.visit_admonition(node, 'note')
depart_note = depart_admonition
def indent(self, by=0.5):
# if we are in a section ".SH" there already is a .RS
step = self._indent[-1]
self._indent.append(by)
self.body.append(self.defs['indent'][0] % step)
def dedent(self):
self._indent.pop()
self.body.append(self.defs['indent'][1])
def visit_option_list(self, node):
self.indent(OPTION_LIST_INDENT)
def depart_option_list(self, node):
self.dedent()
def visit_option_list_item(self, node):
# one item of the list
self.body.append(self.defs['option_list_item'][0])
def depart_option_list_item(self, node):
self.body.append(self.defs['option_list_item'][1])
def visit_option_group(self, node):
# as one option could have several forms it is a group
# options without parameter bold only, .B, -v
# options with parameter bold italic, .BI, -f file
#
# we do not know if .B or .BI
self.context.append('.B') # blind guess
self.context.append(len(self.body)) # to be able to insert later
self.context.append(0) # option counter
def depart_option_group(self, node):
self.context.pop() # the counter
start_position = self.context.pop()
text = self.body[start_position:]
del self.body[start_position:]
self.body.append('%s%s\n' % (self.context.pop(), ''.join(text)))
def visit_option(self, node):
# each form of the option will be presented separately
if self.context[-1]>0:
self.body.append(', ')
if self.context[-3] == '.BI':
self.body.append('\\')
self.body.append(' ')
def depart_option(self, node):
self.context[-1] += 1
def visit_option_string(self, node):
# do not know if .B or .BI
pass
def depart_option_string(self, node):
pass
def visit_option_argument(self, node):
self.context[-3] = '.BI' # bold/italic alternate
if node['delimiter'] != ' ':
self.body.append('\\fB%s ' % node['delimiter'] )
elif self.body[len(self.body)-1].endswith('='):
# a blank only means no blank in output, just changing font
self.body.append(' ')
else:
# blank backslash blank, switch font then a blank
self.body.append(' \\ ')
def depart_option_argument(self, node):
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
pass
def visit_paragraph(self, node):
# ``.PP`` : Start standard indented paragraph.
# ``.LP`` : Start block paragraph, all except the first.
# ``.P [type]`` : Start paragraph type.
        # NOTE don't use paragraph starts because they reset indentation.
# ``.sp`` is only vertical space
self.ensure_eol()
self.body.append('.sp\n')
def depart_paragraph(self, node):
self.body.append('\n')
def visit_problematic(self, node):
self.body.append(self.defs['problematic'][0])
def depart_problematic(self, node):
self.body.append(self.defs['problematic'][1])
def visit_raw(self, node):
if node.get('format') == 'manpage':
self.body.append(node.astext() + "\n")
# Keep non-manpage raw text out of output:
raise nodes.SkipNode
def visit_reference(self, node):
"""E.g. link or email address."""
self.body.append(self.defs['reference'][0])
def depart_reference(self, node):
self.body.append(self.defs['reference'][1])
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision')
depart_revision = depart_docinfo_item
def visit_row(self, node):
self._active_table.new_row()
def depart_row(self, node):
pass
def visit_section(self, node):
self.section_level += 1
def depart_section(self, node):
self.section_level -= 1
def visit_status(self, node):
self.visit_docinfo_item(node, 'status')
depart_status = depart_docinfo_item
def visit_strong(self, node):
self.body.append(self.defs['strong'][0])
def depart_strong(self, node):
self.body.append(self.defs['strong'][1])
def visit_substitution_definition(self, node):
"""Internal only."""
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.document.reporter.warning('"substitution_reference" not supported',
base_node=node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
self.body.append(self.defs['strong'][0])
elif isinstance(node.parent, nodes.document):
self.visit_docinfo_item(node, 'subtitle')
elif isinstance(node.parent, nodes.section):
self.body.append(self.defs['strong'][0])
def depart_subtitle(self, node):
# document subtitle calls SkipNode
self.body.append(self.defs['strong'][1]+'\n.PP\n')
def visit_system_message(self, node):
# TODO add report_level
#if node['level'] < self.document.reporter['writer'].report_level:
# Level is too low to display:
# raise nodes.SkipNode
attr = {}
backref_text = ''
if node.hasattr('id'):
attr['name'] = node['id']
if node.hasattr('line'):
line = ', line %s' % node['line']
else:
line = ''
self.body.append('.IP "System Message: %s/%s (%s:%s)"\n'
% (node['type'], node['level'], node['source'], line))
def depart_system_message(self, node):
pass
def visit_table(self, node):
self._active_table = Table()
def depart_table(self, node):
self.ensure_eol()
self.body.extend(self._active_table.as_list())
self._active_table = None
def visit_target(self, node):
# targets are in-document hyper targets, without any use for man-pages.
raise nodes.SkipNode
def visit_tbody(self, node):
pass
def depart_tbody(self, node):
pass
def visit_term(self, node):
self.body.append(self.defs['term'][0])
def depart_term(self, node):
self.body.append(self.defs['term'][1])
def visit_tgroup(self, node):
pass
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
# MAYBE double line '='
pass
def depart_thead(self, node):
# MAYBE double line '='
pass
def visit_tip(self, node):
self.visit_admonition(node, 'tip')
depart_tip = depart_admonition
def visit_title(self, node):
if isinstance(node.parent, nodes.topic):
self.body.append(self.defs['topic-title'][0])
elif isinstance(node.parent, nodes.sidebar):
self.body.append(self.defs['sidebar-title'][0])
elif isinstance(node.parent, nodes.admonition):
self.body.append('.IP "')
elif self.section_level == 0:
self._docinfo['title'] = node.astext()
# document title for .TH
self._docinfo['title_upper'] = node.astext().upper()
raise nodes.SkipNode
elif self.section_level == 1:
self.body.append('.SH ')
else:
self.body.append('.SS ')
def depart_title(self, node):
if isinstance(node.parent, nodes.admonition):
self.body.append('"')
self.body.append('\n')
def visit_title_reference(self, node):
"""inline citation reference"""
self.body.append(self.defs['title_reference'][0])
def depart_title_reference(self, node):
self.body.append(self.defs['title_reference'][1])
def visit_topic(self, node):
pass
def depart_topic(self, node):
pass
def visit_sidebar(self, node):
pass
def depart_sidebar(self, node):
pass
def visit_rubric(self, node):
pass
def depart_rubric(self, node):
pass
def visit_transition(self, node):
# .PP Begin a new paragraph and reset prevailing indent.
# .sp N leaves N lines of blank space.
# .ce centers the next line
self.body.append('\n.sp\n.ce\n----\n')
def depart_transition(self, node):
self.body.append('\n.ce 0\n.sp\n')
def visit_version(self, node):
self.visit_docinfo_item(node, 'version')
def visit_warning(self, node):
self.visit_admonition(node, 'warning')
depart_warning = depart_admonition
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s'
% node.__class__.__name__)
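if __name__ == '__main__':
    # Hedged usage sketch, not part of the original writer: render a tiny
    # reST document to roff through the standard docutils front end, which
    # drives the visit_/depart_ methods defined above.
    from docutils.core import publish_string
    sample = 'Hello\n=====\n\nA *minimal* man page.\n'
    print(publish_string(source=sample, writer_name='manpage').decode('utf-8'))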
# vim: set fileencoding=utf-8 et ts=4 ai :
|
{
"content_hash": "761698b9e38a015811eebbbb1aa671e5",
"timestamp": "",
"source": "github",
"line_count": 1099,
"max_line_length": 80,
"avg_line_length": 30.461328480436762,
"alnum_prop": 0.5438958090629388,
"repo_name": "aswadrangnekar/khandelwal",
"id": "a02b92d4527fd431d0e02969169e2cbc9c00c43f",
"size": "33672",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "app/lib/docutils/writers/manpage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "131360"
},
{
"name": "Python",
"bytes": "2611746"
},
{
"name": "Ruby",
"bytes": "293"
}
],
"symlink_target": ""
}
|
import keras
import numpy as np
from skimage import transform
# Frozen InceptionResNetV2 base, used below as a fixed feature extractor.
model = keras.applications.InceptionResNetV2(include_top=False)
(X_train, y_train), (X_test, y_test) = keras.datasets.cifar10.load_data()
def generate(X, y, batch_size):
    # Endless batch generator: resize each sampled image to the 299x299
    # input InceptionResNetV2 expects, run it through the frozen base,
    # and yield (bottleneck features, labels) for training the small head.
    while True:
        choices = np.random.choice(X.shape[0], (batch_size,))
        imgs = []
        for i in choices:
            img = transform.resize(X[i], (299, 299), mode='constant')
            imgs.append(img)
        imgs = np.array(imgs)
        pred = model.predict(imgs)
        yield pred, y[choices]
y_one_hot = keras.utils.to_categorical(y_train, 10)
y_test_one_hot = keras.utils.to_categorical(y_test, 10)
model_x = keras.layers.Input((8, 8, 1536))
model_y = keras.layers.AveragePooling2D((8, 8))(model_x)
model_y = keras.layers.Flatten()(model_y)
model_y = keras.layers.Dense(10, activation='softmax')(model_y)
model_new = keras.models.Model(model_x, model_y)
model_new.compile('adam', 'categorical_crossentropy', ['accuracy'])
model_new.fit_generator(generator=generate(X_train, y_one_hot, 50), steps_per_epoch=50, epochs=1)
model_new.evaluate_generator(generator=generate(X_test, y_test_one_hot, 1), steps=50)
model_new.save_weights('./weights/weight_cifar10_test.hdf5')
model_new.save('./weights/model_cifar10_test.hdf5')
model_x = keras.layers.Input((8, 8, 1536))
model_y = keras.layers.GlobalAveragePooling2D()(model_x)
model_y = keras.layers.Dense(10, activation='softmax')(model_y)
model_new_2 = keras.models.Model(model_x, model_y)
model_new_2.compile('adam', 'categorical_crossentropy', ['accuracy'])
model_new_2.fit_generator(generator=generate(X_train, y_one_hot, 50), steps_per_epoch=100, epochs=1)
model_new_2.evaluate_generator(generator=generate(X_test, y_test_one_hot, 1), steps=50)
model_new_2.save_weights('./weights/weight_cifar10_test_2.hdf5')
model_new_2.save('./weights/model_cifar10_test_2.hdf5')
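# Hedged follow-up sketch (an assumption, not in the original script): reload
# the saved head and classify one CIFAR-10 test image through the same frozen
# InceptionResNetV2 feature extractor used above.
head = keras.models.load_model('./weights/model_cifar10_test.hdf5')
img = transform.resize(X_test[0], (299, 299), mode='constant')
features = model.predict(np.expand_dims(img, axis=0))  # shape (1, 8, 8, 1536)
probs = head.predict(features)
print('predicted class:', np.argmax(probs), 'true class:', int(y_test[0]))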
|
{
"content_hash": "2f7f1e2ca01c1bd6ccbb3274b881532c",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 100,
"avg_line_length": 34.42857142857143,
"alnum_prop": 0.7017634854771784,
"repo_name": "amozie/amozie",
"id": "dd0902b581a41edc51cbcdb3969982ce1015ea07",
"size": "1928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testzie/cifar10_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5024031"
},
{
"name": "Python",
"bytes": "209777"
}
],
"symlink_target": ""
}
|
from bottle import request, response, static_file, redirect
from admin.config.config import ROOT
#@render('home/index.html')
def index():
return redirect('/project/index/1')
def static(path):
    return static_file(path, ROOT + '/static')
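# Hedged wiring sketch (assumption: the real project registers these
# callbacks elsewhere); a plain Bottle app could mount them like this:
#
#     from bottle import Bottle
#     app = Bottle()
#     app.route('/', callback=index)
#     app.route('/static/<path:path>', callback=static)
#     app.run(host='127.0.0.1', port=8080)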
|
{
"content_hash": "1d580fe8011dbc7791db79c137439597",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 59,
"avg_line_length": 27.11111111111111,
"alnum_prop": 0.7254098360655737,
"repo_name": "seraphlnWu/in_trip",
"id": "780ffd4043b33b3774f3bd8a2acd29b64df308d1",
"size": "259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "in_trip/admin/controllers/home.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20830"
},
{
"name": "Java",
"bytes": "4625"
},
{
"name": "JavaScript",
"bytes": "76507"
},
{
"name": "Python",
"bytes": "349718"
},
{
"name": "Scheme",
"bytes": "6001"
},
{
"name": "Shell",
"bytes": "7188"
}
],
"symlink_target": ""
}
|
def flatten(nested_list):
return [item for sublist in nested_list for item in sublist]
def mean(values):
return sum(values) / len(values)
def sign(value):
if value < 0: return -1
if value == 0: return 0
if value > 0: return 1
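# Hedged usage sketch, not in the original module: the expected return values.
if __name__ == '__main__':
    assert flatten([[1, 2], [3]]) == [1, 2, 3]
    assert mean([1, 2, 3]) == 2
    assert [sign(v) for v in (-5, 0, 7)] == [-1, 0, 1]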
|
{
"content_hash": "850f0a3dd852eefbb23167b34e6ac46f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 64,
"avg_line_length": 21.166666666666668,
"alnum_prop": 0.6377952755905512,
"repo_name": "probablytom/msci-model",
"id": "107f9d8a67131900ce771fedf0e74aa6366190e2",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resp_base/utility_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59874"
}
],
"symlink_target": ""
}
|
import os
import sys
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.http import HttpResponse
from django.core.management import execute_from_command_line
filename = os.path.splitext(os.path.basename(__file__))[0]
urlpatterns = patterns(
'',
url(r'psql/select', '%s.psql_select' % filename, name='home'),
)
def psql_select(request):
return HttpResponse('Django rules!')
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
execute_from_command_line([sys.argv[0], 'runserver'])
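# Hedged usage note (assuming the default runserver address): the single
# route above answers a plain GET, e.g.
#
#     $ curl http://127.0.0.1:8000/psql/select
#     Django rules!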
|
{
"content_hash": "d661b3cb119bb6bca8a7791c0860b2a6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 26.727272727272727,
"alnum_prop": 0.7091836734693877,
"repo_name": "vvv-v13/backend-tools",
"id": "e9d4597623275a0c99f9c1d3a44ba1920e4b91f1",
"size": "588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/django/app.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Go",
"bytes": "4254"
},
{
"name": "JavaScript",
"bytes": "2656"
},
{
"name": "Python",
"bytes": "7038"
},
{
"name": "Shell",
"bytes": "229"
}
],
"symlink_target": ""
}
|
import sys
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from tacker.common import config
from tacker.openstack.common import service as common_service
from tacker import service
from tacker.openstack.common import gettextutils
from tacker.openstack.common import log as logging
gettextutils.install('tacker', lazy=True)
LOG = logging.getLogger(__name__)
def main():
# the configuration will be read into the cfg.CONF global data structure
config.init(sys.argv[1:])
if not cfg.CONF.config_file:
sys.exit(_("ERROR: Unable to find configuration file via the default"
" search paths (~/.tacker/, ~/, /etc/tacker/, /etc/) and"
" the '--config-file' option!"))
try:
tacker_api = service.serve_wsgi(service.TackerApiService)
launcher = common_service.launch(tacker_api,
workers=cfg.CONF.api_workers)
launcher.wait()
except KeyboardInterrupt:
pass
except RuntimeError as e:
sys.exit(_("ERROR: %s") % e)
if __name__ == "__main__":
main()
|
{
"content_hash": "0f0b8fc4f7cefcc9023f67547f885846",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 77,
"avg_line_length": 28.641025641025642,
"alnum_prop": 0.6454789615040286,
"repo_name": "SripriyaSeetharam/tacker",
"id": "6c652096b302f379872a0ee214b91d16ecc280c6",
"size": "1921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tacker/cmd/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1142"
},
{
"name": "Python",
"bytes": "1204880"
},
{
"name": "Shell",
"bytes": "24370"
}
],
"symlink_target": ""
}
|
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._capacity_reservation_groups_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_resource_group_request,
build_list_by_subscription_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CapacityReservationGroupsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2022_03_01.aio.ComputeManagementClient`'s
:attr:`capacity_reservation_groups` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@overload
async def create_or_update(
self,
resource_group_name: str,
capacity_reservation_group_name: str,
parameters: _models.CapacityReservationGroup,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.CapacityReservationGroup:
"""The operation to create or update a capacity reservation group. When updating a capacity
reservation group, only tags may be modified. Please refer to
https://aka.ms/CapacityReservation for more details.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param capacity_reservation_group_name: The name of the capacity reservation group. Required.
:type capacity_reservation_group_name: str
:param parameters: Parameters supplied to the Create capacity reservation Group. Required.
:type parameters: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CapacityReservationGroup or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_group_name: str,
capacity_reservation_group_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.CapacityReservationGroup:
"""The operation to create or update a capacity reservation group. When updating a capacity
reservation group, only tags may be modified. Please refer to
https://aka.ms/CapacityReservation for more details.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param capacity_reservation_group_name: The name of the capacity reservation group. Required.
:type capacity_reservation_group_name: str
:param parameters: Parameters supplied to the Create capacity reservation Group. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CapacityReservationGroup or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
capacity_reservation_group_name: str,
parameters: Union[_models.CapacityReservationGroup, IO],
**kwargs: Any
) -> _models.CapacityReservationGroup:
"""The operation to create or update a capacity reservation group. When updating a capacity
reservation group, only tags may be modified. Please refer to
https://aka.ms/CapacityReservation for more details.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param capacity_reservation_group_name: The name of the capacity reservation group. Required.
:type capacity_reservation_group_name: str
:param parameters: Parameters supplied to the Create capacity reservation Group. Is either a
model type or a IO type. Required.
:type parameters: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CapacityReservationGroup or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.CapacityReservationGroup]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "CapacityReservationGroup")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
capacity_reservation_group_name=capacity_reservation_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("CapacityReservationGroup", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("CapacityReservationGroup", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}"} # type: ignore
@overload
async def update(
self,
resource_group_name: str,
capacity_reservation_group_name: str,
parameters: _models.CapacityReservationGroupUpdate,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.CapacityReservationGroup:
"""The operation to update a capacity reservation group. When updating a capacity reservation
group, only tags may be modified.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param capacity_reservation_group_name: The name of the capacity reservation group. Required.
:type capacity_reservation_group_name: str
:param parameters: Parameters supplied to the Update capacity reservation Group operation.
Required.
:type parameters: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroupUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CapacityReservationGroup or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
capacity_reservation_group_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.CapacityReservationGroup:
"""The operation to update a capacity reservation group. When updating a capacity reservation
group, only tags may be modified.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param capacity_reservation_group_name: The name of the capacity reservation group. Required.
:type capacity_reservation_group_name: str
:param parameters: Parameters supplied to the Update capacity reservation Group operation.
Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CapacityReservationGroup or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
capacity_reservation_group_name: str,
parameters: Union[_models.CapacityReservationGroupUpdate, IO],
**kwargs: Any
) -> _models.CapacityReservationGroup:
"""The operation to update a capacity reservation group. When updating a capacity reservation
group, only tags may be modified.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param capacity_reservation_group_name: The name of the capacity reservation group. Required.
:type capacity_reservation_group_name: str
:param parameters: Parameters supplied to the Update capacity reservation Group operation. Is
either a model type or a IO type. Required.
:type parameters: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroupUpdate or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CapacityReservationGroup or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.CapacityReservationGroup]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "CapacityReservationGroupUpdate")
request = build_update_request(
resource_group_name=resource_group_name,
capacity_reservation_group_name=capacity_reservation_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("CapacityReservationGroup", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, capacity_reservation_group_name: str, **kwargs: Any
) -> None:
"""The operation to delete a capacity reservation group. This operation is allowed only if all the
associated resources are disassociated from the reservation group and all capacity reservations
under the reservation group have also been deleted. Please refer to
https://aka.ms/CapacityReservation for more details.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param capacity_reservation_group_name: The name of the capacity reservation group. Required.
:type capacity_reservation_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
capacity_reservation_group_name=capacity_reservation_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
capacity_reservation_group_name: str,
expand: Optional[Union[str, _models.CapacityReservationGroupInstanceViewTypes]] = None,
**kwargs: Any
) -> _models.CapacityReservationGroup:
"""The operation that retrieves information about a capacity reservation group.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param capacity_reservation_group_name: The name of the capacity reservation group. Required.
:type capacity_reservation_group_name: str
:param expand: The expand expression to apply on the operation. 'InstanceView' will retrieve
the list of instance views of the capacity reservations under the capacity reservation group
which is a snapshot of the runtime properties of a capacity reservation that is managed by the
platform and can change outside of control plane operations. "instanceView" Default value is
None.
:type expand: str or
~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroupInstanceViewTypes
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CapacityReservationGroup or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.CapacityReservationGroup]
request = build_get_request(
resource_group_name=resource_group_name,
capacity_reservation_group_name=capacity_reservation_group_name,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("CapacityReservationGroup", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}"} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
expand: Optional[Union[str, _models.ExpandTypesForGetCapacityReservationGroups]] = None,
**kwargs: Any
) -> AsyncIterable["_models.CapacityReservationGroup"]:
"""Lists all of the capacity reservation groups in the specified resource group. Use the nextLink
property in the response to get the next page of capacity reservation groups.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param expand: The expand expression to apply on the operation. Based on the expand param(s)
specified we return Virtual Machine or ScaleSet VM Instance or both resource Ids which are
associated to capacity reservation group in the response. Known values are:
"virtualMachineScaleSetVMs/$ref" and "virtualMachines/$ref". Default value is None.
:type expand: str or
~azure.mgmt.compute.v2022_03_01.models.ExpandTypesForGetCapacityReservationGroups
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CapacityReservationGroup or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.CapacityReservationGroupListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("CapacityReservationGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups"} # type: ignore
@distributed_trace
def list_by_subscription(
self, expand: Optional[Union[str, _models.ExpandTypesForGetCapacityReservationGroups]] = None, **kwargs: Any
) -> AsyncIterable["_models.CapacityReservationGroup"]:
"""Lists all of the capacity reservation groups in the subscription. Use the nextLink property in
the response to get the next page of capacity reservation groups.
:param expand: The expand expression to apply on the operation. Based on the expand param(s)
specified we return Virtual Machine or ScaleSet VM Instance or both resource Ids which are
associated to capacity reservation group in the response. Known values are:
"virtualMachineScaleSetVMs/$ref" and "virtualMachines/$ref". Default value is None.
:type expand: str or
~azure.mgmt.compute.v2022_03_01.models.ExpandTypesForGetCapacityReservationGroups
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CapacityReservationGroup or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2022_03_01.models.CapacityReservationGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.CapacityReservationGroupListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("CapacityReservationGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/capacityReservationGroups"} # type: ignore
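# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the generated file. Per the class
# docstring, these operations are reached through the v2022_03_01 aio
# client's `capacity_reservation_groups` attribute; the subscription id
# below is a placeholder.
#
#     import asyncio
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.compute.v2022_03_01.aio import ComputeManagementClient
#
#     async def main():
#         async with DefaultAzureCredential() as credential:
#             client = ComputeManagementClient(credential, "<subscription-id>")
#             async with client:
#                 async for group in client.capacity_reservation_groups.list_by_subscription():
#                     print(group.name)
#
#     asyncio.run(main())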
|
{
"content_hash": "3944773809f74ff5300daa96656c92d9",
"timestamp": "",
"source": "github",
"line_count": 659,
"max_line_length": 210,
"avg_line_length": 47.71775417298938,
"alnum_prop": 0.6552820708516186,
"repo_name": "Azure/azure-sdk-for-python",
"id": "b0e7aaa82412788fa13a964763b24061eceadfb1",
"size": "31946",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_01/aio/operations/_capacity_reservation_groups_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class SunConfig(AppConfig):
name = 'sun'
|
{
"content_hash": "a9a1b24bd579ea461a06552a987af0ff",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 16.2,
"alnum_prop": 0.7283950617283951,
"repo_name": "k00n/site_update_notifier",
"id": "6b26b5234f9c8e3d8999d4a4cbe6d3299a30accc",
"size": "81",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "siteUpdateNotifier/sun/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12456"
}
],
"symlink_target": ""
}
|
"""
Copyright 2022 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# [START drive_fetch_start_page_token]
from __future__ import print_function
import google.auth
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
def fetch_start_page_token():
    """Retrieve the page token for the current state of the account.

    Prints and returns the start page token. Loads pre-authorized user
    credentials from the environment.
    TODO(developer) - See https://developers.google.com/identity
    for guides on implementing OAuth2 for the application.
    """
creds, _ = google.auth.default()
    try:
        # create drive api client (this snippet talks to Drive, not Gmail)
        service = build('drive', 'v2', credentials=creds)
        # pylint: disable=maybe-no-member
        response = service.changes().getStartPageToken().execute()
        print(F'Start token: {response.get("startPageToken")}')
    except HttpError as error:
        print(F'An error occurred: {error}')
        # returning here avoids calling .get() on None, which the original
        # code did whenever the request failed
        return None
    return response.get('startPageToken')
if __name__ == '__main__':
fetch_start_page_token()
# [END drive_fetch_start_page_token]
|
{
"content_hash": "7b11fc94193dcd8ee9cc4eeb221b12ee",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 72,
"avg_line_length": 33.895833333333336,
"alnum_prop": 0.7197295636140135,
"repo_name": "googledrive/python-quickstart",
"id": "fb97be41b1e535a9ee61168adb08fa4c8c132f4c",
"size": "1627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drive/snippets/drive-v2/change snippet/fetch_start_page_token.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1718"
}
],
"symlink_target": ""
}
|
from pyportfolio.models import Equity, Option, Future, Account, Trade, Commodity, Index, Currency, TradeList
from datetime import date
import unittest
AAPL = Equity(ticker='AAPL')
AAPL_option = Option(underlying=AAPL, expiry=date(2016, 6, 30), strike=105, type='call')
copper = Commodity(name='Copper')
cu_future = Future(underlying=copper, expiry=date(2020, 6, 30))
USD = Currency(name='USD')
trade = Trade(security=AAPL, amount=100, price=120, commission=7.99, currency=USD)
account = Account(name='Test Account')
account.add_trade(trade)
option_trade = Trade(security=AAPL_option, amount=100, price=20, commission=7.99, currency=USD)
class TestModels(unittest.TestCase):
def test_account(self):
        self.assertEqual(account.positions[AAPL].amount, 100)
def test_trade(self):
        self.assertEqual(trade.value, 100 * 120)
        self.assertEqual(trade.commission, 7.99)
class TestCollections(unittest.TestCase):
def test_positions(self):
tl = TradeList()
tl.add_trade(trade)
        self.assertEqual(tl.positions[AAPL].amount, 100)
        self.assertEqual(tl.positions[AAPL].cost_basis, 100 * 120 + 7.99)
def test_to_csv(self):
tl = TradeList()
tl.add_trade(trade)
tl.add_trade(option_trade)
#tl.to_csv('test.csv')
|
{
"content_hash": "85e0d6de465774fe665696db2b196816",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 108,
"avg_line_length": 35.16216216216216,
"alnum_prop": 0.6979246733282091,
"repo_name": "davidastephens/pyportfolio",
"id": "8ca300a189835a574070d57d6a1fca94de4df2ff",
"size": "1301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyportfolio/models/tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12096"
}
],
"symlink_target": ""
}
|
from flask_classy import route
from flask import request, jsonify, Response
from marvin.tools.query import doQuery, Query
from marvin.core.exceptions import MarvinError
from marvin.api.base import BaseView, arg_validate as av
from marvin.utils.db import get_traceback
import json
def _getCubes(searchfilter, **kwargs):
"""Run query locally at Utah."""
release = kwargs.pop('release', None)
kwargs['returnparams'] = kwargs.pop('params', None)
kwargs['returntype'] = kwargs.pop('rettype', None)
try:
# q, r = doQuery(searchfilter=searchfilter, returnparams=params, release=release,
# mode='local', returntype=rettype, limit=limit, order=order, sort=sort)
q, r = doQuery(searchfilter=searchfilter, release=release, **kwargs)
except Exception as e:
raise MarvinError('Query failed with {0}: {1}'.format(e.__class__.__name__, e))
results = r.results
# get the subset keywords
start = kwargs.get('start', None)
end = kwargs.get('end', None)
limit = kwargs.get('limit', None)
params = kwargs.get('params', None)
# get a subset
chunk = None
if start:
chunk = int(end) - int(start)
results = r.getSubset(int(start), limit=chunk)
    # fall back to the query limit when no explicit start/end chunk was given
    # (the original line was a tautology that always picked `limit`)
    chunk = chunk if chunk else limit
    runtime = {'days': q.runtime.days, 'seconds': q.runtime.seconds, 'microseconds': q.runtime.microseconds}
    output = dict(data=results, query=r.showQuery(), chunk=chunk,
filter=searchfilter, params=q.params, returnparams=params, runtime=runtime,
queryparams_order=q.queryparams_order, count=len(results), totalcount=r.count)
return output
class QueryView(BaseView):
"""Class describing API calls related to queries."""
def index(self):
'''Returns general query info
.. :quickref: Query; Get general query info
:query string release: the release of MaNGA
:resjson int status: status of response. 1 if good, -1 if bad.
:resjson string error: error message, null if None
:resjson json inconfig: json of incoming configuration
:resjson json utahconfig: json of outcoming configuration
:resjson string traceback: traceback of an error, null if None
:resjson string data: data message
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 422: invalid input parameters
**Example request**:
.. sourcecode:: http
GET /marvin2/api/query/ HTTP/1.1
Host: api.sdss.org
Accept: application/json, */*
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": 1,
"error": null,
"inconfig": {"release": "MPL-5"},
"utahconfig": {"release": "MPL-5", "mode": "local"},
"traceback": null,
"data": "this is a query!"
}
'''
self.results['data'] = 'this is a query!'
self.results['status'] = 1
return jsonify(self.results)
@route('/cubes/', methods=['GET', 'POST'], endpoint='querycubes')
@av.check_args(use_params='query', required='searchfilter')
def cube_query(self, args):
''' Performs a remote query
.. :quickref: Query; Perform a remote query
:query string release: the release of MaNGA
:form searchfilter: your string searchfilter expression
:form params: the list of return parameters
:form rettype: the string indicating your Marvin Tool conversion object
:form limit: the limiting number of results to return for large results
:form sort: a string parameter name to sort on
:form order: the order of the sort, either ``desc`` or ``asc``
:resjson int status: status of response. 1 if good, -1 if bad.
:resjson string error: error message, null if None
:resjson json inconfig: json of incoming configuration
:resjson json utahconfig: json of outcoming configuration
:resjson string traceback: traceback of an error, null if None
:resjson string data: dictionary of returned data
:json list results: the list of results
:json string query: the raw SQL string of your query
:json int chunk: the page limit of the results
:json string filter: the searchfilter used
:json list returnparams: the list of return parameters
:json list params: the list of parameters used in the query
:json list queryparams_order: the list of parameters used in the query
        :json dict runtime: a dictionary of query time (days, seconds, microseconds)
:json int totalcount: the total count of results
:json int count: the count in the current page of results
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 422: invalid input parameters
**Example request**:
.. sourcecode:: http
GET /marvin2/api/query/cubes/ HTTP/1.1
Host: api.sdss.org
Accept: application/json, */*
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": 1,
"error": null,
"inconfig": {"release": "MPL-5", "searchfilter": "nsa.z<0.1"},
"utahconfig": {"release": "MPL-5", "mode": "local"},
"traceback": null,
"chunk": 100,
"count": 4,
"data": [["1-209232",8485,"8485-1901","1901",0.0407447],
["1-209113",8485,"8485-1902","1902",0.0378877],
["1-209191",8485,"8485-12701","12701",0.0234253],
["1-209151",8485,"8485-12702","12702",0.0185246]
],
"filter": "nsa.z<0.1",
"params": ["cube.mangaid","cube.plate","cube.plateifu","ifu.name","nsa.z"],
"query": "SELECT ... FROM ... WHERE ...",
"queryparams_order": ["mangaid","plate","plateifu","name","z"],
"returnparams": null,
"runtime": {"days": 0,"microseconds": 55986,"seconds": 0},
"totalcount": 4
}
'''
searchfilter = args.pop('searchfilter', None)
# searchfilter = self.results['inconfig'].get('searchfilter', None)
# params = self.results['inconfig'].get('params', None)
# rettype = self.results['inconfig'].get('returntype', None)
# limit = self.results['inconfig'].get('limit', 100)
# sort = self.results['inconfig'].get('sort', None)
# order = self.results['inconfig'].get('order', 'asc')
# release = self.results['inconfig'].get('release', None)
try:
# res = _getCubes(searchfilter, params=params, rettype=rettype,
# limit=limit, sort=sort, order=order, release=release)
res = _getCubes(searchfilter, **args)
except MarvinError as e:
self.results['error'] = str(e)
self.results['traceback'] = get_traceback(asstring=True)
else:
self.results['status'] = 1
self.update_results(res)
# this needs to be json.dumps until sas-vm at Utah updates to 2.7.11
return Response(json.dumps(self.results), mimetype='application/json')
@route('/cubes/getsubset/', methods=['GET', 'POST'], endpoint='getsubset')
@av.check_args(use_params='query', required=['searchfilter', 'start', 'end'])
def query_getsubset(self, args):
''' Remotely grab a subset of results from a query
.. :quickref: Query; Grab a subset of results from a remote query
:query string release: the release of MaNGA
:form searchfilter: your string searchfilter expression
:form params: the list of return parameters
:form rettype: the string indicating your Marvin Tool conversion object
:form start: the starting page index of results you wish to grab
:form end: the ending page index of the results you wish to grab
:form limit: the limiting number of results to return for large results
:form sort: a string parameter name to sort on
:form order: the order of the sort, either ``desc`` or ``asc``
:resjson int status: status of response. 1 if good, -1 if bad.
:resjson string error: error message, null if None
:resjson json inconfig: json of incoming configuration
:resjson json utahconfig: json of outcoming configuration
:resjson string traceback: traceback of an error, null if None
:resjson string data: dictionary of returned data
:json list results: the list of results
:json string query: the raw SQL string of your query
:json int chunk: the page limit of the results
:json string filter: the searchfilter used
:json list returnparams: the list of return parameters
:json list params: the list of parameters used in the query
:json list queryparams_order: the list of parameters used in the query
        :json dict runtime: a dictionary of query time (days, seconds, microseconds)
:json int totalcount: the total count of results
:json int count: the count in the current page of results
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 422: invalid input parameters
**Example request**:
.. sourcecode:: http
GET /marvin2/api/query/cubes/getsubset/ HTTP/1.1
Host: api.sdss.org
Accept: application/json, */*
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": 1,
"error": null,
"inconfig": {"release": "MPL-5", "searchfilter": "nsa.z<0.1", "start":10, "end":15},
"utahconfig": {"release": "MPL-5", "mode": "local"},
"traceback": null,
"chunk": 100,
"count": 4,
"data": [["1-209232",8485,"8485-1901","1901",0.0407447],
["1-209113",8485,"8485-1902","1902",0.0378877],
["1-209191",8485,"8485-12701","12701",0.0234253],
["1-209151",8485,"8485-12702","12702",0.0185246]
],
"filter": "nsa.z<0.1",
"params": ["cube.mangaid","cube.plate","cube.plateifu","ifu.name","nsa.z"],
"query": "SELECT ... FROM ... WHERE ...",
"queryparams_order": ["mangaid","plate","plateifu","name","z"],
"returnparams": null,
"runtime": {"days": 0,"microseconds": 55986,"seconds": 0},
"totalcount": 4
}
'''
searchfilter = args.pop('searchfilter', None)
# searchfilter = self.results['inconfig'].get('searchfilter', None)
# params = self.results['inconfig'].get('params', None)
# start = self.results['inconfig'].get('start', None)
# end = self.results['inconfig'].get('end', None)
# rettype = self.results['inconfig'].get('returntype', None)
# limit = self.results['inconfig'].get('limit', 100)
# sort = self.results['inconfig'].get('sort', None)
# order = self.results['inconfig'].get('order', 'asc')
# release = self.results['inconfig'].get('release', None)
try:
# res = _getCubes(searchfilter, params=params, start=int(start),
# end=int(end), rettype=rettype, limit=limit,
# sort=sort, order=order, release=release)
res = _getCubes(searchfilter, **args)
except MarvinError as e:
self.results['error'] = str(e)
self.results['traceback'] = get_traceback(asstring=True)
else:
self.results['status'] = 1
self.update_results(res)
# this needs to be json.dumps until sas-vm at Utah updates to 2.7.11
return Response(json.dumps(self.results), mimetype='application/json')
@route('/getparamslist/', methods=['GET', 'POST'], endpoint='getparams')
@av.check_args(use_params='query', required='paramdisplay')
def getparamslist(self, args):
''' Retrieve a list of all available input parameters into the query
.. :quickref: Query; Get a list of all or "best" queryable parameters
:query string release: the release of MaNGA
:form paramdisplay: ``all`` or ``best``, type of parameters to return
:resjson int status: status of response. 1 if good, -1 if bad.
:resjson string error: error message, null if None
:resjson json inconfig: json of incoming configuration
:resjson json utahconfig: json of outcoming configuration
:resjson string traceback: traceback of an error, null if None
:resjson string data: dictionary of returned data
:json list params: the list of queryable parameters
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 422: invalid input parameters
**Example request**:
.. sourcecode:: http
GET /marvin2/api/query/getparamslist/ HTTP/1.1
Host: api.sdss.org
Accept: application/json, */*
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": 1,
"error": null,
"inconfig": {"release": "MPL-5"},
"utahconfig": {"release": "MPL-5", "mode": "local"},
"traceback": null,
"data": ['nsa.z', 'cube.ra', 'cube.dec', ...]
}
'''
paramdisplay = args.pop('paramdisplay', 'all')
q = Query(mode='local')
        if paramdisplay == 'all':
            params = q.get_available_params()
        elif paramdisplay == 'best':
            params = q.get_best_params()
        else:
            # guard: without this branch an unexpected value left params unbound
            params = None
            self.results['error'] = "paramdisplay must be 'all' or 'best'"
        self.results['data'] = params
        self.results['status'] = 1 if params is not None else -1
output = jsonify(self.results)
return output
@route('/cleanup/', methods=['GET', 'POST'], endpoint='cleanupqueries')
@av.check_args(use_params='query', required='task')
def cleanup(self, args):
''' Clean up idle server-side queries or retrieve the list of them
Do not use!
.. :quickref: Query; Send a cleanup command to the server-side database
:query string release: the release of MaNGA
:form task: ``clean`` or ``getprocs``, the type of task to run
:resjson int status: status of response. 1 if good, -1 if bad.
:resjson string error: error message, null if None
:resjson json inconfig: json of incoming configuration
:resjson json utahconfig: json of outcoming configuration
:resjson string traceback: traceback of an error, null if None
:resjson string data: dictionary of returned data
:json string clean: clean success message
:json list procs: the list of processes currently running on the db
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 422: invalid input parameters
**Example request**:
.. sourcecode:: http
GET /marvin2/api/query/cleanup/ HTTP/1.1
Host: api.sdss.org
Accept: application/json, */*
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": 1,
"error": null,
"inconfig": {"release": "MPL-5"},
"utahconfig": {"release": "MPL-5", "mode": "local"},
"traceback": null,
"data": 'clean success'
}
'''
task = args.pop('task', None)
if task == 'clean':
q = Query(mode='local')
q._cleanUpQueries()
res = {'status': 1, 'data': 'clean success'}
elif task == 'getprocs':
q = Query(mode='local')
procs = q._getIdleProcesses()
procs = [{k: v for k, v in y.items()} for y in procs]
res = {'status': 1, 'data': procs}
else:
res = {'status': -1, 'data': None, 'error': 'Task is None or not in [clean, getprocs]'}
self.update_results(res)
output = jsonify(self.results)
return output
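# ---------------------------------------------------------------------------
# Hedged client-side sketch, not part of this module: calling the cube query
# endpoint documented above with the standard `requests` library. The host
# and form parameters are taken from the docstring examples.
#
#     import requests
#     resp = requests.post('https://api.sdss.org/marvin2/api/query/cubes/',
#                          data={'searchfilter': 'nsa.z<0.1', 'release': 'MPL-5'})
#     print(resp.json()['data'])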
|
{
"content_hash": "2530287761c449468c1746b6a7c7d7bf",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 108,
"avg_line_length": 41.51122194513716,
"alnum_prop": 0.5842845127958669,
"repo_name": "bretthandrews/marvin",
"id": "5f1f22a0dabbb9931aa4c76c72a157e085ddc898",
"size": "16646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/marvin/api/query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "210355"
},
{
"name": "HTML",
"bytes": "60149"
},
{
"name": "JavaScript",
"bytes": "207386"
},
{
"name": "Python",
"bytes": "921930"
},
{
"name": "SQLPL",
"bytes": "141212"
},
{
"name": "Shell",
"bytes": "1108"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, print_function, division
__author__ = "mozman <mozman@gmx.at>"
import unittest
# test helpers
from mytesttools import SimpleStructureChecker, create_node, get_n_random_tags
from ezodf2.nodeorganizer import PreludeEpilogueOrganizer
# objects to test
from ezodf2.nodestructurechecker import NodeStructureChecker
# all tags are single-letter tags
PRELUDE_TAGS = 'abc'
EPILOGUE_TAGS = 'xyz'
ALLTAGS = list(PRELUDE_TAGS + 'ghi' + EPILOGUE_TAGS)
checker = SimpleStructureChecker(list(PRELUDE_TAGS), list(EPILOGUE_TAGS))
has_valid_structure = checker.has_valid_structure
class TestNodeStructureChecker(unittest.TestCase):
def test_valid_content(self):
node = create_node('aabbccghixxyyzz')
validator = NodeStructureChecker(PRELUDE_TAGS, 'ghi', EPILOGUE_TAGS)
self.assertTrue(validator.is_valid(node))
def test_invalid_content(self):
node = create_node('aabbccgHixxyyzz')
validator = NodeStructureChecker(PRELUDE_TAGS, 'ghi', EPILOGUE_TAGS)
self.assertFalse(validator.is_valid(node))
def test_valid_content_without_prelude(self):
node = create_node('ghixxyyzz')
validator = NodeStructureChecker(PRELUDE_TAGS, 'ghi', EPILOGUE_TAGS)
self.assertTrue(validator.is_valid(node))
def test_valid_content_without_epilogue(self):
node = create_node('aabbccghi')
validator = NodeStructureChecker(PRELUDE_TAGS, 'ghi', EPILOGUE_TAGS)
self.assertTrue(validator.is_valid(node))
def test_valid_content_only_midrange(self):
node = create_node('ghi')
validator = NodeStructureChecker(PRELUDE_TAGS, 'ghi', EPILOGUE_TAGS)
self.assertTrue(validator.is_valid(node))
def test_valid_content_one_tag(self):
node = create_node('g')
validator = NodeStructureChecker(PRELUDE_TAGS, 'ghi', EPILOGUE_TAGS)
self.assertTrue(validator.is_valid(node))
def test_valid_content_empty_tag(self):
node = create_node('')
validator = NodeStructureChecker(PRELUDE_TAGS, 'ghi', EPILOGUE_TAGS)
self.assertTrue(validator.is_valid(node))
def test_reorder(self):
node = create_node(get_n_random_tags(50, list(PRELUDE_TAGS+'ghi'+EPILOGUE_TAGS)))
no = PreludeEpilogueOrganizer(PRELUDE_TAGS, EPILOGUE_TAGS)
no.reorder(node)
validator = NodeStructureChecker(PRELUDE_TAGS, 'ghi', EPILOGUE_TAGS)
self.assertEqual(has_valid_structure(node), validator.is_valid(node))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "4c1dd078ebdf89c74def98a0424df5a2",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 89,
"avg_line_length": 38.44117647058823,
"alnum_prop": 0.685539403213466,
"repo_name": "iwschris/ezodf2",
"id": "2957bd4ff0dc104df13d980783fe59ce0991eb4c",
"size": "2761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_node_structure_checker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "351944"
},
{
"name": "Shell",
"bytes": "4505"
}
],
"symlink_target": ""
}
|
"""
Use the 'click' library to create an interface for shell execution
"""
from libs import get_logger
logger = get_logger(__name__)
# Make this script a powerful command line program
import click
from libs.bash import BashCommands as basher
from libs.irodscommands import EudatICommands
from libs.config import MyConfig
from libs import appconfig
############################
# Click commands grouping
@click.group()
@click.option('-v', '--verbose', count=True)
@click.option('--debug/--no-debug', default=False)
@click.option('--mock/--no-mock', default=False)
@click.pass_context
def cli(ctx, verbose, debug, mock):
logger.debug('Script init. Verbosity: %s' % verbose)
logger.debug('Debug: %s' % debug)
if mock:
appconfig.set('devel')
else:
appconfig.set('production')
# Do we have iRODS?
icom = EudatICommands()
    # Make sure we have an ini file for future callbacks
configurer = MyConfig(icom)
configurer.check()
# Save context
ctx.obj['VERBOSE'] = verbose
ctx.obj['DEBUG'] = debug
ctx.obj['MOCK'] = mock
ctx.obj['icom'] = icom
ctx.obj['conf'] = configurer
#print(dir(ctx))
############################
# Option 1. Filling data inside irods
@click.command()
@click.option('--size', default=10, type=int, \
              help='number of random elements to create inside irods')
@click.pass_context
def popolae(ctx, size):
logger.info('COMMAND: Filling irods')
com = basher() # system commands, only needed for this command
remove_irods_existing = appconfig.mocking()
from libs.service_supply import fill_irods_random
fill_irods_random(com, ctx.obj['icom'], size, remove_irods_existing)
cli.add_command(popolae)
############################
# Option 2. Converting data from irods to a graph
@click.command()
@click.option('--elements', default=0, type=int, \
help='number of elements to find and convert') #note: 0 is all
@click.pass_context
def convert(ctx, elements):
logger.info('COMMAND: Converting iRODS objects inside a modeled graphdb')
# Loading the library opens the graph connection
from libs.graph import graph # only needed for this command
#remove_graph_existing = appconfig.mocking()
remove_graph_existing = True #DEBUG
from libs.service_supply import fill_graph_from_irods
fill_graph_from_irods(ctx.obj['icom'], graph, elements, remove_graph_existing)
cli.add_command(convert)
|
{
"content_hash": "63b81b1e2713f2a58eb5d45e17089035",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 82,
"avg_line_length": 31.789473684210527,
"alnum_prop": 0.6767384105960265,
"repo_name": "pdonorio/irods2graph",
"id": "8493055d465b38c1cc7a7658428600b8db939971",
"size": "2465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "irodsgraph/libs/cliinterface.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "45801"
},
{
"name": "Python",
"bytes": "44385"
},
{
"name": "Rebol",
"bytes": "1288"
},
{
"name": "Shell",
"bytes": "1345"
}
],
"symlink_target": ""
}
|
import os
import sys
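# Swap each adjacent pair of lines in the given file: buffer the even-indexed
# line, then print the odd-indexed line followed by the buffered one (Python 2).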
if (len(sys.argv) < 2):
print "not enough parameters"
os._exit(1)
content = open(sys.argv[1]).readlines()
index = 0
for line in content:
if (index % 2):
print line,
print prev,
prev = line
index = index + 1
|
{
"content_hash": "6664a8602731187a027cd3e5b9dfce5f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 39,
"avg_line_length": 14.35,
"alnum_prop": 0.5435540069686411,
"repo_name": "PulseRain/Embedded",
"id": "079295c555da84691cb130698cf22e0062b964d8",
"size": "1752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tools/scripts/python/line_switch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "625393"
},
{
"name": "C++",
"bytes": "96916"
},
{
"name": "CMake",
"bytes": "8328"
},
{
"name": "Lex",
"bytes": "6705"
},
{
"name": "Logos",
"bytes": "572"
},
{
"name": "Makefile",
"bytes": "22499"
},
{
"name": "Perl",
"bytes": "15650"
},
{
"name": "Perl6",
"bytes": "22479"
},
{
"name": "Python",
"bytes": "30905"
},
{
"name": "Shell",
"bytes": "2969"
},
{
"name": "SystemVerilog",
"bytes": "15951"
},
{
"name": "Tcl",
"bytes": "8336"
},
{
"name": "Yacc",
"bytes": "15615"
}
],
"symlink_target": ""
}
|
"""Execute the tests for the fiona program.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import logging
import os.path
import subprocess
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
class ResultChecker(object):
"""Check quality with compute_gain program."""
def __init__(self, compute_gain, path_to_ref, path_to_sam,
path_to_post, min_gain):
self.compute_gain = compute_gain
self.path_to_ref = path_to_ref
self.path_to_sam = path_to_sam
self.path_to_post = path_to_post
self.min_gain = min_gain
def __call__(self):
cmd_line = [self.compute_gain,
'-g', self.path_to_ref,
'--pre', self.path_to_sam,
'--post', self.path_to_post]
print ' '.join(cmd_line)
process = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
retcode = process.wait()
if retcode != 0:
raise app_tests.BadResultException('compute_gain did not return 0')
report = process.stdout.read()
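        # compute_gain prints its report to stdout; the gain value is the first
        # token on the third line of that report.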
gain = float(report.splitlines()[2].split()[0])
if gain < self.min_gain:
fmt = 'Gain too low. Expected >= %f, got %f'
raise app_tests.BadResultException(fmt % (self.min_gain, gain))
return True
def main(source_base, binary_base):
"""Main entry point of the script."""
print 'Executing test for fiona'
print '========================'
print
ph = app_tests.TestPathHelper(
source_base, binary_base,
'core/apps/fiona/tests') # tests dir
# ============================================================
# Auto-detect the binary path.
# ============================================================
path_to_fiona = app_tests.autolocateBinary(
binary_base, 'core/apps/fiona', 'fiona')
path_to_fiona_illumina = app_tests.autolocateBinary(
binary_base, 'core/apps/fiona', 'fiona_illumina')
path_to_compute_gain = app_tests.autolocateBinary(
binary_base, 'core/apps/fiona', 'compute_gain')
# ============================================================
    # Build TestConf list.
# ============================================================
    # Build list with TestConf objects, analogously to how the output
    # was generated in generate_outputs.sh.
conf_list = []
# We prepare a list of transforms to apply to the output files. This is
# used to strip the input/output paths from the programs' output to
# make it more canonical and host independent.
ph.outFile('-') # To ensure that the out path is set.
transforms = [
app_tests.ReplaceTransform(
os.path.join(ph.source_base_path,
'core/apps/fiona/tests') + os.sep,
'', right=True),
app_tests.ReplaceTransform(ph.temp_dir + os.sep, '', right=True),
app_tests.NormalizeScientificExponentsTransform(),
]
# ============================================================
# Run on uniformly random DNA.
# ============================================================
# Note that instead of comparing the results with expected results, we
# use a checker that computes the gain and compares it with a threshold.
# Illumina Mode
for i in [1, 2]:
min_gain = {1: 40.0, 2: 50.0}
conf = app_tests.TestConf(
program=path_to_fiona_illumina,
args=['-nt', '1',
'-i', str(i),
'-g', '10000',
ph.inFile('reads.illumina.fq'),
ph.outFile('reads.illumina.corrected.i%d.fa' % i)],
redir_stdout=ph.outFile('reads.illumina.fq.i%d.stdout' % i),
redir_stderr=ph.outFile('reads.illumina.fq.i%d.stderr' % i),
check_callback=ResultChecker(
path_to_compute_gain, ph.inFile('genome.10k.fa'),
ph.inFile('reads.illumina.sam'),
ph.outFile('reads.illumina.corrected.i%d.fa' % i),
min_gain.get(i, 100.0)),
to_diff=[(ph.inFile('reads.illumina.fq.i%d.stdout' % i),
ph.outFile('reads.illumina.fq.i%d.stdout' % i),
transforms),
(ph.inFile('reads.illumina.fq.i%d.stderr' % i),
ph.outFile('reads.illumina.fq.i%d.stderr' % i),
transforms),
])
conf_list.append(conf)
# Indel Mode
for i in [1, 2]:
min_gain = {1: 70.0, 2: 85.0}
conf = app_tests.TestConf(
program=path_to_fiona,
args=['-nt', '1',
'-i', str(i),
'-g', '10000',
ph.inFile('reads.454.fq'),
ph.outFile('reads.454.corrected.i%d.fa' % i)],
redir_stdout=ph.outFile('reads.454.fq.i%d.stdout' % i),
redir_stderr=ph.outFile('reads.454.fq.i%d.stderr' % i),
check_callback=ResultChecker(
path_to_compute_gain, ph.inFile('genome.10k.fa'),
ph.inFile('reads.454.sam'),
ph.outFile('reads.454.corrected.i%d.fa' % i),
min_gain.get(i, 100.0)),
to_diff=[(ph.inFile('reads.454.fq.i%d.stdout' % i),
ph.outFile('reads.454.fq.i%d.stdout' % i),
transforms),
(ph.inFile('reads.454.fq.i%d.stderr' % i),
ph.outFile('reads.454.fq.i%d.stderr' % i),
transforms),
])
conf_list.append(conf)
# Execute the tests.
failures = 0
for conf in conf_list:
res = app_tests.runTest(conf)
# Output to the user.
print ' '.join(conf.commandLineArgs())
if res:
print 'OK'
else:
failures += 1
print 'FAILED'
# Cleanup.
ph.deleteTempDir()
print '=============================='
print ' total tests: %d' % len(conf_list)
print ' failed tests: %d' % failures
print 'successful tests: %d' % (len(conf_list) - failures)
print '=============================='
    # Compute and return the return code.
return failures != 0
if __name__ == '__main__':
sys.exit(app_tests.main(main))
|
{
"content_hash": "e9d6387546f2193e8f777a136d1ea32f",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 79,
"avg_line_length": 37.38251366120219,
"alnum_prop": 0.5132290600789359,
"repo_name": "holtgrewe/seqan",
"id": "db8b874351e800256ce9a938839fa6e3a4be2f79",
"size": "6863",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/apps/fiona/tests/run_tests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "10606"
},
{
"name": "C",
"bytes": "155313"
},
{
"name": "C++",
"bytes": "21416742"
},
{
"name": "CSS",
"bytes": "450080"
},
{
"name": "JavaScript",
"bytes": "158682"
},
{
"name": "Makefile",
"bytes": "7301"
},
{
"name": "Objective-C",
"bytes": "437103"
},
{
"name": "PHP",
"bytes": "48846"
},
{
"name": "Perl",
"bytes": "212425"
},
{
"name": "Prolog",
"bytes": "53838"
},
{
"name": "Python",
"bytes": "2053658"
},
{
"name": "R",
"bytes": "34940"
},
{
"name": "Shell",
"bytes": "98797"
},
{
"name": "TeX",
"bytes": "13746"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/corellia/shared_guild_commerce_corellia_style_01.iff"
result.attribute_template_id = -1
result.stfName("building_name","guild_commerce_corellia")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "2880bcdfe17d37e087c4a2cf98693da5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 25.923076923076923,
"alnum_prop": 0.7151335311572701,
"repo_name": "anhstudios/swganh",
"id": "45ac252c97d59c1e394b86999da98d4519336771",
"size": "482",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/building/corellia/shared_guild_commerce_corellia_style_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import gettext
import json
from os import path
from django.conf import settings
from django.test import (
RequestFactory, SimpleTestCase, TestCase, ignore_warnings, modify_settings,
override_settings,
)
from django.test.selenium import SeleniumTestCase
from django.urls import reverse
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.translation import (
LANGUAGE_SESSION_KEY, get_language, override,
)
from django.views.i18n import JavaScriptCatalog, get_formats
from ..urls import locale_dir
@override_settings(ROOT_URLCONF='view_tests.urls')
class SetLanguageTests(TestCase):
"""Test the django.views.i18n.set_language view."""
def _get_inactive_language_code(self):
"""Return language code for a language which is not activated."""
current_language = get_language()
return [code for code, name in settings.LANGUAGES if not code == current_language][0]
def test_setlang(self):
"""
The set_language view can be used to change the session language.
The user is redirected to the 'next' argument if provided.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '/'}
response = self.client.post('/i18n/setlang/', post_data, HTTP_REFERER='/i_should_not_be_used/')
self.assertRedirects(response, '/')
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
# The language is set in a cookie.
language_cookie = self.client.cookies[settings.LANGUAGE_COOKIE_NAME]
self.assertEqual(language_cookie.value, lang_code)
self.assertEqual(language_cookie['domain'], '')
self.assertEqual(language_cookie['path'], '/')
self.assertEqual(language_cookie['max-age'], '')
self.assertEqual(language_cookie['httponly'], '')
self.assertEqual(language_cookie['samesite'], '')
self.assertEqual(language_cookie['secure'], '')
def test_setlang_unsafe_next(self):
"""
The set_language view only redirects to the 'next' argument if it is
"safe".
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '//unsafe/redirection/'}
response = self.client.post('/i18n/setlang/', data=post_data)
self.assertEqual(response.url, '/')
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_http_next(self):
"""
The set_language view only redirects to the 'next' argument if it is
"safe" and its scheme is https if the request was sent over https.
"""
lang_code = self._get_inactive_language_code()
non_https_next_url = 'http://testserver/redirection/'
post_data = {'language': lang_code, 'next': non_https_next_url}
# Insecure URL in POST data.
response = self.client.post('/i18n/setlang/', data=post_data, secure=True)
self.assertEqual(response.url, '/')
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
# Insecure URL in HTTP referer.
response = self.client.post('/i18n/setlang/', secure=True, HTTP_REFERER=non_https_next_url)
self.assertEqual(response.url, '/')
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_redirect_to_referer(self):
"""
The set_language view redirects to the URL in the referer header when
there isn't a "next" parameter.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
response = self.client.post('/i18n/setlang/', post_data, HTTP_REFERER='/i18n/')
self.assertRedirects(response, '/i18n/', fetch_redirect_response=False)
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_default_redirect(self):
"""
The set_language view redirects to '/' when there isn't a referer or
"next" parameter.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
response = self.client.post('/i18n/setlang/', post_data)
self.assertRedirects(response, '/')
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_performs_redirect_for_ajax_if_explicitly_requested(self):
"""
The set_language view redirects to the "next" parameter for requests
not accepting HTML response content.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '/'}
response = self.client.post('/i18n/setlang/', post_data, HTTP_ACCEPT='application/json')
self.assertRedirects(response, '/')
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_doesnt_perform_a_redirect_to_referer_for_ajax(self):
"""
The set_language view doesn't redirect to the HTTP referer header if
the request doesn't accept HTML response content.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
headers = {'HTTP_REFERER': '/', 'HTTP_ACCEPT': 'application/json'}
response = self.client.post('/i18n/setlang/', post_data, **headers)
self.assertEqual(response.status_code, 204)
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_doesnt_perform_a_default_redirect_for_ajax(self):
"""
The set_language view returns 204 by default for requests not accepting
HTML response content.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
response = self.client.post('/i18n/setlang/', post_data, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 204)
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_unsafe_next_for_ajax(self):
"""
The fallback to root URL for the set_language view works for requests
not accepting HTML response content.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '//unsafe/redirection/'}
response = self.client.post('/i18n/setlang/', post_data, HTTP_ACCEPT='application/json')
self.assertEqual(response.url, '/')
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
def test_session_language_deprecation(self):
msg = (
'The user language will no longer be stored in request.session '
'in Django 4.0. Read it from '
'request.COOKIES[settings.LANGUAGE_COOKIE_NAME] instead.'
)
with self.assertRaisesMessage(RemovedInDjango40Warning, msg):
self.client.session[LANGUAGE_SESSION_KEY]
def test_setlang_reversal(self):
self.assertEqual(reverse('set_language'), '/i18n/setlang/')
def test_setlang_cookie(self):
        # Force saving the language to a cookie rather than the session
        # by excluding session middleware and any middleware that requires it.
test_settings = {
'MIDDLEWARE': ['django.middleware.common.CommonMiddleware'],
'LANGUAGE_COOKIE_NAME': 'mylanguage',
'LANGUAGE_COOKIE_AGE': 3600 * 7 * 2,
'LANGUAGE_COOKIE_DOMAIN': '.example.com',
'LANGUAGE_COOKIE_PATH': '/test/',
'LANGUAGE_COOKIE_HTTPONLY': True,
'LANGUAGE_COOKIE_SAMESITE': 'Strict',
'LANGUAGE_COOKIE_SECURE': True,
}
with self.settings(**test_settings):
post_data = {'language': 'pl', 'next': '/views/'}
response = self.client.post('/i18n/setlang/', data=post_data)
language_cookie = response.cookies.get('mylanguage')
self.assertEqual(language_cookie.value, 'pl')
self.assertEqual(language_cookie['domain'], '.example.com')
self.assertEqual(language_cookie['path'], '/test/')
self.assertEqual(language_cookie['max-age'], 3600 * 7 * 2)
self.assertIs(language_cookie['httponly'], True)
self.assertEqual(language_cookie['samesite'], 'Strict')
self.assertIs(language_cookie['secure'], True)
def test_setlang_decodes_http_referer_url(self):
"""
The set_language view decodes the HTTP_REFERER URL.
"""
# The URL & view must exist for this to work as a regression test.
self.assertEqual(reverse('with_parameter', kwargs={'parameter': 'x'}), '/test-setlang/x/')
lang_code = self._get_inactive_language_code()
encoded_url = '/test-setlang/%C3%A4/' # (%C3%A4 decodes to ä)
response = self.client.post('/i18n/setlang/', {'language': lang_code}, HTTP_REFERER=encoded_url)
self.assertRedirects(response, encoded_url, fetch_redirect_response=False)
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
@modify_settings(MIDDLEWARE={
'append': 'django.middleware.locale.LocaleMiddleware',
})
def test_lang_from_translated_i18n_pattern(self):
response = self.client.post(
'/i18n/setlang/', data={'language': 'nl'},
follow=True, HTTP_REFERER='/en/translated/'
)
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, 'nl')
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'nl')
self.assertRedirects(response, '/nl/vertaald/')
# And reverse
response = self.client.post(
'/i18n/setlang/', data={'language': 'en'},
follow=True, HTTP_REFERER='/nl/vertaald/'
)
self.assertRedirects(response, '/en/translated/')
@override_settings(ROOT_URLCONF='view_tests.urls')
class I18NViewTests(SimpleTestCase):
"""Test django.views.i18n views other than set_language."""
@override_settings(LANGUAGE_CODE='de')
def test_get_formats(self):
formats = get_formats()
# Test 3 possible types in get_formats: integer, string, and list.
self.assertEqual(formats['FIRST_DAY_OF_WEEK'], 0)
self.assertEqual(formats['DECIMAL_SEPARATOR'], '.')
self.assertEqual(formats['TIME_INPUT_FORMATS'], ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'])
def test_jsi18n(self):
"""The javascript_catalog can be deployed with language settings"""
for lang_code in ['es', 'fr', 'ru']:
with override(lang_code):
catalog = gettext.translation('djangojs', locale_dir, [lang_code])
trans_txt = catalog.gettext('this is to be translated')
response = self.client.get('/jsi18n/')
self.assertEqual(response['Content-Type'], 'text/javascript; charset="utf-8"')
# response content must include a line like:
# "this is to be translated": <value of trans_txt Python variable>
# json.dumps() is used to be able to check Unicode strings.
self.assertContains(response, json.dumps(trans_txt), 1)
if lang_code == 'fr':
# Message with context (msgctxt)
self.assertContains(response, '"month name\\u0004May": "mai"', 1)
@override_settings(USE_I18N=False)
def test_jsi18n_USE_I18N_False(self):
response = self.client.get('/jsi18n/')
# default plural function
self.assertContains(response, 'django.pluralidx = function(count) { return (count == 1) ? 0 : 1; };')
self.assertNotContains(response, 'var newcatalog =')
def test_jsoni18n(self):
"""
The json_catalog returns the language catalog and settings as JSON.
"""
with override('de'):
response = self.client.get('/jsoni18n/')
data = json.loads(response.content.decode())
self.assertIn('catalog', data)
self.assertIn('formats', data)
self.assertEqual(data['formats']['TIME_INPUT_FORMATS'], ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'])
self.assertEqual(data['formats']['FIRST_DAY_OF_WEEK'], 0)
self.assertIn('plural', data)
self.assertEqual(data['catalog']['month name\x04May'], 'Mai')
self.assertIn('DATETIME_FORMAT', data['formats'])
self.assertEqual(data['plural'], '(n != 1)')
def test_jsi18n_with_missing_en_files(self):
"""
The javascript_catalog shouldn't load the fallback language in the
case that the current selected language is actually the one translated
from, and hence missing translation files completely.
This happens easily when you're translating from English to other
languages and you've set settings.LANGUAGE_CODE to some other language
than English.
"""
with self.settings(LANGUAGE_CODE='es'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'esto tiene que ser traducido')
def test_jsoni18n_with_missing_en_files(self):
"""
Same as above for the json_catalog view. Here we also check for the
expected JSON format.
"""
with self.settings(LANGUAGE_CODE='es'), override('en-us'):
response = self.client.get('/jsoni18n/')
data = json.loads(response.content.decode())
self.assertIn('catalog', data)
self.assertIn('formats', data)
self.assertIn('plural', data)
self.assertEqual(data['catalog'], {})
self.assertIn('DATETIME_FORMAT', data['formats'])
self.assertIsNone(data['plural'])
def test_jsi18n_fallback_language(self):
"""
Let's make sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('fi'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'il faut le traduire')
self.assertNotContains(response, "Untranslated string")
def test_i18n_fallback_language_plural(self):
"""
The fallback to a language with less plural forms maintains the real
language's number of plural forms and correct translations.
"""
with self.settings(LANGUAGE_CODE='pt'), override('ru'):
response = self.client.get('/jsi18n/')
self.assertEqual(
response.context['catalog']['{count} plural3'],
['{count} plural3 p3', '{count} plural3 p3s', '{count} plural3 p3t']
)
self.assertEqual(
response.context['catalog']['{count} plural2'],
['{count} plural2', '{count} plural2s', '']
)
with self.settings(LANGUAGE_CODE='ru'), override('pt'):
response = self.client.get('/jsi18n/')
self.assertEqual(
response.context['catalog']['{count} plural3'],
['{count} plural3', '{count} plural3s']
)
self.assertEqual(
response.context['catalog']['{count} plural2'],
['{count} plural2', '{count} plural2s']
)
def test_i18n_english_variant(self):
with override('en-gb'):
response = self.client.get('/jsi18n/')
self.assertIn(
'"this color is to be translated": "this colour is to be translated"',
response.context['catalog_str']
)
def test_i18n_language_non_english_default(self):
"""
        Check if the JavaScript i18n view returns an empty language catalog
        if the default language is non-English, the selected language
        is English and there is no 'en' translation available. See #13388,
#3594 and #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'Choisir une heure')
@modify_settings(INSTALLED_APPS={'append': 'view_tests.app0'})
def test_non_english_default_english_userpref(self):
"""
        Same as above with the difference that there IS an 'en' translation
        available. The JavaScript i18n view must return a non-empty language
        catalog with the proper English translations. See #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n_english_translation/')
self.assertContains(response, 'this app0 string is to be translated')
def test_i18n_language_non_english_fallback(self):
"""
Makes sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('none'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'Choisir une heure')
def test_escaping(self):
# Force a language via GET otherwise the gettext functions are a noop!
response = self.client.get('/jsi18n_admin/?language=de')
self.assertContains(response, '\\x04')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app5']})
def test_non_BMP_char(self):
"""
Non-BMP characters should not break the javascript_catalog (#21725).
"""
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n/app5/')
self.assertContains(response, 'emoji')
self.assertContains(response, '\\ud83d\\udca9')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']})
def test_i18n_language_english_default(self):
"""
Check if the JavaScript i18n view returns a complete language catalog
if the default language is en-us, the selected language has a
translation available and a catalog composed by djangojs domain
translations of multiple Python packages is requested. See #13388,
#3594 and #13514 for more details.
"""
base_trans_string = 'il faut traduire cette cha\\u00eene de caract\\u00e8res de '
app1_trans_string = base_trans_string + 'app1'
app2_trans_string = base_trans_string + 'app2'
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n_multi_packages1/')
self.assertContains(response, app1_trans_string)
self.assertContains(response, app2_trans_string)
response = self.client.get('/jsi18n/app1/')
self.assertContains(response, app1_trans_string)
self.assertNotContains(response, app2_trans_string)
response = self.client.get('/jsi18n/app2/')
self.assertNotContains(response, app1_trans_string)
self.assertContains(response, app2_trans_string)
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app3', 'view_tests.app4']})
def test_i18n_different_non_english_languages(self):
"""
        Similar to above but with neither the default nor the requested
        language being English.
"""
with self.settings(LANGUAGE_CODE='fr'), override('es-ar'):
response = self.client.get('/jsi18n_multi_packages2/')
self.assertContains(response, 'este texto de app3 debe ser traducido')
def test_i18n_with_locale_paths(self):
extended_locale_paths = settings.LOCALE_PATHS + [
path.join(
path.dirname(path.dirname(path.abspath(__file__))),
'app3',
'locale',
),
]
with self.settings(LANGUAGE_CODE='es-ar', LOCALE_PATHS=extended_locale_paths):
with override('es-ar'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'este texto de app3 debe ser traducido')
def test_i18n_unknown_package_error(self):
view = JavaScriptCatalog.as_view()
request = RequestFactory().get('/')
msg = 'Invalid package(s) provided to JavaScriptCatalog: unknown_package'
with self.assertRaisesMessage(ValueError, msg):
view(request, packages='unknown_package')
msg += ',unknown_package2'
with self.assertRaisesMessage(ValueError, msg):
view(request, packages='unknown_package+unknown_package2')
@override_settings(ROOT_URLCONF='view_tests.urls')
class I18nSeleniumTests(SeleniumTestCase):
# The test cases use fixtures & translations from these apps.
available_apps = [
'django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'view_tests',
]
@override_settings(LANGUAGE_CODE='de')
def test_javascript_gettext(self):
self.selenium.get(self.live_server_url + '/jsi18n_template/')
elem = self.selenium.find_element_by_id("gettext")
self.assertEqual(elem.text, "Entfernen")
elem = self.selenium.find_element_by_id("ngettext_sing")
self.assertEqual(elem.text, "1 Element")
elem = self.selenium.find_element_by_id("ngettext_plur")
self.assertEqual(elem.text, "455 Elemente")
elem = self.selenium.find_element_by_id("ngettext_onnonplural")
self.assertEqual(elem.text, "Bild")
elem = self.selenium.find_element_by_id("pgettext")
self.assertEqual(elem.text, "Kann")
elem = self.selenium.find_element_by_id("npgettext_sing")
self.assertEqual(elem.text, "1 Resultat")
elem = self.selenium.find_element_by_id("npgettext_plur")
self.assertEqual(elem.text, "455 Resultate")
elem = self.selenium.find_element_by_id("formats")
self.assertEqual(
elem.text,
"DATE_INPUT_FORMATS is an object; DECIMAL_SEPARATOR is a string; FIRST_DAY_OF_WEEK is a number;"
)
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']})
@override_settings(LANGUAGE_CODE='fr')
def test_multiple_catalogs(self):
self.selenium.get(self.live_server_url + '/jsi18n_multi_catalogs/')
elem = self.selenium.find_element_by_id('app1string')
self.assertEqual(elem.text, 'il faut traduire cette chaîne de caractères de app1')
elem = self.selenium.find_element_by_id('app2string')
self.assertEqual(elem.text, 'il faut traduire cette chaîne de caractères de app2')
|
{
"content_hash": "cb5a481d33dcffd6b2aea474f35804ae",
"timestamp": "",
"source": "github",
"line_count": 503,
"max_line_length": 109,
"avg_line_length": 48.514910536779325,
"alnum_prop": 0.6354136786460681,
"repo_name": "googleinterns/django",
"id": "fcbffa711d5aa6ebb84e0e7381004fc8455d0cd2",
"size": "24408",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/view_tests/tests/test_i18n.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "79183"
},
{
"name": "HTML",
"bytes": "228941"
},
{
"name": "JavaScript",
"bytes": "136792"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "14076970"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
}
|
__author__ = 'Mario'
import numpy as np
from scipy.stats import multivariate_normal as norm
import pandas as pd
import matplotlib.pyplot as plt
dataTraining = pd.read_table('./Data/iris_training.txt',delim_whitespace=True, header=None)
dataTest = pd.read_table('./Data/iris_test.txt',delim_whitespace=True, header=None)
#
# dataTraining = pd.read_table('./Data/wine_uci_train.txt',delim_whitespace=True, header=None)
# dataTest = pd.read_table('./Data/wine_uci_test.txt',delim_whitespace=True, header=None)
n = len(dataTraining)
nClassifiers = len(dataTraining.loc[0])
nTypesIris = 3
def readIrisTraining():
pass
# print dataIris[:3]
# print "this is: ", dataIris[0]
# print n
def gaussian(x, mu, cov):
# cov = np.cov([dataIris[dataIris[0]==1][1],dataIris[dataIris[0]==1][2]],
# dataIris[dataIris[0]==1][3],dataIris[dataIris[0]==1][4])
# print cov
# sigma1 = np.average(sigma)
# x = np.linspace(-4*sigma1,4*sigma1,4)
normPDF = norm.pdf(x,mu,cov)
return normPDF
def estimatedParameters(xArray):
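    # Maximum-likelihood estimates: per-feature sample mean and biased variance
    # (note the variance is normalized by the full training-set size n)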
mu = []
sigma = []
for i in range(1,nClassifiers):
mu.append(xArray[i].mean())
sigma.append((1.0/n)*(np.sum((xArray[i]-mu[i-1])**2)))
return mu, sigma
readIrisTraining()
parametersMu = []
parametersSigma = []
parametersCov = []
for i in range(1,nTypesIris+1):
mu, sigma = estimatedParameters(dataTraining[dataTraining[0]==i])
parametersMu.append(mu)
parametersSigma.append(sigma)
    covList = []
    for j in range(1, nClassifiers):
        # collect each feature column of class i for the covariance estimate
        covList.append(dataTraining[dataTraining[0]==i][j])
    cov = np.cov(np.array(covList))
    parametersCov.append(cov)
parametersLen = len(parametersMu)
SuccessList = []
for i in range(0, len(dataTest)):
tempXMaximum = 0
maxDistribution = 0
x = dataTest.loc[i]
for j in range(0,parametersLen):
tempX = (gaussian(x[1:], parametersMu[j], parametersCov[j]))
if tempX > tempXMaximum:
tempXMaximum = tempX
maxDistribution = j
if dataTest[0][i] == maxDistribution+1:
SuccessList.append(1)
else:
SuccessList.append(0)
# print(SuccessList)
print(np.average(SuccessList))
# np.cov([dataIris[dataIris[0]==1][1],dataIris[dataIris[0]==1][2],dataIris[dataIris[0]==1][3],dataIris[dataIris[0]==1][4]])
|
{
"content_hash": "62b7f5d886e562563176f8b9736a8f07",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 123,
"avg_line_length": 32.40506329113924,
"alnum_prop": 0.65703125,
"repo_name": "marioharper182/Patterns",
"id": "f07cc28b612f17186438569727d18c8df6e49554",
"size": "2560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PatternRecognition/ProgrammingProject1/MaxLiklihood.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25163"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_mds_facts
OBJECT = {
"from": 1,
"to": 1,
"total": 6,
"objects": [
"53de74b7-8f19-4cbe-99fc-a81ef0759bad"
]
}
SHOW_PLURAL_PAYLOAD = {
'limit': 1,
'details_level': 'uid'
}
SHOW_SINGLE_PAYLOAD = {
'name': 'object_which_is_not_exist'
}
api_call_object = 'mds'
api_call_object_plural_version = 'mdss'
failure_msg = '''{u'message': u'Requested object [object_which_is_not_exist] not found', u'code': u'generic_err_object_not_found'}'''
class TestCheckpointMdsFacts(object):
module = cp_mgmt_mds_facts
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
return connection_class_mock.return_value
def test_show_single_object_which_is_not_exist(self, mocker, connection_mock):
connection_mock.send_request.return_value = (404, failure_msg)
try:
result = self._run_module(SHOW_SINGLE_PAYLOAD)
except Exception as e:
result = e.args[0]
assert result['failed']
assert 'Checkpoint device returned error 404 with message ' + failure_msg == result['msg']
def test_show_few_objects(self, mocker, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(SHOW_PLURAL_PAYLOAD)
assert not result['changed']
assert OBJECT == result['ansible_facts'][api_call_object_plural_version]
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
|
{
"content_hash": "a9f07d06b0b2f8bf547ca01d6058f445",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 133,
"avg_line_length": 32.34848484848485,
"alnum_prop": 0.6721311475409836,
"repo_name": "thaim/ansible",
"id": "e9a145cd1b755994031caa458df31167184649e2",
"size": "2815",
"binary": false,
"copies": "18",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/check_point/test_cp_mgmt_mds_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_literal_map_entry257
except ImportError:
btp_literal_map_entry257 = sys.modules[
"onshape_client.oas.models.btp_literal_map_entry257"
]
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
class BTPLiteralMap256(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
            and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
            and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("documentation_type",): {
"FUNCTION": "FUNCTION",
"PREDICATE": "PREDICATE",
"CONSTANT": "CONSTANT",
"ENUM": "ENUM",
"USER_TYPE": "USER_TYPE",
"FEATURE_DEFINITION": "FEATURE_DEFINITION",
"FILE_HEADER": "FILE_HEADER",
"UNDOCUMENTABLE": "UNDOCUMENTABLE",
"UNKNOWN": "UNKNOWN",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"atomic": (bool,), # noqa: E501
"bt_type": (str,), # noqa: E501
"documentation_type": (str,), # noqa: E501
"end_source_location": (int,), # noqa: E501
"entries": (
[btp_literal_map_entry257.BTPLiteralMapEntry257],
), # noqa: E501
"node_id": (str,), # noqa: E501
"short_descriptor": (str,), # noqa: E501
"space_after": (btp_space10.BTPSpace10,), # noqa: E501
"space_before": (btp_space10.BTPSpace10,), # noqa: E501
"space_default": (bool,), # noqa: E501
"space_in_empty_list": (btp_space10.BTPSpace10,), # noqa: E501
"start_source_location": (int,), # noqa: E501
"trailing_comma": (bool,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"atomic": "atomic", # noqa: E501
"bt_type": "btType", # noqa: E501
"documentation_type": "documentationType", # noqa: E501
"end_source_location": "endSourceLocation", # noqa: E501
"entries": "entries", # noqa: E501
"node_id": "nodeId", # noqa: E501
"short_descriptor": "shortDescriptor", # noqa: E501
"space_after": "spaceAfter", # noqa: E501
"space_before": "spaceBefore", # noqa: E501
"space_default": "spaceDefault", # noqa: E501
"space_in_empty_list": "spaceInEmptyList", # noqa: E501
"start_source_location": "startSourceLocation", # noqa: E501
"trailing_comma": "trailingComma", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btp_literal_map256.BTPLiteralMap256 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
atomic (bool): [optional] # noqa: E501
bt_type (str): [optional] # noqa: E501
documentation_type (str): [optional] # noqa: E501
end_source_location (int): [optional] # noqa: E501
entries ([btp_literal_map_entry257.BTPLiteralMapEntry257]): [optional] # noqa: E501
node_id (str): [optional] # noqa: E501
short_descriptor (str): [optional] # noqa: E501
space_after (btp_space10.BTPSpace10): [optional] # noqa: E501
space_before (btp_space10.BTPSpace10): [optional] # noqa: E501
space_default (bool): [optional] # noqa: E501
space_in_empty_list (btp_space10.BTPSpace10): [optional] # noqa: E501
start_source_location (int): [optional] # noqa: E501
trailing_comma (bool): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
{
"content_hash": "6fa6cb4940e720a2918375ae57c97274",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 96,
"avg_line_length": 37.381188118811885,
"alnum_prop": 0.5701231624950338,
"repo_name": "onshape-public/onshape-clients",
"id": "715f58305937a4c527a2e199a58a29c193d09cdb",
"size": "7568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/onshape_client/oas/models/btp_literal_map256.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Go",
"bytes": "59674"
},
{
"name": "HTML",
"bytes": "3851790"
},
{
"name": "JavaScript",
"bytes": "2217"
},
{
"name": "Makefile",
"bytes": "559"
},
{
"name": "Python",
"bytes": "7560009"
},
{
"name": "Shell",
"bytes": "3475"
},
{
"name": "TypeScript",
"bytes": "1412661"
}
],
"symlink_target": ""
}
|
import csv,sys
import urllib
##### append some python libraries
sys.path.append("/usr/local/lib/python2.7/dist-packages") #TODO: change to a relative path
from netaddr import *
import redis
import splunk.mining.dcutils as dcu
# use the default splunk logger function for alerting
logger = dcu.getLogger()
# set the connection string to Redis DB
redis_server = '127.0.0.1'
redis_port = 6379
redis_ipdb = 0 # for IP lookups
redis_domaindb=1 # TODO: add a domain lookup
def ip_threat(clientip, red):
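    """Look up clientip in the Redis threat DB: exact IP entries first, then
    CIDR networks. Returns (ip, sources, categories, scores), the last three
    as comma-joined strings ("none"/"0" when nothing matches).
    """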
threatsource=[]
threatcategory=[]
threatscore=[]
#Find clientip in IP DB list
try:
temp = red.smembers('ip:'+clientip)
except:
logger.error('module="ThreatDB", message="Error on ThreatDB query"')
return (clientip,'','','')
    if len(temp) != 0:
for i in temp:
temp_arr=i.split(':')
threatsource.append(temp_arr[0])
threatcategory.append(temp_arr[1])
threatscore.append(temp_arr[2])
return (clientip, ",".join(threatsource), ",".join(threatcategory), ",".join(threatscore))
#Find clientip in NETs DB list
if len(temp) == 0:
try:
ip = IPAddress(clientip)
except:
return (clientip, "none", "none", "0")
threat_nets=[]
# Find all networks which have equal first octet
for i in red.sscan_iter(name='net:index',match=str(ip.words[0])+'*',count=500): #search by fetching 500 values per block
net = IPNetwork(i)
#Check if IP from this network
if ip in net:
nets_list = red.smembers('net:'+str(net))
if len(nets_list) != 0:
threat_nets = threat_nets + list(nets_list)
if len(threat_nets) == 0:
return (clientip, "none", "none", "0")
for i in threat_nets:
temp_arr=i.split(':')
threatsource.append(temp_arr[0])
threatcategory.append(temp_arr[1])
threatscore.append(temp_arr[2])
return (clientip, ",".join(threatsource), ",".join(threatcategory), ",".join(threatscore))
def main():
if len(sys.argv) != 3:
print "Usage: python redislookup.py [key field] [value field]"
sys.exit(0)
in_field = sys.argv[1]
out_field = sys.argv[2]
conn_db=0
if in_field == 'clientip': #TODO: change 'clientip' to 'ip' everywhere
conn_db = redis_ipdb
try:
redis_pool = redis.ConnectionPool(host=redis_server, port=redis_port, db=conn_db, socket_timeout=2)
redis_conn = redis.Redis(connection_pool=redis_pool)
redis_conn.ping()
except:
logger.error('module="ThreatDB", message="No ThreatDB connection"')
        sys.exit(1)  # cannot proceed without a Redis connection
r = csv.reader(sys.stdin)
w = csv.writer(sys.stdout)
header = []
first = True
for line in r:
if first:
header = line
if in_field not in header:
print "IP field must exist in CSV data"
sys.exit(0)
csv.writer(sys.stdout).writerow(header)
w = csv.DictWriter(sys.stdout, header)
first = False
continue
# Read the result
result = {}
i = 0
while i < len(header):
if i < len(line):
result[header[i]] = line[i]
else:
result[header[i]] = ''
i += 1
# If CLIENTIP is set
if in_field == 'clientip' and len(result[in_field]):
ip_address, threat_source, threat_category, threat_score = ip_threat(str(result[in_field]),redis_conn)
out = '%s,"%s","%s","%s"' % (ip_address, threat_source, threat_category, threat_score)
print out
redis_pool.disconnect()
main()
|
{
"content_hash": "e1d7a243cd3885c6ba08105513accd42",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 128,
"avg_line_length": 33.025862068965516,
"alnum_prop": 0.5638214565387627,
"repo_name": "rstcloud/threatdb",
"id": "1e0f2bf4ba2adbac66aac6f31687585b73ad171a",
"size": "4036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "threatDB/bin/redisworker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21097"
},
{
"name": "Shell",
"bytes": "3097"
}
],
"symlink_target": ""
}
|
"""
Parser for the .lang translation format.
"""
import codecs
import re
import sys
from collections import namedtuple
from parsimonious.exceptions import ParseError as ParsimoniousParseError
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
from pontoon.base.formats.base import ParseError, ParsedResource
from pontoon.base.vcs_models import VCSTranslation
# Use a class to store comments so we can distinguish them from entities.
LangComment = namedtuple('LangComment', ['content'])
BLANK_LINE = 'blank_line'
TAG_REGEX = re.compile(r'\{(ok|l10n-extra)\}')
class LangEntity(VCSTranslation):
def __init__(self, source_string, translation_string, tags):
super(LangEntity, self).__init__(
key=source_string, # Langfiles use the source as the key.
source_string=source_string,
strings={None: translation_string}, # Langfiles lack plural support
comments=[],
fuzzy=False, # Langfiles don't support fuzzy status
)
self.tags = set(tags)
# If the translation matches the source string without the {ok}
# tag, then the translation isn't actually valid, so we remove
# it.
if source_string == translation_string and 'ok' not in tags:
del self.strings[None]
@property
def extra(self):
return {'tags': list(self.tags)}
class LangResource(ParsedResource):
def __init__(self, path, children):
self.path = path
self.children = children
@property
def translations(self):
return [c for c in self.children if isinstance(c, LangEntity)]
def save(self, locale):
with codecs.open(self.path, 'w', 'utf-8') as f:
for child in self.children:
if isinstance(child, LangEntity):
self.write_entity(f, child)
elif isinstance(child, LangComment):
self.write_comment(f, child)
elif child == BLANK_LINE:
f.write(u'\n')
def write_entity(self, f, entity):
for comment in entity.comments:
f.write(u'# {0}\n'.format(comment))
f.write(u';{0}\n'.format(entity.source_string))
translation = entity.strings.get(None, None)
if translation is None:
# No translation? Output the source string and remove {ok}.
translation = entity.source_string
entity.tags.discard('ok')
elif translation == entity.source_string:
# Translation is equal to the source? Include {ok}.
entity.tags.add('ok')
elif translation != entity.source_string:
# Translation is different? Remove {ok}, it's unneeded.
entity.tags.discard('ok')
if entity.extra.get('tags'):
tags = [u'{{{tag}}}'.format(tag=t) for t in entity.tags]
translation = u'{translation} {tags}'.format(
translation=translation,
tags=u' '.join(tags)
)
f.write(u'{0}\n'.format(translation))
def write_comment(self, f, comment):
f.write(u'## {0}\n'.format(comment.content))
class LangVisitor(NodeVisitor):
grammar = Grammar(r"""
lang_file = (comment / entity / blank_line)*
comment = "#"+ line_content line_ending
line_content = ~r".*"
line_ending = ~r"$\n?"m # Match at EOL and EOF without newline.
blank_line = ~r"((?!\n)\s)*" line_ending
entity = string translation
string = ";" line_content line_ending
translation = line_content line_ending
""")
def visit_lang_file(self, node, children):
"""
Remove comments that are associated with an entity and add them
to the entity's comments list instead.
"""
new_children = []
comments = []
for child in children:
if isinstance(child, LangComment):
comments.append(child)
continue
# Add comments to entity, or if there is no entity, leave
# them in the list of children.
if isinstance(child, LangEntity):
child.comments = [c.content for c in comments]
else:
new_children += comments
comments = []
new_children.append(child)
return new_children
def visit_comment(self, node, (marker, content, end)):
return LangComment(content.text.strip())
def visit_blank_line(self, node, (whitespace, newline)):
return BLANK_LINE
def visit_entity(self, node, (string, translation)):
# Strip tags out of translation if they exist.
tags = []
tag_matches = list(re.finditer(TAG_REGEX, translation))
if tag_matches:
tags = [m.group(1) for m in tag_matches]
translation = translation[:tag_matches[0].start()].strip()
return LangEntity(string, translation, tags)
def visit_entity_comment(self, node, (marker, content, end)):
return content.text.strip()
def visit_string(self, node, (marker, content, end)):
return content.text.strip()
def visit_translation(self, node, (content, end)):
return content.text.strip()
def generic_visit(self, node, children):
if children and len(children) == 1:
return children[0]
else:
return children or node
def parse(path, source_path=None):
# Read as utf-8-sig in case there's a BOM at the start of the file
# that we want to remove.
with codecs.open(path, 'r', 'utf-8-sig') as f:
content = f.read()
try:
children = LangVisitor().parse(content)
except ParsimoniousParseError as err:
wrapped = ParseError(u'Failed to parse {path}: {err}'.format(path=path, err=err))
raise wrapped, None, sys.exc_info()[2]
return LangResource(path, children)
|
{
"content_hash": "f0d28b1e60dcd176ee430e6310afe391",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 89,
"avg_line_length": 33.044444444444444,
"alnum_prop": 0.6030598520511096,
"repo_name": "vivekanand1101/pontoon",
"id": "bb4de7c1af024f6550f07278def7ce33b2311701",
"size": "5948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pontoon/base/formats/lang.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "132663"
},
{
"name": "HTML",
"bytes": "56698"
},
{
"name": "JavaScript",
"bytes": "800688"
},
{
"name": "Python",
"bytes": "403292"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
}
|
import pygame
class Muro(pygame.sprite.Sprite):
"""Clase para controlar los muros que hay distribuidos por el mapa"""
"""
    Class constructor
"""
def __init__(self,posx,posy,tipoLadrillo):
pygame.sprite.Sprite.__init__(self)
self.tipoLadrillo = tipoLadrillo
        if tipoLadrillo:  # load the indicated wall type
self.imageMuro = pygame.image.load('Imagenes/ladrillosPeque.png')
self.imageMuroGolpe1 = pygame.image.load('Imagenes/ladrillosPequeGolpe1.png')
else:
self.imageMuro = pygame.image.load('Imagenes/piedraPeque.png')
self.imageMuroGolpe1 = pygame.image.load('Imagenes/piedraPequeGolpe1.png')
self.imageMuroGolpe2 = pygame.image.load('Imagenes/piedraPequeGolpe2.png')
        self.rect = self.imageMuro.get_rect()  # get the image's rectangle
        if tipoLadrillo:
            self.resistencia = 2  # the wall's resistance to shots
        else:
            self.resistencia = 3
        self.rect.top = posy  # position where the wall will appear
        self.rect.left = posx
"""
    Method that draws the wall
"""
def dibujar(self,superficie):
if self.tipoLadrillo:
if self.resistencia == 2:
superficie.blit(self.imageMuro, self.rect)
else:
superficie.blit(self.imageMuroGolpe1, self.rect)
else:
if self.resistencia == 3:
superficie.blit(self.imageMuro, self.rect)
elif self.resistencia == 2:
superficie.blit(self.imageMuroGolpe1, self.rect)
else:
superficie.blit(self.imageMuroGolpe2, self.rect)
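    """
    Method that subtracts one hit point; returns True while the wall still stands
    """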
def restarVida(self):
self.resistencia -= 1
if self.resistencia > 0:
return True
else:
return False
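# Minimal usage sketch (hypothetical values; assumes the Imagenes/ assets exist):
#   muro = Muro(64, 32, 1)      # brick wall at x=64, y=32
#   muro.dibujar(pantalla)      # blit onto a pygame display Surface
#   vivo = muro.restarVida()    # True while the wall still has resistance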
|
{
"content_hash": "a7293897142ea809225539e83e6d835f",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 89,
"avg_line_length": 34.36842105263158,
"alnum_prop": 0.5747830525778458,
"repo_name": "DavidRamirez19/Total-Demolition---PYGAME",
"id": "57bf79ffdaf4ec46f1a93c66e8af736e4655d78a",
"size": "1959",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Total Demolition 1.4/Clases/Muro.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "347172"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
import sys
import collections
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import torchfold
from program_synthesis.common.models import beam_search
from program_synthesis.common.modules import decoders
from program_synthesis.common.modules import seq2seq
from program_synthesis.algolisp.dataset import data
from program_synthesis.algolisp.dataset import executor
from program_synthesis.algolisp.models import prepare_spec
from program_synthesis.algolisp.models.base import InferenceResult, MaskedMemory, get_attn_mask
from program_synthesis.algolisp.models.seq2code_model import Seq2CodeModel
from program_synthesis.algolisp.models.modules import encoders
class Spec2Seq(seq2seq.Sequence2Sequence):
def __init__(self, input_vocab_size, output_vocab_size, args):
super(Spec2Seq, self).__init__(
input_vocab_size, output_vocab_size, args, encoder_cls=encoders.SpecEncoder
)
def encode_text(self, inputs):
# inputs: PackedSequencePlus
return self.encoder.text_encoder(inputs)
def encode_io(self, input_keys, inputs, arg_nums, outputs):
input_keys_embed = self.decoder.embed(input_keys)
return self.encoder.io_encoder(input_keys_embed, inputs, arg_nums, outputs)
def encode_code(self, code_seqs):
# code_seqs: PackedSequencePlus
return self.encoder.code_encoder(code_seqs.apply(self.decoder.embed))
def encode_trace(self, prepared_trace):
return self.encoder.trace_encoder(prepared_trace)
def extend_tensors(
self, code_info, batch_size, batch_ids):
        # TODO: this should probably be a separate module with its own parameters.
if code_info:
code_enc, code_memory, orig_seq_lengths = code_info
# Every item in the batch has code.
if len(batch_ids) == batch_size:
return code_enc, code_memory, orig_seq_lengths
# Otherwise, stagger empty encodings/memories with real ones
enc_to_stack = [self.empty_candidate_code_hidden] * batch_size
memory_to_stack = [torch.zeros_like(code_memory[0])] * batch_size
seq_lengths = [0] * batch_size
for i, batch_id in enumerate(batch_ids):
enc_to_stack[batch_id] = code_enc[i]
memory_to_stack[batch_id] = code_memory[i]
seq_lengths[batch_id] = orig_seq_lengths[i]
enc = torch.stack(enc_to_stack)
memory = torch.stack(memory_to_stack)
return enc, memory, seq_lengths
enc = self.empty_candidate_code_hidden.expand(batch_size, -1)
return enc, None, None
class Seq2SeqModel(Seq2CodeModel):
def __init__(self, args):
self.vocab = data.load_vocabs(args.word_vocab, args.code_vocab, args.num_placeholders, getattr(args, 'vocab_mapping', True))
self.model = Spec2Seq(
self.vocab.word_vocab_size + args.num_placeholders, self.vocab.code_vocab_size + args.num_placeholders, args)
self._executor = None
super(Seq2SeqModel, self).__init__(args)
@property
def executor(self):
if self._executor is None:
self._executor = executor.get_executor(self.args)()
return self._executor
def encode(self, vocab, batch, volatile):
inputs = prepare_spec.encode_text(
vocab.wordtoi, batch, self.args.cuda, volatile)
hidden, memory = self.model.encode_text(inputs)
memory, seq_lengths, hidden = memory.pad(batch_first=True,
others_to_unsort=[hidden])
attn_mask = get_attn_mask(seq_lengths, self.args.cuda) if seq_lengths else None
return hidden, (memory, attn_mask)
def decode(self, vocab, batch, hidden, memory, volatile):
outputs = prepare_spec.encode_output_code_seq(
vocab.codetoi, batch, self.args.cuda, volatile)
logits = self.model.decode(hidden, memory, data.replace_pad_with_end(outputs[:, :-1]))
return logits.view(-1, logits.size(2)), outputs[:, 1:].contiguous().view(-1)
def compute_loss(self, batch, volatile=False):
vocab = self.reset_vocab()
hidden, memory = self.encode(vocab, batch, volatile)
logits, labels = self.decode(vocab, batch, hidden, memory, volatile)
return self.criterion(logits, labels)
def compute_loss_(self, batch, volatile):
return self.compute_loss(batch, volatile)
def inference(self, batch):
vocab = self.reset_vocab()
hidden, memory = self.encode(vocab, batch, volatile=True)
beam_size = self.args.max_beam_trees
if beam_size > 1:
sequences = beam_search.beam_search(
len(batch),
decoders.BeamSearchState(
[hidden for _ in range(self.args.num_decoder_layers)],
prev_output=None),
MaskedMemory(memory[0], memory[1]),
self.model.decoder.decode_token,
beam_size,
cuda=self.args.cuda,
max_decoder_length=self.args.max_decoder_length)
return self._try_sequences([vocab.itocode]*len(sequences), sequences, batch, beam_size)
else:
result_ids = self.model.decoder.sample(hidden, memory)
return [InferenceResult(code_sequence=[vocab.itocode(idx.item()) for idx in ids]) for ids in result_ids]
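# Illustrative sketch (added for this edit; not part of the original module):
# the core of Spec2Seq.extend_tensors above is a scatter that places
# per-example code encodings back into a full batch, defaulting missing slots
# to an "empty" row. A minimal standalone version with hypothetical shapes
# (helper name is hypothetical):
def _scatter_into_batch(enc, batch_ids, batch_size, empty_row):
    rows = [empty_row] * batch_size        # default every slot to "empty"
    for i, b in enumerate(batch_ids):
        rows[b] = enc[i]                   # overwrite slots that have code
    return torch.stack(rows)

# e.g. _scatter_into_batch(torch.randn(2, 4), [0, 3], 5, torch.zeros(4))
# yields a (5, 4) tensor with the real encodings at rows 0 and 3.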
|
{
"content_hash": "d880ec0fcfa9ded7a1d3af9069f5e07c",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 132,
"avg_line_length": 40.289855072463766,
"alnum_prop": 0.6483812949640287,
"repo_name": "nearai/program_synthesis",
"id": "7b1809725c9deed517cb885a95bfbef19de04b34",
"size": "5560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "program_synthesis/algolisp/models/seq2seq_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "14936"
},
{
"name": "Jupyter Notebook",
"bytes": "2469525"
},
{
"name": "Python",
"bytes": "1024751"
}
],
"symlink_target": ""
}
|
from traits.api import HasTraits, List, Float, Enum, Bool
from traitsui.api import Item, CheckListEditor, UItem, HGroup, VGroup
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.pychron_traits import BorderHGroup
from pychron.persistence_loggable import PersistenceMixin
from pychron.pychron_constants import ANALYSIS_TYPES, NULL_STR
PREFIX = {"Last Day": 24, "Last Week": 24 * 7, "Last Month": 24 * 30}
class RecentView(HasTraits, PersistenceMixin):
mass_spectrometers = List(dump=True)
available_mass_spectrometers = List
use_mass_spectrometers = Bool
nhours = Float(dump=True)
ndays = Float(dump=True)
presets = Enum(NULL_STR, "Last Day", "Last Week", "Last Month", dump=True)
analysis_types = List(ANALYSIS_TYPES, dump=True)
available_analysis_types = List(ANALYSIS_TYPES)
persistence_name = "recent_view"
def traits_view(self):
v = okcancel_view(
VGroup(
HGroup(
BorderHGroup(
UItem(
"presets",
),
label="Presets",
),
BorderHGroup(
Item(
"ndays",
label="Days",
tooltip="Number of days. Set Presets to --- to enable",
enabled_when='presets=="---"',
),
UItem(
"nhours",
tooltip="Number of hours. Set Presets to --- to enable",
enabled_when='presets=="---"',
),
label="Time",
),
),
BorderHGroup(
UItem(
"mass_spectrometers",
style="custom",
editor=CheckListEditor(
name="available_mass_spectrometers", cols=5
),
),
defined_when="use_mass_spectrometers",
label="Mass Spectrometers",
),
BorderHGroup(
UItem(
"analysis_types",
style="custom",
editor=CheckListEditor(name="available_analysis_types", cols=5),
),
label="Analysis Types",
),
),
title="Recent Analyses",
)
return v
def _presets_changed(self, new):
if new and new != NULL_STR:
self.nhours = PREFIX[new]
def _ndays_changed(self, new):
if new:
self.presets = NULL_STR
self.nhours = new * 24
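    # Illustrative sketch (added for this edit; not part of the original
    # class): the two change handlers above keep nhours consistent -- a named
    # preset maps through PREFIX, while a manual day count clears the preset
    # and converts days to hours. The same conversion as a plain function
    # (helper name is hypothetical):
    @staticmethod
    def _hours_for(preset=None, ndays=0):
        if preset in PREFIX:
            return PREFIX[preset]
        return ndays * 24

# RecentView._hours_for(preset="Last Week") == 168
# RecentView._hours_for(ndays=3) == 72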
# ============= EOF =============================================
|
{
"content_hash": "0681de2e45c4a2549862e3a06fb65c03",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 88,
"avg_line_length": 34.30232558139535,
"alnum_prop": 0.4471186440677966,
"repo_name": "USGSDenverPychron/pychron",
"id": "0df3a0658aac9256b2602f2631080125c9745b61",
"size": "3678",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/envisage/browser/recent_view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.template import Template, Context
from django.utils.html import strip_tags
from ...core.tests import utils
from .models import CommentPoll, CommentPollChoice, CommentPollVote, PollMode
from .forms import PollVoteManyForm
from .utils.render_static import post_render_static_polls
from .utils import render
User = get_user_model()
class PollViewTest(TestCase):
def setUp(self):
cache.clear()
self.user = utils.create_user()
self.user2 = utils.create_user()
self.category = utils.create_category()
self.topic = utils.create_topic(self.category, user=self.user)
self.topic2 = utils.create_topic(self.category, user=self.user2)
self.comment = utils.create_comment(topic=self.topic)
self.user_comment = utils.create_comment(topic=self.topic, user=self.user)
self.poll = CommentPoll.objects.create(comment=self.comment, name='foo')
self.poll_multi = CommentPoll.objects.create(comment=self.comment, name='bar', choice_max=2)
def test_poll_close_logged_in(self):
"""
User must be logged in
"""
response = self.client.post(reverse('spirit:comment:poll:close', kwargs={'pk': self.poll.pk, }),
{})
self.assertEqual(response.status_code, 302)
def test_poll_close_wrong_user(self):
"""
Try to close another user poll should return 404
"""
utils.login(self)
response = self.client.post(reverse('spirit:comment:poll:close', kwargs={'pk': self.poll.pk, }),
{})
self.assertEqual(response.status_code, 404)
def test_poll_close_get(self):
"""
GET, poll_close
"""
utils.login(self)
response = self.client.get(reverse('spirit:comment:poll:close', kwargs={'pk': self.poll.pk, }))
self.assertEqual(response.status_code, 405)
def test_poll_close_post(self):
"""
POST, poll_close
"""
utils.login(self)
poll = CommentPoll.objects.create(comment=self.user_comment, name='foo')
response = self.client.post(reverse('spirit:comment:poll:close', kwargs={'pk': poll.pk, }),
{})
expected_url = poll.get_absolute_url()
self.assertRedirects(response, expected_url, status_code=302, target_status_code=302)
self.assertTrue(CommentPoll.objects.get(pk=poll.pk).is_closed)
def test_poll_close_open_post(self):
"""
POST, poll_open
"""
utils.login(self)
poll = CommentPoll.objects.create(comment=self.user_comment, name='foo', close_at=timezone.now())
self.assertTrue(poll.is_closed)
response = self.client.post(reverse('spirit:comment:poll:open', kwargs={'pk': poll.pk, }),
{})
expected_url = poll.get_absolute_url()
self.assertRedirects(response, expected_url, status_code=302, target_status_code=302)
self.assertFalse(CommentPoll.objects.get(pk=poll.pk).is_closed)
def test_poll_vote_logged_in(self):
"""
User must be logged in
"""
response = self.client.post(reverse('spirit:comment:poll:vote', kwargs={'pk': self.poll.pk, }),
{})
expected_url = reverse('spirit:user:auth:login') + "?next=" + self.poll.get_absolute_url()
self.assertRedirects(response, expected_url, status_code=302)
def test_poll_vote_get(self):
"""
GET, poll_vote
Post is required
"""
utils.login(self)
response = self.client.get(reverse('spirit:comment:poll:vote', kwargs={'pk': self.poll.pk, }))
self.assertEqual(response.status_code, 405)
def test_poll_vote_post(self):
"""
POST, poll_vote
"""
utils.login(self)
choice = CommentPollChoice.objects.create(poll=self.poll, number=1, description="op1")
form_data = {'choices': choice.pk, }
response = self.client.post(reverse('spirit:comment:poll:vote', kwargs={'pk': self.poll.pk, }),
form_data)
expected_url = self.poll.get_absolute_url()
self.assertRedirects(response, expected_url, status_code=302, target_status_code=302)
self.assertEqual(len(CommentPollVote.objects.filter(choice=choice)), 1)
def test_poll_vote_post_invalid(self):
"""
POST, poll_vote
"""
utils.login(self)
response = self.client.post(reverse('spirit:comment:poll:vote', kwargs={'pk': self.poll.pk, }),
{}, follow=True)
self.assertEqual(len(response.context['messages']), 1) # error message
def test_poll_vote_post_invalid_redirect(self):
"""
POST, poll_vote
"""
utils.login(self)
response = self.client.post(reverse('spirit:comment:poll:vote', kwargs={'pk': self.poll.pk, }),
{})
expected_url = self.poll.get_absolute_url()
self.assertRedirects(response, expected_url, status_code=302, target_status_code=302)
def test_poll_vote_post_multi(self):
"""
        Should be able to vote for multiple options
"""
utils.login(self)
choice_a = CommentPollChoice.objects.create(poll=self.poll_multi, number=1, description="op a")
choice_b = CommentPollChoice.objects.create(poll=self.poll_multi, number=2, description="op b")
CommentPollChoice.objects.create(poll=self.poll_multi, number=3, description="op c")
form_data = {'choices': [choice_a.pk, choice_b.pk]}
response = self.client.post(reverse('spirit:comment:poll:vote', kwargs={'pk': self.poll_multi.pk, }),
form_data)
expected_url = self.poll.get_absolute_url()
self.assertRedirects(response, expected_url, status_code=302, target_status_code=302)
self.assertEqual(len(CommentPollVote.objects.all()), 2)
self.assertEqual(len(CommentPollVote.objects.filter(choice=choice_a.pk)), 1)
self.assertEqual(len(CommentPollVote.objects.filter(choice=choice_b.pk)), 1)
def test_poll_vote_post_count(self):
"""
Should increase the vote counters
"""
utils.login(self)
choice_a = CommentPollChoice.objects.create(poll=self.poll_multi, number=1, description="op a")
choice_b = CommentPollChoice.objects.create(poll=self.poll_multi, number=2, description="op b")
choice_c = CommentPollChoice.objects.create(poll=self.poll_multi, number=3, description="op c")
form_data = {'choices': [choice_a.pk, choice_b.pk]}
response = self.client.post(
reverse('spirit:comment:poll:vote', kwargs={'pk': self.poll_multi.pk, }), form_data
)
expected_url = self.poll.get_absolute_url()
self.assertRedirects(response, expected_url, status_code=302, target_status_code=302)
self.assertEqual(CommentPollChoice.objects.get(pk=choice_a.pk).vote_count, 1)
self.assertEqual(CommentPollChoice.objects.get(pk=choice_b.pk).vote_count, 1)
self.assertEqual(CommentPollChoice.objects.get(pk=choice_c.pk).vote_count, 0)
form_data = {'choices': [choice_a.pk]}
response = self.client.post(
reverse('spirit:comment:poll:vote', kwargs={'pk': self.poll_multi.pk, }), form_data
)
expected_url = self.poll.get_absolute_url()
self.assertRedirects(response, expected_url, status_code=302, target_status_code=302)
self.assertEqual(CommentPollChoice.objects.get(pk=choice_a.pk).vote_count, 1)
self.assertEqual(CommentPollChoice.objects.get(pk=choice_b.pk).vote_count, 0)
self.assertEqual(CommentPollChoice.objects.get(pk=choice_c.pk).vote_count, 0)
def test_poll_voters_logged_in(self):
"""
User must be logged in
"""
poll_choice = CommentPollChoice.objects.create(poll=self.poll, number=1, description="op1")
response = self.client.get(reverse('spirit:comment:poll:voters', kwargs={'pk': poll_choice.pk, }))
self.assertEqual(response.status_code, 302)
def test_poll_voters(self):
"""
Should query choice voters
"""
poll_choice = CommentPollChoice.objects.create(poll=self.poll, number=1, description="op1")
poll_choice2 = CommentPollChoice.objects.create(poll=self.poll, number=2, description="op2")
vote = CommentPollVote.objects.create(voter=self.user, choice=poll_choice)
CommentPollVote.objects.create(voter=self.user2, choice=poll_choice2)
utils.login(self)
response = self.client.get(reverse('spirit:comment:poll:voters', kwargs={'pk': poll_choice.pk, }))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['choice'], poll_choice)
self.assertEqual(list(response.context['votes']), [vote])
def test_poll_voters_secret(self):
"""
        Should forbid viewing the voters of a secret poll while it is not closed
"""
poll = CommentPoll.objects.create(comment=self.comment, name='foobar', mode=PollMode.SECRET)
poll_choice = CommentPollChoice.objects.create(poll=poll, number=1, description="op1")
utils.login(self)
response = self.client.get(reverse('spirit:comment:poll:voters', kwargs={'pk': poll_choice.pk, }))
self.assertEqual(response.status_code, 403)
def test_poll_voters_secret_closed(self):
"""
        Should allow viewing the voters of a secret poll once it is closed
"""
yesterday = timezone.now() - timezone.timedelta(days=1)
poll = CommentPoll.objects.create(comment=self.comment, name='foobar',
mode=PollMode.SECRET, close_at=yesterday)
poll_choice = CommentPollChoice.objects.create(poll=poll, number=1, description="op1")
utils.login(self)
response = self.client.get(reverse('spirit:comment:poll:voters', kwargs={'pk': poll_choice.pk, }))
self.assertEqual(response.status_code, 200)
class PollFormTest(TestCase):
def setUp(self):
cache.clear()
self.user = utils.create_user()
self.user2 = utils.create_user()
self.category = utils.create_category()
self.topic = utils.create_topic(self.category, user=self.user)
self.comment = utils.create_comment(topic=self.topic)
self.comment2 = utils.create_comment(topic=self.topic)
# Single choice
self.poll = CommentPoll.objects.create(comment=self.comment, name='foo')
self.poll_choice = CommentPollChoice.objects.create(poll=self.poll, number=1, description="op1")
self.poll_choice2 = CommentPollChoice.objects.create(poll=self.poll, number=2, description="op2")
self.poll_vote = CommentPollVote.objects.create(voter=self.user, choice=self.poll_choice)
self.poll_vote2 = CommentPollVote.objects.create(voter=self.user2, choice=self.poll_choice)
        # ...poor man's prefetch
self.poll_choice.votes = [self.poll_vote]
self.poll.choices = [self.poll_choice, self.poll_choice2]
# Multi choice
self.poll_multi = CommentPoll.objects.create(comment=self.comment2, name='bar', choice_max=2)
self.poll_multi_choice = CommentPollChoice.objects.create(poll=self.poll_multi, number=1, description="op1")
self.poll_multi_choice2 = CommentPollChoice.objects.create(poll=self.poll_multi, number=2, description="op2")
self.poll_multi_choice3 = CommentPollChoice.objects.create(poll=self.poll_multi, number=3, description="op3")
self.poll_multi_vote = CommentPollVote.objects.create(voter=self.user, choice=self.poll_multi_choice)
self.poll_multi_vote2 = CommentPollVote.objects.create(voter=self.user, choice=self.poll_multi_choice2)
self.poll_multi_vote3 = CommentPollVote.objects.create(voter=self.user2, choice=self.poll_multi_choice)
        # ...poor man's prefetch
self.poll_multi_choice.votes = [self.poll_multi_vote]
self.poll_multi_choice2.votes = [self.poll_multi_vote2]
self.poll_multi.choices = [self.poll_multi_choice, self.poll_multi_choice2]
def test_vote_load_initial_single(self):
"""
TopicPollVoteManyForm
"""
form = PollVoteManyForm(user=self.user, poll=self.poll)
form.load_initial()
self.assertEqual(form.initial, {'choices': self.poll_choice.pk, })
def test_vote_load_initial_multi(self):
"""
TopicPollVoteManyForm
"""
form = PollVoteManyForm(user=self.user, poll=self.poll_multi)
form.load_initial()
self.assertDictEqual(form.initial, {'choices': [self.poll_multi_choice.pk, self.poll_multi_choice2.pk], })
def test_vote_load_initial_empty(self):
"""
TopicPollVoteManyForm
"""
CommentPollVote.objects.all().delete()
self.poll_choice.votes = []
form = PollVoteManyForm(user=self.user, poll=self.poll)
form.load_initial()
self.assertEqual(form.initial, {})
def test_vote_load_initial_choice_limit(self):
"""
Load initial for a single choice poll that was previously a multi choice poll
"""
# multi to single
self.poll_multi.choice_max = 1
form = PollVoteManyForm(user=self.user, poll=self.poll_multi)
form.load_initial()
self.assertDictEqual(form.initial, {'choices': self.poll_multi_choice.pk, })
# single to multi
self.poll.choice_max = 2
form = PollVoteManyForm(user=self.user, poll=self.poll)
form.load_initial()
self.assertDictEqual(form.initial, {'choices': [self.poll_choice.pk, ], })
def test_vote_poll_closed(self):
"""
        Can't vote on a closed poll
"""
self.poll.close_at = timezone.now()
self.poll.save()
form_data = {'choices': self.poll_choice.pk, }
form = PollVoteManyForm(user=self.user, poll=self.poll, data=form_data)
self.assertFalse(form.is_valid())
def test_create_vote_single(self):
"""
TopicPollVoteManyForm
"""
CommentPollVote.objects.all().delete()
form_data = {'choices': self.poll_choice.pk, }
form = PollVoteManyForm(user=self.user, poll=self.poll, data=form_data)
self.assertTrue(form.is_valid())
form.save_m2m()
self.assertEqual(len(CommentPollVote.objects.all()), 1)
self.assertEqual(len(CommentPollVote.objects.filter(choice=self.poll_choice, is_removed=False)), 1)
def test_create_vote_multi(self):
"""
TopicPollVoteManyForm
"""
CommentPollVote.objects.all().delete()
self.poll_multi_choice.votes = []
self.poll_multi_choice2.votes = []
form_data = {'choices': [self.poll_multi_choice.pk, self.poll_multi_choice2.pk], }
form = PollVoteManyForm(user=self.user, poll=self.poll_multi, data=form_data)
self.assertTrue(form.is_valid())
def test_create_vote_multi_invalid(self):
"""
        Limit selected choices to choice_max
"""
CommentPollVote.objects.all().delete()
self.poll_multi_choice.votes = []
self.poll_multi_choice2.votes = []
form_data = {'choices': [self.poll_multi_choice.pk,
self.poll_multi_choice2.pk,
self.poll_multi_choice3.pk], }
form = PollVoteManyForm(user=self.user, poll=self.poll_multi, data=form_data)
self.assertFalse(form.is_valid())
def test_update_vote_single(self):
"""
TopicPollVoteManyForm
"""
self.assertEqual(len(CommentPollVote.objects.filter(choice=self.poll_choice2, is_removed=False)), 0)
self.assertEqual(len(CommentPollVote.objects.filter(choice=self.poll_choice, is_removed=False)), 2)
form_data = {'choices': self.poll_choice2.pk, }
form = PollVoteManyForm(user=self.user, poll=self.poll, data=form_data)
self.assertTrue(form.is_valid())
form.save_m2m()
self.assertEqual(len(CommentPollVote.objects.filter(choice=self.poll_choice2, is_removed=False)), 1)
self.assertEqual(len(CommentPollVote.objects.filter(choice=self.poll_choice, is_removed=False)), 1)
class CommentPollTemplateTagsTest(TestCase):
def setUp(self):
cache.clear()
self.user = utils.create_user()
self.category = utils.create_category()
self.topic = utils.create_topic(category=self.category)
self.user_comment = utils.create_comment(topic=self.topic, user=self.user, comment_html="<poll name=foo>")
self.user_poll = CommentPoll.objects.create(comment=self.user_comment, name='foo')
self.user_comment_with_polls = self.user_comment.__class__.objects\
.filter(pk=self.user_comment.pk)\
.with_polls(self.user)\
.first()
self.request = RequestFactory().get('/')
self.request.user = self.user
def test_render_polls_form(self):
"""
Should display poll vote form
"""
res = []
def mock_render_to_string(tlt, ctx):
res.append(tlt)
res.append(ctx)
org_render_to_string, render.render_to_string = render.render_to_string, mock_render_to_string
try:
render.render_polls(self.user_comment_with_polls, self.request, 'csrf_token_foo')
self.assertEqual(len(res), 2)
template, context = res[0], res[1]
self.assertEqual(template, 'spirit/comment/poll/_form.html')
self.assertEqual(context['form'].poll, self.user_poll)
self.assertIsInstance(context['poll'], CommentPoll)
self.assertEqual(context['user'], self.user)
self.assertEqual(context['comment'], self.user_comment_with_polls)
self.assertEqual(context['request'], self.request)
self.assertEqual(context['csrf_token'], 'csrf_token_foo')
finally:
render.render_to_string = org_render_to_string
def test_render_polls_template_form(self):
"""
Should display poll vote form
"""
out = Template(
"{% load spirit_tags %}"
"{% post_render_comment comment=comment %}"
).render(Context({'comment': self.user_comment_with_polls, 'request': self.request, 'csrf_token': 'foo'}))
self.assertNotEqual(out.strip(), "")
self.assertTrue("<poll" not in out)
form_id = 'id="p%s"' % self.user_poll.pk
self.assertTrue(form_id in out)
show_link = '?show_poll=%(pk)s#p%(pk)s' % {'pk': self.user_poll.pk}
self.assertTrue(show_link in out)
def test_render_polls_template_form_not_author(self):
"""
Should display poll vote form
"""
request = RequestFactory().get('/')
request.user = utils.create_user()
out = Template(
"{% load spirit_tags %}"
"{% post_render_comment comment=comment %}"
).render(Context({'comment': self.user_comment_with_polls, 'request': request, 'csrf_token': 'foo'}))
self.assertNotEqual(out.strip(), "")
form_id = 'id="p%s"' % self.user_poll.pk
self.assertTrue(form_id in out)
def test_render_polls_template_form_close(self):
"""
Should display the close button
"""
out = Template(
"{% load spirit_tags %}"
"{% post_render_comment comment=comment %}"
).render(Context({'comment': self.user_comment_with_polls, 'request': self.request, 'csrf_token': 'foo'}))
self.assertNotEqual(out.strip(), "")
close_link = reverse('spirit:comment:poll:close', kwargs={'pk': self.user_poll.pk})
self.assertTrue(close_link in out)
def test_render_polls_template_form_close_not_author(self):
"""
        Should *not* display the close button to a user who is not the poll author
"""
request = RequestFactory().get('/')
request.user = utils.create_user()
out = Template(
"{% load spirit_tags %}"
"{% post_render_comment comment=comment %}"
).render(Context({'comment': self.user_comment_with_polls, 'request': request, 'csrf_token': 'foo'}))
self.assertNotEqual(out.strip(), "")
close_link = reverse('spirit:comment:poll:close', kwargs={'pk': self.user_poll.pk})
self.assertTrue(close_link not in out)
def test_render_polls_template_form_open(self):
"""
Should display the open button
"""
self.user_comment_with_polls.polls[0].close_at = timezone.now() # renders results.html
out = Template(
"{% load spirit_tags %}"
"{% post_render_comment comment=comment %}"
).render(Context({'comment': self.user_comment_with_polls, 'request': self.request, 'csrf_token': 'foo'}))
self.assertNotEqual(out.strip(), "")
open_link = reverse('spirit:comment:poll:open', kwargs={'pk': self.user_poll.pk})
self.assertTrue(open_link in out)
def test_render_polls_secret(self):
"""
        Should not display the view-results link when the poll is secret and not yet closed
"""
comment = utils.create_comment(topic=self.topic, comment_html="<poll name=bar>")
CommentPoll.objects.create(comment=comment, name='bar', mode=PollMode.SECRET)
user_comment_with_polls = comment.__class__.objects\
.filter(pk=comment.pk)\
.with_polls(self.user)\
.first()
out = Template(
"{% load spirit_tags %}"
"{% post_render_comment comment=comment %}"
).render(Context({'comment': user_comment_with_polls, 'request': self.request, 'csrf_token': 'foo'}))
self.assertNotEqual(out.strip(), "")
self.assertFalse('Show results' in out)
self.assertTrue('form' in out)
def test_render_polls_secret_closed(self):
"""
        Should display the results when the poll is secret and closed
"""
comment = utils.create_comment(topic=self.topic, comment_html="<poll name=bar>")
yesterday = timezone.now() - timezone.timedelta(days=1)
CommentPoll.objects.create(comment=comment, name='bar', mode=PollMode.SECRET, close_at=yesterday)
user_comment_with_polls = comment.__class__.objects\
.filter(pk=comment.pk)\
.with_polls(self.user)\
.first()
out = Template(
"{% load spirit_tags %}"
"{% post_render_comment comment=comment %}"
).render(Context({'comment': user_comment_with_polls, 'request': self.request, 'csrf_token': 'foo'}))
self.assertNotEqual(out.strip(), "")
self.assertFalse('show_poll=' in out)
self.assertFalse('form' in out)
self.assertTrue('comment-poll' in out)
class PollModelsTest(TestCase):
def setUp(self):
cache.clear()
self.user = utils.create_user()
self.category = utils.create_category()
self.topic = utils.create_topic(category=self.category, user=self.user)
self.comment = utils.create_comment(topic=self.topic)
self.poll = CommentPoll.objects.create(comment=self.comment, name='foo')
self.choice = CommentPollChoice.objects.create(poll=self.poll, number=1, description=1)
self.vote = CommentPollVote.objects.create(choice=self.choice, voter=self.user)
# Kinda like comment.with_polls()
self.poll.choices = list(CommentPollChoice.objects.filter(poll=self.poll))
for c in self.poll.choices:
c.votes = list(CommentPollVote.objects.filter(choice=c, voter=self.user))
def test_poll_is_multiple_choice(self):
"""
Should be true when max > 1
"""
poll = CommentPoll.objects.create(comment=self.comment, name='bar', choice_max=2)
self.assertFalse(self.poll.is_multiple_choice)
self.assertTrue(poll.is_multiple_choice)
def test_poll_has_choice_min(self):
"""
Should be true when min > 1
"""
poll = CommentPoll.objects.create(comment=self.comment, name='bar', choice_min=2)
self.assertFalse(self.poll.has_choice_min)
self.assertTrue(poll.has_choice_min)
def test_poll_is_closed(self):
"""
        Should be true when close_at is in the past
"""
yesterday = timezone.now() - timezone.timedelta(days=1)
tomorrow = timezone.now() + timezone.timedelta(days=1)
poll_old = CommentPoll.objects.create(comment=self.comment, name='bar', close_at=yesterday)
poll_new = CommentPoll.objects.create(comment=self.comment, name='bar2', close_at=tomorrow)
self.assertFalse(self.poll.is_closed)
self.assertTrue(poll_old.is_closed)
self.assertFalse(poll_new.is_closed)
def test_poll_has_user_voted(self):
"""
Should be true when the user has voted
"""
poll = CommentPoll.objects.create(comment=self.comment, name='bar')
CommentPollChoice.objects.create(poll=poll, number=1, description=1)
poll.choices = list(CommentPollChoice.objects.filter(poll=poll))
for c in poll.choices:
c.votes = []
self.assertTrue(self.poll.has_user_voted)
self.assertFalse(poll.has_user_voted)
def test_poll_mode_txt(self):
"""
Should return the mode description
"""
poll = CommentPoll.objects.create(comment=self.comment, name='bar')
self.assertEqual(poll.mode_txt, 'default')
poll = CommentPoll.objects.create(comment=self.comment, name='bar2', mode=PollMode.SECRET)
self.assertEqual(poll.mode_txt, 'secret')
def test_poll_total_votes(self):
"""
Should return the total votes
"""
poll = CommentPoll.objects.create(comment=self.comment, name='bar')
CommentPollChoice.objects.create(poll=poll, number=1, description='foo', vote_count=5)
CommentPollChoice.objects.create(poll=poll, number=2, description='bar', vote_count=5)
poll.choices = list(CommentPollChoice.objects.filter(poll=poll))
self.assertEqual(poll.total_votes, 10)
def test_poll_is_secret(self):
"""
Should return whether the poll is secret or not
"""
poll = CommentPoll.objects.create(comment=self.comment, name='bar')
self.assertFalse(poll.is_secret)
poll.mode = PollMode.SECRET
self.assertTrue(poll.is_secret)
def test_poll_can_show_results(self):
"""
Should return whether the poll results can be shown or not depending on the mode
"""
poll = CommentPoll.objects.create(comment=self.comment, name='bar')
self.assertTrue(poll.can_show_results)
poll.mode = PollMode.SECRET
self.assertFalse(poll.can_show_results)
yesterday = timezone.now() - timezone.timedelta(days=1)
poll.close_at = yesterday
self.assertTrue(poll.can_show_results)
def test_poll_update_or_create_many(self):
"""
Should create or update many polls for a given comment
"""
poll_raw = {'name': 'foo_raw', 'title': 'foo', 'choice_min': 2,
'choice_max': 2, 'close_at': timezone.now(), 'mode': PollMode.SECRET}
CommentPoll.update_or_create_many(comment=self.comment, polls_raw=[poll_raw])
poll = CommentPoll.objects.all().order_by('pk').last()
self.assertEqual(poll.name, poll_raw['name'])
self.assertEqual(poll.title, poll_raw['title'])
self.assertEqual(poll.choice_min, poll_raw['choice_min'])
self.assertEqual(poll.choice_max, poll_raw['choice_max'])
self.assertEqual(poll.close_at, poll_raw['close_at'])
self.assertEqual(poll.mode, poll_raw['mode'])
# Update
CommentPoll.update_or_create_many(comment=self.comment, polls_raw=[{'name': poll.name, 'title': 'bar'}])
poll_updated = CommentPoll.objects.all().order_by('pk').last()
self.assertEqual(poll.pk, poll_updated.pk)
self.assertEqual(poll_updated.title, 'bar')
def test_poll_update_or_create_many_update_un_remove(self):
"""
Should mark the poll as not removed on update
"""
poll = CommentPoll.objects.create(comment=self.comment, name='foo_rm', is_removed=True)
CommentPoll.update_or_create_many(comment=poll.comment, polls_raw=[{'name': poll.name}])
poll_updated = CommentPoll.objects.all().order_by('pk').last()
self.assertEqual(poll.pk, poll_updated.pk)
self.assertFalse(poll_updated.is_removed)
def test_poll_choice_vote(self):
"""
Should return the user vote for a given choice
"""
choice = CommentPollChoice.objects.create(poll=self.poll, number=5, description="foobar")
vote = CommentPollVote.objects.create(choice=choice, voter=self.user)
choice.votes = list(CommentPollVote.objects.filter(choice=choice, voter=self.user))
self.assertEqual(choice.vote, vote)
choice.votes = []
self.assertIsNone(choice.vote)
del choice.votes
self.assertIsNone(choice.vote)
choice.votes = [vote, vote]
self.assertRaises(AssertionError, lambda: choice.vote)
def test_poll_choice_votes_percentage(self):
"""
Should return the percentage of votes for a choice
"""
poll = CommentPoll.objects.create(comment=self.comment, name='percentage')
choice = CommentPollChoice.objects.create(poll=poll, number=1, description="foobar", vote_count=1)
poll.total_votes = 2
self.assertEqual(choice.votes_percentage, 50)
poll.total_votes = 3
self.assertEqual('{:.2f}'.format(choice.votes_percentage), '33.33')
poll.total_votes = 0
self.assertEqual(choice.votes_percentage, 0)
def test_poll_choice_increase_vote_count(self):
"""
Should increase the vote count of all choices for a given user and poll
"""
poll = CommentPoll.objects.create(comment=self.comment, name='percentage')
choice = CommentPollChoice.objects.create(poll=poll, number=1, description="foobar")
choice2 = CommentPollChoice.objects.create(poll=poll, number=2, description="foobar")
CommentPollVote.objects.create(choice=choice, voter=self.user)
CommentPollVote.objects.create(choice=choice2, voter=self.user)
user2 = utils.create_user()
CommentPollVote.objects.create(choice=choice, voter=user2)
CommentPollChoice.increase_vote_count(poll, self.user)
self.assertEqual(CommentPollChoice.objects.get(pk=self.choice.pk).vote_count, 0)
self.assertEqual(CommentPollChoice.objects.get(pk=choice.pk).vote_count, 1)
self.assertEqual(CommentPollChoice.objects.get(pk=choice2.pk).vote_count, 1)
CommentPollChoice.objects.filter(pk=choice.pk).update(is_removed=True)
CommentPollChoice.increase_vote_count(poll, self.user)
self.assertEqual(CommentPollChoice.objects.get(pk=choice.pk).vote_count, 1)
self.assertEqual(CommentPollChoice.objects.get(pk=choice2.pk).vote_count, 2)
def test_poll_choice_decrease_vote_count(self):
"""
Should decrease the vote count of all choices for a given user and poll
"""
poll = CommentPoll.objects.create(comment=self.comment, name='percentage')
choice = CommentPollChoice.objects.create(poll=poll, number=1, description="foobar", vote_count=2)
choice2 = CommentPollChoice.objects.create(poll=poll, number=2, description="foobar", vote_count=2)
CommentPollVote.objects.create(choice=choice, voter=self.user)
CommentPollVote.objects.create(choice=choice2, voter=self.user)
user2 = utils.create_user()
CommentPollVote.objects.create(choice=choice, voter=user2)
CommentPollChoice.decrease_vote_count(poll, self.user)
self.assertEqual(CommentPollChoice.objects.get(pk=self.choice.pk).vote_count, 0)
self.assertEqual(CommentPollChoice.objects.get(pk=choice.pk).vote_count, 1)
self.assertEqual(CommentPollChoice.objects.get(pk=choice2.pk).vote_count, 1)
CommentPollChoice.objects.filter(pk=choice.pk).update(is_removed=True)
CommentPollChoice.decrease_vote_count(poll, self.user)
self.assertEqual(CommentPollChoice.objects.get(pk=choice.pk).vote_count, 1)
self.assertEqual(CommentPollChoice.objects.get(pk=choice2.pk).vote_count, 0)
def test_poll_choice_update_or_create_many(self):
"""
Should create or update many choices for a given poll
"""
choice_raw = {'poll_name': 'foo', 'number': 2, 'description': '2 bar'}
CommentPollChoice.update_or_create_many(comment=self.comment, choices_raw=[choice_raw])
choice = CommentPollChoice.objects.all().order_by('pk').last()
self.assertTrue(CommentPollChoice.objects.get(pk=self.choice.pk).is_removed)
self.assertEqual(choice.poll, self.poll)
self.assertEqual(choice.number, 2)
self.assertEqual(choice.description, '2 bar')
self.assertFalse(choice.is_removed)
# Update
choice_raw2 = {'poll_name': 'foo', 'number': 1, 'description': '1 bar'}
choice_raw['description'] = '2 foo'
CommentPollChoice.update_or_create_many(comment=self.comment, choices_raw=[choice_raw, choice_raw2])
choice_updated = CommentPollChoice.objects.all().order_by('pk').last()
self.assertFalse(CommentPollChoice.objects.get(pk=self.choice.pk).is_removed)
self.assertEqual(choice_updated.poll, self.poll)
self.assertEqual(choice_updated.number, 2)
self.assertEqual(choice_updated.description, '2 foo')
self.assertFalse(choice.is_removed)
def test_poll_choice_update_or_create_many_removed_poll(self):
"""
Should raise an Exception if poll is_removed
"""
CommentPoll.objects.filter(pk=self.poll.pk).update(is_removed=True)
choice_raw = {'poll_name': 'foo', 'number': 2, 'description': '2 bar'}
self.assertRaises(KeyError, CommentPollChoice.update_or_create_many,
comment=self.comment, choices_raw=[choice_raw])
class PollUtilsTest(TestCase):
def setUp(self):
cache.clear()
self.user = utils.create_user()
self.category = utils.create_category()
self.topic = utils.create_topic(category=self.category, user=self.user)
self.comment = utils.create_comment(topic=self.topic, comment_html="<poll name=foo>")
self.poll = CommentPoll.objects.create(comment=self.comment, name='foo', title="my poll")
self.choice = CommentPollChoice.objects.create(poll=self.poll, number=1, description="choice 1")
self.choice = CommentPollChoice.objects.create(poll=self.poll, number=2, description="choice 2")
def test_post_render_static_polls(self):
"""
Should render the static polls
"""
comment_html = post_render_static_polls(self.comment)
self.assertTrue('my poll' in comment_html)
comment_parts = [
l.strip()
for l in strip_tags(comment_html).splitlines()
if l.strip()
]
self.assertEqual(comment_parts, [
'my poll',
'#1 choice 1',
'#2 choice 2',
'Name: foo, choice selection: from 1 up to 1, mode: default'
])
def test_post_render_static_polls_many(self):
"""
        Should render multiple static polls
"""
comment = utils.create_comment(topic=self.topic, comment_html="<poll name=foo>\n<poll name=bar>")
CommentPoll.objects.create(comment=comment, name='foo', title="my poll")
CommentPoll.objects.create(comment=comment, name='bar', title="my other poll")
comment_html = post_render_static_polls(comment)
self.assertTrue('my poll' in comment_html)
self.assertTrue('my other poll' in comment_html)
def test_post_render_static_polls_close_at(self):
"""
Should render the static polls with close_at
"""
now = timezone.now()
comment = utils.create_comment(topic=self.topic, comment_html="<poll name=foo>")
CommentPoll.objects.create(comment=comment, name='foo', title="my poll", close_at=now)
comment_html = post_render_static_polls(comment)
self.assertTrue('close at:' in comment_html)
self.assertTrue('Name:' in comment_html)
self.assertTrue('choice selection:' in comment_html)
self.assertTrue('mode:' in comment_html)
def test_post_render_static_polls_no_poll(self):
"""
Should render the comment with no poll
"""
comment = utils.create_comment(topic=self.topic, comment_html="foo")
comment_html = post_render_static_polls(comment)
self.assertEqual(comment_html, 'foo')
def test_post_render_static_polls_removed_poll(self):
"""
Should not render removed polls
"""
self.poll.is_removed = True
self.poll.save()
comment_html = post_render_static_polls(self.comment)
self.assertEqual(comment_html, "<poll name=foo>")
|
{
"content_hash": "6ffadb1e40a3a722349c48a568764c3a",
"timestamp": "",
"source": "github",
"line_count": 846,
"max_line_length": 117,
"avg_line_length": 44.5177304964539,
"alnum_prop": 0.6357867346396898,
"repo_name": "david30907d/feedback_django",
"id": "76f6a61549ea8b168ff91b4250b00ed12514714d",
"size": "37687",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "example/spirit/comment/poll/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "266788"
},
{
"name": "CoffeeScript",
"bytes": "222884"
},
{
"name": "HTML",
"bytes": "384426"
},
{
"name": "JavaScript",
"bytes": "61542"
},
{
"name": "Python",
"bytes": "1277926"
}
],
"symlink_target": ""
}
|
"""
Performs initial configuration of Wikifeat services
Note: Requires python3
"""
# from configparser import ConfigParser
from argparse import ArgumentParser
import util
import os
from libs import configobj
wikifeat_path = os.path.realpath(os.path.join(os.getcwd(), os.pardir))
def config_template():
return os.path.join(wikifeat_path, 'config/config.ini.example')
def config_file():
return os.path.join(wikifeat_path, 'config/config.ini')
def frontend_index_template():
return os.path.join(wikifeat_path, 'frontend/index.html.template')
def frontend_index_file():
return os.path.join(wikifeat_path, 'frontend/index.html')
def frontend_plugin_template():
return os.path.join(wikifeat_path, 'frontend/plugins/plugins.ini.example')
def frontend_plugin_file():
return os.path.join(wikifeat_path, 'frontend/plugins/plugins.ini')
def config_frontend_service():
import shutil
print("Configuring frontend service...")
shutil.copyfile(frontend_index_template(), frontend_index_file())
shutil.copyfile(frontend_plugin_template(), frontend_plugin_file())
return True
def config_database(config, db_params):
config['Database']['dbAddr'] = db_params.host
config['Database']['dbPort'] = db_params.port
config['Database']['dbAdminUser'] = db_params.adminuser
config['Database']['dbAdminPassword'] = db_params.adminpass
def config_webapp(config):
print("Configuring webapp...")
config['Frontend']['webAppDir'] = wikifeat_path + "/frontend/web_app/app"
config['Frontend']['pluginDir'] = wikifeat_path + "/frontend/plugins"
def config_all(common_params, db_params):
print("Configuring wikifeat...")
try:
config = configobj.ConfigObj(
config_template(), file_error=True
)
except IOError:
print("Error reading file " + config_template())
return False
config_database(config, db_params)
config_webapp(config)
with open(config_file(), 'w') as out_file:
config.write(out_file)
config_frontend_service()
print("Configuration complete")
def main(domain_name, db_params, install_dir):
global wikifeat_path
if install_dir is not None:
wikifeat_path = install_dir
common_params = dict()
common_params['domainName'] = domain_name
config_all(common_params, db_params)
if __name__ == "__main__":
parser = ArgumentParser()
util.add_couch_params(parser)
parser.add_argument('--domain_name', dest='domain_name',
help='host domain name')
parser.add_argument('--wikifeat_home', dest='wikifeat_home',
help='Wikifeat install directory')
parser.set_defaults(domain_name='localhost')
parser.set_defaults(wikifeat_home=wikifeat_path)
args = parser.parse_args()
if args.adminuser is None:
args.adminuser = input("Enter CouchDB admin username: ")
if args.adminpass is None:
args.adminpass = input("Enter CouchDB admin password: ")
couch_params = util.CouchParameters(args)
main(args.domain_name, couch_params, args.wikifeat_home)
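    # Illustrative sketch (added for this edit; not part of the original
    # script) of the CLI pattern used above: optional flags with defaults,
    # plus an interactive prompt when a credential flag is omitted. The flag
    # name mirrors the script; the prompt is replaced by a constant so the
    # sketch runs non-interactively:
    _demo_parser = ArgumentParser()
    _demo_parser.add_argument('--adminuser', dest='adminuser')
    _demo_args = _demo_parser.parse_args([])      # simulate no flags given
    if _demo_args.adminuser is None:
        _demo_args.adminuser = 'admin'            # input(...) in the real script
    assert _demo_args.adminuser == 'admin'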
|
{
"content_hash": "67b16ce768b0714c2e7d5eed05591cff",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 78,
"avg_line_length": 29.98076923076923,
"alnum_prop": 0.6828094932649134,
"repo_name": "rhinoman/wikifeat",
"id": "141a57532ff0f9306c92f459f7f357f72a96e7fa",
"size": "3142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/config.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "103"
},
{
"name": "Go",
"bytes": "357684"
},
{
"name": "HTML",
"bytes": "7282"
},
{
"name": "JavaScript",
"bytes": "13408"
},
{
"name": "Python",
"bytes": "120508"
},
{
"name": "Shell",
"bytes": "3120"
}
],
"symlink_target": ""
}
|
"""Moving MNIST dataset.
Unsupervised Learning of Video Representations using LSTMs
Nitish Srivastava, Elman Mansimov, Ruslan Salakhutdinov
https://arxiv.org/abs/1502.04681
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import video_utils
from tensor2tensor.layers import modalities
from tensor2tensor.utils import contrib
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from tensorflow_datasets.video import moving_sequence
DATA_URL = (
"http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy")
SPLIT_TO_SIZE = {
problem.DatasetSplit.TRAIN: 100000,
problem.DatasetSplit.EVAL: 10000,
problem.DatasetSplit.TEST: 10000}
@registry.register_problem
class VideoMovingMnist(video_utils.VideoProblem):
"""MovingMnist Dataset."""
@property
def num_channels(self):
return 1
@property
def frame_height(self):
return 64
@property
def frame_width(self):
return 64
@property
def is_generate_per_split(self):
return True
# num_videos * num_frames
@property
def total_number_of_frames(self):
return 100000 * 20
def max_frames_per_video(self, hparams):
return 20
@property
def random_skip(self):
return False
@property
def dataset_splits(self):
"""Splits of data to produce and number of output shards for each."""
return [
{"split": problem.DatasetSplit.TRAIN, "shards": 10},
{"split": problem.DatasetSplit.EVAL, "shards": 1},
{"split": problem.DatasetSplit.TEST, "shards": 1}]
@property
def extra_reading_spec(self):
"""Additional data fields to store on disk and their decoders."""
data_fields = {
"frame_number": tf.FixedLenFeature([1], tf.int64),
}
decoders = {
"frame_number":
contrib.slim().tfexample_decoder.Tensor(tensor_key="frame_number"),
}
return data_fields, decoders
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.VIDEO,
"targets": modalities.ModalityType.VIDEO}
p.vocab_size = {"inputs": 256,
"targets": 256}
def get_test_iterator(self, tmp_dir):
path = generator_utils.maybe_download(
tmp_dir, os.path.basename(DATA_URL), DATA_URL)
with tf.io.gfile.GFile(path, "rb") as fp:
mnist_test = np.load(fp)
mnist_test = np.transpose(mnist_test, (1, 0, 2, 3))
mnist_test = np.expand_dims(mnist_test, axis=-1)
mnist_test = tf.data.Dataset.from_tensor_slices(mnist_test)
return mnist_test.make_initializable_iterator()
def map_fn(self, image, label):
sequence = moving_sequence.image_as_moving_sequence(
image, sequence_length=20)
return sequence.image_sequence
def get_train_iterator(self):
mnist_ds = tfds.load("mnist:3.*.*", split=tfds.Split.TRAIN,
as_supervised=True)
mnist_ds = mnist_ds.repeat()
moving_mnist_ds = mnist_ds.map(self.map_fn).batch(2)
moving_mnist_ds = moving_mnist_ds.map(lambda x: tf.reduce_max(x, axis=0))
return moving_mnist_ds.make_initializable_iterator()
def generate_samples(self, data_dir, tmp_dir, dataset_split):
with tf.Graph().as_default():
# train and eval set are generated on-the-fly.
# test set is the official test-set.
if dataset_split == problem.DatasetSplit.TEST:
moving_ds = self.get_test_iterator(tmp_dir)
else:
moving_ds = self.get_train_iterator()
next_video = moving_ds.get_next()
with tf.Session() as sess:
sess.run(moving_ds.initializer)
n_samples = SPLIT_TO_SIZE[dataset_split]
for _ in range(n_samples):
next_video_np = sess.run(next_video)
for frame_number, frame in enumerate(next_video_np):
yield {
"frame_number": [frame_number],
"frame": frame,
}
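# Illustrative sketch (added for this edit; not part of the original module):
# get_train_iterator above composes a Moving MNIST clip by batching two
# single-digit sequences and taking an elementwise max over the pair. The
# same composition in plain numpy (shapes and pixel values are illustrative):
_seq_a = np.zeros((20, 64, 64, 1), dtype=np.uint8)
_seq_b = np.zeros((20, 64, 64, 1), dtype=np.uint8)
_seq_a[:, 10, 10, 0] = 255       # a bright pixel from digit A
_seq_b[:, 20, 20, 0] = 128       # a dimmer pixel from digit B
_combined = np.maximum(_seq_a, _seq_b)   # what tf.reduce_max does per pair
assert _combined[0, 10, 10, 0] == 255 and _combined[0, 20, 20, 0] == 128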
|
{
"content_hash": "75cedd9eb43d9b37a7425f5df0d41c00",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 79,
"avg_line_length": 30.28985507246377,
"alnum_prop": 0.6691387559808613,
"repo_name": "tensorflow/tensor2tensor",
"id": "1f4600804919a5f14e1d9ef49611a8bed89389d2",
"size": "4786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensor2tensor/data_generators/moving_mnist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "32015"
},
{
"name": "HTML",
"bytes": "34684"
},
{
"name": "JavaScript",
"bytes": "78408"
},
{
"name": "Jupyter Notebook",
"bytes": "2859453"
},
{
"name": "Python",
"bytes": "5109255"
},
{
"name": "Shell",
"bytes": "11941"
}
],
"symlink_target": ""
}
|
import sublime, sublime_plugin
import os, subprocess, string, json, threading, re, time, signal
ST3 = int(sublime.version()) > 3000
if ST3:
from .chigi_args import ChigiArgs
from .php_input import PhpInputThread
from .php_output import PhpOutputThread
def cmp(str_a,str_b):
return (str_a > str_b) - (str_a < str_b);
else:
from chigi_args import ChigiArgs
from php_input import PhpInputThread
from php_output import PhpOutputThread
class CheckEnvironmentCommandThread(threading.Thread):
"""
    A thread that keeps the configuration wizard from freezing the UI
"""
instance=None;
mutex=threading.Lock();
@staticmethod
def GetInstance():
if(CheckEnvironmentCommandThread.instance==None):
CheckEnvironmentCommandThread.mutex.acquire()
if(CheckEnvironmentCommandThread.instance==None):
                # print('initializing the instance')
CheckEnvironmentCommandThread.instance=CheckEnvironmentCommandThread()
else:
                # print('singleton already instantiated')
pass;
CheckEnvironmentCommandThread.mutex.release()
else:
            #print('singleton already instantiated')
pass;
return CheckEnvironmentCommandThread.instance
def __init__(self):
threading.Thread.__init__(self);
self.running = False;
self.setting = sublime.load_settings("phpConnector.sublime-settings");
self.encoding = self.setting.get("filesystem_encoding");
self.namespace = self.setting.get("namespaces");
self.composer = self.setting.get("composer");
self.php_path = None;
self.window = None;
self.windows = [];
def run(self):
self.running = True;
        # Check the PHP environment
def freshSettings():
self.setting = sublime.load_settings("phpConnector.sublime-settings");
self.encoding = self.setting.get("filesystem_encoding");
self.namespace = self.setting.get("namespaces");
self.php_path = self.setting.get("php_path");
print("1###");
print(self.php_path);
time.sleep(1);
sublime.set_timeout(freshSettings,1);
time.sleep(1);
print("2###");
check_php_path = os.popen(self.php_path + ' -v').read();
print("3###");
pattern = re.compile(r'^PHP \d+.\d+');
if pattern.match(check_php_path):
self.check_php_path = True;
else:
self.check_php_path = False;
        # Wait until the Sublime windows have finished opening
def freshWindows():
self.windows = sublime.windows();
while(True):
historyWindows = self.windows;
time.sleep(0.5);
sublime.set_timeout(freshWindows,1);
if(len(self.windows) > len(historyWindows)):
self.window = self.windows[0];
break;
pass;
if(self.check_php_path is True):
            # Register the PHP main process
popen_list = [self.php_path, os.path.join(ChigiArgs.CMD_DIR(), 'shell.php')];
if self.composer:
popen_list.append(self.composer);
pass
startupinfo = None;
try:
startupinfo = subprocess.STARTUPINFO();
startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW;
except AttributeError:
#print("NOT WINDOWS");
pass;
php_main = subprocess.Popen(popen_list, stdin=subprocess.PIPE,stdout=subprocess.PIPE,shell=False, stderr=subprocess.PIPE, startupinfo=startupinfo);
ChigiArgs.GetInstance().phpMain = php_main;
PhpOutputThread(php_main.stdout).start();
def initPHP():
self.window.run_command("ax_text",{
"call":"\\Chigi\\Sublime\\Commands\\SetupEnvironment",
"cmd_args":{
"file_system_encoding":self.encoding,
"namespace_map":self.namespace
}
});
sublime.set_timeout(initPHP, 1);
print("4###");
#php_main.stdin.write("bankai\n".encode("UTF-8"));
#php_main.stdin.write("QQCUM\n".encode("UTF-8"));
#php_main.stdin.write(bytes("QQCUM\n","UTF-8"));
#print(self.commanderApp.PHP_MAIN);
return;
        # The current PHP interpreter is unavailable; enter the configuration wizard automatically
time.sleep(0.3);
wizard_open = False;
if(self.check_php_path is False):
wizard_open = sublime.ok_cancel_dialog("PhpConnector: \n\nPlease provide an available PHP binary file into the environment setting of PhpConnector.");
def inputPhpPath():
self.window.show_input_panel('PHP PATH on your system', self.setting.get("php_path"),self.onDone, self.onChange, self.onCancel)
# sublime.run_command("open_file",{"file": "${packages}/PhpConnector/phpConnector.sublime-settings"});
if(wizard_open is True):
sublime.set_timeout(inputPhpPath, 10);
def onDone(self, input):
self.setting.set('php_path', input);
sublime.save_settings('phpConnector.sublime-settings');
sublime.status_message(('%s successfully '
+ 'specified') % input);
self.checkFileSystemEnc();
def onChange(self, input):
pass;
def onCancel(self):
pass;
def checkFileSystemEnc(self):
wizard_open = False;
wizard_open = sublime.ok_cancel_dialog(u"Then please ensure the correct encoding on your current file system.\n\n中文 Windows 系统请在接下来的输入框中输入 gbk\n\n日本語のシステムは shift-jis を入力ください");
def inputEnc():
self.window.show_input_panel('File System Encoding', self.setting.get("filesystem_encoding"),self.doneFileSystemEnc, self.onChange, self.onCancel);
if(wizard_open is True):
sublime.set_timeout(inputEnc, 10);
pass;
def doneFileSystemEnc(self,input):
self.setting.set('filesystem_encoding', input);
sublime.save_settings('phpConnector.sublime-settings');
sublime.status_message(('%s successfully '
+ 'specified') % input);
if(cmp(input.lower(),'gbk')==0):
sublime.ok_cancel_dialog(u"开始嗨皮地使用 PHP 来开发 Sublime 插件吧~~");
self.window.open_file(os.path.join(ChigiArgs.PKGPATH(),'readme.md'));
os.startfile(os.path.join(ChigiArgs.PKGPATH(),'docs','welcome.zhcn.html'));
pass;
else:
self.window.open_file(os.path.join(ChigiArgs.PKGPATH(),'readme.md'));
pass;
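# Illustrative sketch (added for this edit; not part of the original plugin):
# GetInstance above is a double-checked-locking singleton -- check, acquire
# the lock, check again, then construct. The same pattern in a minimal
# standalone class (class name is hypothetical):
class _Singleton(object):
    _instance = None
    _lock = threading.Lock()

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:   # re-check while holding the lock
                    cls._instance = cls()
        return cls._instance

assert _Singleton.get_instance() is _Singleton.get_instance()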
|
{
"content_hash": "da4ccf9109a84f3c2df8138f740701d5",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 184,
"avg_line_length": 42.354838709677416,
"alnum_prop": 0.585986290936786,
"repo_name": "chigix/sublime-php-connector",
"id": "c511019978b13fddc93ef127c5e4e4e2ca636f52",
"size": "6785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "check_env.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8037"
},
{
"name": "HTML",
"bytes": "212357"
},
{
"name": "JavaScript",
"bytes": "4521"
},
{
"name": "PHP",
"bytes": "100070"
},
{
"name": "Python",
"bytes": "21851"
},
{
"name": "Shell",
"bytes": "696"
}
],
"symlink_target": ""
}
|
import datetime
from flask_appbuilder import Model
from sqlalchemy import Column, Date, Enum, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
mindate = datetime.date(datetime.MINYEAR, 1, 1)
class ContactGroup(Model):
id = Column(Integer, primary_key=True)
name = Column(String(50), unique=True, nullable=False)
def __repr__(self):
return self.name
class Contact(Model):
id = Column(Integer, primary_key=True)
name = Column(String(150), unique=True, nullable=False)
address = Column(String(564))
birthday = Column(Date, nullable=True)
personal_phone = Column(String(20))
personal_celphone = Column(String(20))
contact_group_id = Column(Integer, ForeignKey("contact_group.id"), nullable=False)
contact_group = relationship("ContactGroup")
gender = Column(Enum("Female", "Male"))
def __repr__(self):
return self.name
    def month_year(self):
        date = self.birthday or mindate
        return datetime.datetime(date.year, date.month, 1)
def year(self):
date = self.birthday or mindate
return datetime.datetime(date.year, 1, 1)
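# Illustrative sketch (added for this edit; not part of the original models):
# Contact.month_year above buckets a birthday to the first day of its month,
# falling back to mindate when the birthday is unset (helper name is
# hypothetical):
def _month_year(birthday):
    date = birthday or mindate
    return datetime.datetime(date.year, date.month, 1)

assert _month_year(datetime.date(1999, 12, 31)) == datetime.datetime(1999, 12, 1)
assert _month_year(None) == datetime.datetime(datetime.MINYEAR, 1, 1)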
|
{
"content_hash": "56c5b534315cb9bcfda465351995f11e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 86,
"avg_line_length": 30.473684210526315,
"alnum_prop": 0.6865284974093264,
"repo_name": "dpgaspar/Flask-AppBuilder",
"id": "b723e8c0c1c5364265af6faafe1d5beb2f2aec2d",
"size": "1158",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/enums/app/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10821"
},
{
"name": "HTML",
"bytes": "88195"
},
{
"name": "JavaScript",
"bytes": "13746"
},
{
"name": "Python",
"bytes": "1041543"
},
{
"name": "Shell",
"bytes": "1343"
}
],
"symlink_target": ""
}
|
"""Functions for computing metrics like precision, recall, CorLoc and etc."""
from __future__ import division
import numpy as np
def compute_precision_recall(scores, labels, num_gt):
"""Compute precision and recall.
Args:
scores: A float numpy array representing detection score
labels: A boolean numpy array representing true/false positive labels
num_gt: Number of ground truth instances
Raises:
ValueError: if the input is not of the correct format
Returns:
precision: Fraction of positive instances over detected ones. This value is
None if no ground truth labels are present.
    recall: Fraction of detected positive instances over all positive instances.
This value is None if no ground truth labels are present.
"""
if not isinstance(
labels, np.ndarray) or labels.dtype != np.bool or len(labels.shape) != 1:
raise ValueError("labels must be single dimension bool numpy array")
if not isinstance(
scores, np.ndarray) or len(scores.shape) != 1:
raise ValueError("scores must be single dimension numpy array")
if num_gt < np.sum(labels):
raise ValueError("Number of true positives must be smaller than num_gt.")
if len(scores) != len(labels):
raise ValueError("scores and labels must be of the same size.")
if num_gt == 0:
return None, None
sorted_indices = np.argsort(scores)
sorted_indices = sorted_indices[::-1]
labels = labels.astype(int)
true_positive_labels = labels[sorted_indices]
false_positive_labels = 1 - true_positive_labels
cum_true_positives = np.cumsum(true_positive_labels)
cum_false_positives = np.cumsum(false_positive_labels)
precision = cum_true_positives.astype(float) / (
cum_true_positives + cum_false_positives)
recall = cum_true_positives.astype(float) / num_gt
return precision, recall
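# Worked example (illustrative only, not part of the original module):
# scores = np.array([0.9, 0.8, 0.7]), labels = np.array([True, False, True]),
# num_gt = 3. Sorting by descending score gives TP flags [1, 0, 1], so
# cum_true_positives = [1, 1, 2] and cum_false_positives = [0, 1, 1],
# yielding precision = [1.0, 0.5, 0.667] and recall = [0.333, 0.333, 0.667].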
def compute_average_precision(precision, recall):
"""Compute Average Precision according to the definition in VOCdevkit.
Precision is modified to ensure that it does not decrease as recall
decreases.
Args:
precision: A float [N, 1] numpy array of precisions
recall: A float [N, 1] numpy array of recalls
Raises:
ValueError: if the input is not of the correct format
Returns:
average_precision: The area under the precision-recall curve. NaN if
precision and recall are None.
"""
if precision is None:
if recall is not None:
raise ValueError("If precision is None, recall must also be None")
return np.nan
if not isinstance(precision, np.ndarray) or not isinstance(recall,
np.ndarray):
raise ValueError("precision and recall must be numpy array")
if precision.dtype != float or recall.dtype != float:
raise ValueError("input must be float numpy array.")
if len(precision) != len(recall):
raise ValueError("precision and recall must be of the same size.")
if not precision.size:
return 0.0
if np.amin(precision) < 0 or np.amax(precision) > 1:
raise ValueError("Precision must be in the range of [0, 1].")
if np.amin(recall) < 0 or np.amax(recall) > 1:
raise ValueError("recall must be in the range of [0, 1].")
if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
raise ValueError("recall must be a non-decreasing array")
recall = np.concatenate([[0], recall, [1]])
precision = np.concatenate([[0], precision, [0]])
# Preprocess precision to be a non-decreasing array
for i in range(len(precision) - 2, -1, -1):
precision[i] = np.maximum(precision[i], precision[i + 1])
indices = np.where(recall[1:] != recall[:-1])[0] + 1
average_precision = np.sum(
(recall[indices] - recall[indices - 1]) * precision[indices])
return average_precision
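# Worked example (illustrative only): continuing the arrays above,
# precision = [1.0, 0.5, 0.667] and recall = [0.333, 0.333, 0.667].
# After padding, recall = [0, 1/3, 1/3, 2/3, 1]; the right-to-left maximum
# turns the padded precision [0, 1, 0.5, 2/3, 0] into [1, 1, 2/3, 2/3, 0].
# Recall changes at indices 1, 3 and 4, so
# AP = (1/3)*1 + (1/3)*(2/3) + (1/3)*0 = 5/9 ~= 0.556.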
def compute_cor_loc(num_gt_imgs_per_class,
num_images_correctly_detected_per_class):
"""Compute CorLoc according to the definition in the following paper.
https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf
Returns nans if there are no ground truth images for a class.
Args:
num_gt_imgs_per_class: 1D array, representing the number of images
containing at least one object instance of a particular class
num_images_correctly_detected_per_class: 1D array, representing the number
of images in which at least one object instance of a particular class
is correctly detected
Returns:
corloc_per_class: A float numpy array represents the corloc score of each
class
"""
# Divide by zero expected for classes with no gt examples.
with np.errstate(divide="ignore", invalid="ignore"):
return np.where(
num_gt_imgs_per_class == 0, np.nan,
num_images_correctly_detected_per_class / num_gt_imgs_per_class)
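# Worked example (illustrative only): with num_gt_imgs_per_class = [10, 0, 5]
# and num_images_correctly_detected_per_class = [8, 0, 5], this returns
# [0.8, nan, 1.0]; the second class has no ground truth images, so its
# CorLoc is undefined and reported as NaN.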
|
{
"content_hash": "26f5c2d3d75d964720f918523fb61234",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 79,
"avg_line_length": 36.83076923076923,
"alnum_prop": 0.6942355889724311,
"repo_name": "activitynet/ActivityNet",
"id": "e8ae8867eac7c13c933370dcf191d62c4f113f1f",
"size": "5478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Evaluation/ava/metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "22855212"
},
{
"name": "Python",
"bytes": "202438"
},
{
"name": "Shell",
"bytes": "374"
}
],
"symlink_target": ""
}
|
"""Provides device triggers for binary sensors."""
import voluptuous as vol
from homeassistant.components.automation import state as state_automation
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.const import (
CONF_TURNED_OFF,
CONF_TURNED_ON,
)
from homeassistant.const import ATTR_DEVICE_CLASS, CONF_ENTITY_ID, CONF_FOR, CONF_TYPE
from homeassistant.helpers.entity_registry import async_entries_for_device
from homeassistant.helpers import config_validation as cv
from . import (
DOMAIN,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_COLD,
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GARAGE_DOOR,
DEVICE_CLASS_GAS,
DEVICE_CLASS_HEAT,
DEVICE_CLASS_LIGHT,
DEVICE_CLASS_LOCK,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_MOVING,
DEVICE_CLASS_OCCUPANCY,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_PLUG,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PRESENCE,
DEVICE_CLASS_PROBLEM,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
DEVICE_CLASS_SOUND,
DEVICE_CLASS_VIBRATION,
DEVICE_CLASS_WINDOW,
)
# mypy: allow-untyped-defs, no-check-untyped-defs
DEVICE_CLASS_NONE = "none"
CONF_BAT_LOW = "bat_low"
CONF_NOT_BAT_LOW = "not_bat_low"
CONF_COLD = "cold"
CONF_NOT_COLD = "not_cold"
CONF_CONNECTED = "connected"
CONF_NOT_CONNECTED = "not_connected"
CONF_GAS = "gas"
CONF_NO_GAS = "no_gas"
CONF_HOT = "hot"
CONF_NOT_HOT = "not_hot"
CONF_LIGHT = "light"
CONF_NO_LIGHT = "no_light"
CONF_LOCKED = "locked"
CONF_NOT_LOCKED = "not_locked"
CONF_MOIST = "moist"
CONF_NOT_MOIST = "not_moist"
CONF_MOTION = "motion"
CONF_NO_MOTION = "no_motion"
CONF_MOVING = "moving"
CONF_NOT_MOVING = "not_moving"
CONF_OCCUPIED = "occupied"
CONF_NOT_OCCUPIED = "not_occupied"
CONF_PLUGGED_IN = "plugged_in"
CONF_NOT_PLUGGED_IN = "not_plugged_in"
CONF_POWERED = "powered"
CONF_NOT_POWERED = "not_powered"
CONF_PRESENT = "present"
CONF_NOT_PRESENT = "not_present"
CONF_PROBLEM = "problem"
CONF_NO_PROBLEM = "no_problem"
CONF_UNSAFE = "unsafe"
CONF_NOT_UNSAFE = "not_unsafe"
CONF_SMOKE = "smoke"
CONF_NO_SMOKE = "no_smoke"
CONF_SOUND = "sound"
CONF_NO_SOUND = "no_sound"
CONF_VIBRATION = "vibration"
CONF_NO_VIBRATION = "no_vibration"
CONF_OPENED = "opened"
CONF_NOT_OPENED = "not_opened"
TURNED_ON = [
CONF_BAT_LOW,
CONF_COLD,
CONF_CONNECTED,
CONF_GAS,
CONF_HOT,
CONF_LIGHT,
CONF_LOCKED,
CONF_MOIST,
CONF_MOTION,
CONF_MOVING,
CONF_OCCUPIED,
CONF_OPENED,
CONF_PLUGGED_IN,
CONF_POWERED,
CONF_PRESENT,
CONF_PROBLEM,
CONF_SMOKE,
CONF_SOUND,
CONF_UNSAFE,
CONF_VIBRATION,
CONF_TURNED_ON,
]
TURNED_OFF = [
CONF_NOT_BAT_LOW,
CONF_NOT_COLD,
CONF_NOT_CONNECTED,
CONF_NOT_HOT,
CONF_NOT_LOCKED,
CONF_NOT_MOIST,
CONF_NOT_MOVING,
CONF_NOT_OCCUPIED,
CONF_NOT_OPENED,
CONF_NOT_PLUGGED_IN,
CONF_NOT_POWERED,
CONF_NOT_PRESENT,
CONF_NOT_UNSAFE,
CONF_NO_GAS,
CONF_NO_LIGHT,
CONF_NO_MOTION,
CONF_NO_PROBLEM,
CONF_NO_SMOKE,
CONF_NO_SOUND,
CONF_NO_VIBRATION,
CONF_TURNED_OFF,
]
ENTITY_TRIGGERS = {
DEVICE_CLASS_BATTERY: [{CONF_TYPE: CONF_BAT_LOW}, {CONF_TYPE: CONF_NOT_BAT_LOW}],
DEVICE_CLASS_COLD: [{CONF_TYPE: CONF_COLD}, {CONF_TYPE: CONF_NOT_COLD}],
DEVICE_CLASS_CONNECTIVITY: [
{CONF_TYPE: CONF_CONNECTED},
{CONF_TYPE: CONF_NOT_CONNECTED},
],
DEVICE_CLASS_DOOR: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}],
DEVICE_CLASS_GARAGE_DOOR: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}],
DEVICE_CLASS_GAS: [{CONF_TYPE: CONF_GAS}, {CONF_TYPE: CONF_NO_GAS}],
DEVICE_CLASS_HEAT: [{CONF_TYPE: CONF_HOT}, {CONF_TYPE: CONF_NOT_HOT}],
DEVICE_CLASS_LIGHT: [{CONF_TYPE: CONF_LIGHT}, {CONF_TYPE: CONF_NO_LIGHT}],
DEVICE_CLASS_LOCK: [{CONF_TYPE: CONF_LOCKED}, {CONF_TYPE: CONF_NOT_LOCKED}],
DEVICE_CLASS_MOISTURE: [{CONF_TYPE: CONF_MOIST}, {CONF_TYPE: CONF_NOT_MOIST}],
DEVICE_CLASS_MOTION: [{CONF_TYPE: CONF_MOTION}, {CONF_TYPE: CONF_NO_MOTION}],
DEVICE_CLASS_MOVING: [{CONF_TYPE: CONF_MOVING}, {CONF_TYPE: CONF_NOT_MOVING}],
DEVICE_CLASS_OCCUPANCY: [
{CONF_TYPE: CONF_OCCUPIED},
{CONF_TYPE: CONF_NOT_OCCUPIED},
],
DEVICE_CLASS_OPENING: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}],
DEVICE_CLASS_PLUG: [{CONF_TYPE: CONF_PLUGGED_IN}, {CONF_TYPE: CONF_NOT_PLUGGED_IN}],
DEVICE_CLASS_POWER: [{CONF_TYPE: CONF_POWERED}, {CONF_TYPE: CONF_NOT_POWERED}],
DEVICE_CLASS_PRESENCE: [{CONF_TYPE: CONF_PRESENT}, {CONF_TYPE: CONF_NOT_PRESENT}],
DEVICE_CLASS_PROBLEM: [{CONF_TYPE: CONF_PROBLEM}, {CONF_TYPE: CONF_NO_PROBLEM}],
DEVICE_CLASS_SAFETY: [{CONF_TYPE: CONF_UNSAFE}, {CONF_TYPE: CONF_NOT_UNSAFE}],
DEVICE_CLASS_SMOKE: [{CONF_TYPE: CONF_SMOKE}, {CONF_TYPE: CONF_NO_SMOKE}],
DEVICE_CLASS_SOUND: [{CONF_TYPE: CONF_SOUND}, {CONF_TYPE: CONF_NO_SOUND}],
DEVICE_CLASS_VIBRATION: [
{CONF_TYPE: CONF_VIBRATION},
{CONF_TYPE: CONF_NO_VIBRATION},
],
DEVICE_CLASS_WINDOW: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}],
DEVICE_CLASS_NONE: [{CONF_TYPE: CONF_TURNED_ON}, {CONF_TYPE: CONF_TURNED_OFF}],
}
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TURNED_OFF + TURNED_ON),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
)
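# A config accepted by TRIGGER_SCHEMA looks roughly like this (the device and
# entity ids below are made-up placeholders, not values from this module):
#
#   {
#       "platform": "device",
#       "domain": "binary_sensor",
#       "device_id": "abc123",
#       "entity_id": "binary_sensor.front_door",
#       "type": CONF_OPENED,
#       "for": {"seconds": 5},
#   }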
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
trigger_type = config[CONF_TYPE]
if trigger_type in TURNED_ON:
from_state = "off"
to_state = "on"
else:
from_state = "on"
to_state = "off"
state_config = {
state_automation.CONF_PLATFORM: "state",
state_automation.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
state_automation.CONF_FROM: from_state,
state_automation.CONF_TO: to_state,
}
if CONF_FOR in config:
state_config[CONF_FOR] = config[CONF_FOR]
state_config = state_automation.TRIGGER_SCHEMA(state_config)
return await state_automation.async_attach_trigger(
hass, state_config, action, automation_info, platform_type="device"
)
async def async_get_triggers(hass, device_id):
"""List device triggers."""
triggers = []
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entries = [
entry
for entry in async_entries_for_device(entity_registry, device_id)
if entry.domain == DOMAIN
]
for entry in entries:
device_class = DEVICE_CLASS_NONE
state = hass.states.get(entry.entity_id)
if state:
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
templates = ENTITY_TRIGGERS.get(
device_class, ENTITY_TRIGGERS[DEVICE_CLASS_NONE]
)
triggers.extend(
(
{
**automation,
"platform": "device",
"device_id": device_id,
"entity_id": entry.entity_id,
"domain": DOMAIN,
}
for automation in templates
)
)
return triggers
async def async_get_trigger_capabilities(hass, config):
"""List trigger capabilities."""
return {
"extra_fields": vol.Schema(
{vol.Optional(CONF_FOR): cv.positive_time_period_dict}
)
}
|
{
"content_hash": "74671582016fc0d23d03cb6ccc5746f2",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 88,
"avg_line_length": 30.147410358565736,
"alnum_prop": 0.6451698163076517,
"repo_name": "qedi-r/home-assistant",
"id": "c51b9749288bb544671212f653176ccdb45dc65d",
"size": "7567",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/binary_sensor/device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18564720"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
import enum
from datetime import datetime
from typing import NamedTuple
class BacklogLocation(enum.Enum):
"""A location with respect to the message backlog. BEGINNING refers to the
location of the oldest retained message. END refers to the location past
all currently published messages, skipping the entire message backlog."""
BEGINNING = 0
END = 1
class PublishTime(NamedTuple):
"""The publish timestamp of a message."""
value: datetime
class EventTime(NamedTuple):
"""A user-defined event timestamp of a message."""
value: datetime
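# Minimal usage sketch (illustrative only, not part of the original module):
#
#   from datetime import timezone
#   start = BacklogLocation.BEGINNING              # replay the whole backlog
#   ts = PublishTime(datetime.now(timezone.utc))   # wrap a publish timestamp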
|
{
"content_hash": "6601d3d89191072541f3eb4cb466d678",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 78,
"avg_line_length": 24.166666666666668,
"alnum_prop": 0.7310344827586207,
"repo_name": "googleapis/python-pubsublite",
"id": "dbd0c6039fca76def6ae28568aed799294922492",
"size": "1155",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "google/cloud/pubsublite/types/backlog_location.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1689513"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
}
|
import math
import os
import re
import time
import numpy as np
import pytest
from numpy.testing import assert_equal
from astropy.io import fits
from astropy.io.fits.hdu.compressed import DITHER_SEED_CHECKSUM, SUBTRACTIVE_DITHER_1
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from .conftest import FitsTestCase
from .test_table import comparerecords
class TestImageFunctions(FitsTestCase):
def test_constructor_name_arg(self):
"""Like the test of the same name in test_table.py"""
hdu = fits.ImageHDU()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = fits.ImageHDU(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = fits.ImageHDU(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
def assert_ver_is(hdu, reference_ver):
assert hdu.ver == reference_ver
assert hdu.header['EXTVER'] == reference_ver
hdu = fits.ImageHDU()
assert hdu.ver == 1 # defaults to 1
assert 'EXTVER' not in hdu.header
hdu.ver = 1
assert_ver_is(hdu, 1)
# Passing ver to constructor
hdu = fits.ImageHDU(ver=2)
assert_ver_is(hdu, 2)
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 3
hdu = fits.ImageHDU(header=hdr, ver=4)
assert_ver_is(hdu, 4)
# The header card is not overridden if ver is None or not passed in
hdr = fits.Header()
hdr['EXTVER'] = 5
hdu = fits.ImageHDU(header=hdr, ver=None)
assert_ver_is(hdu, 5)
hdu = fits.ImageHDU(header=hdr)
assert_ver_is(hdu, 5)
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
"""
ifd = fits.HDUList(fits.PrimaryHDU())
phdr = ifd[0].header
phdr['FILENAME'] = 'labq01i3q_rawtag.fits'
primary_hdu = fits.PrimaryHDU(header=phdr)
ofd = fits.HDUList(primary_hdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert phdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# The function "open" reads a FITS file into an HDUList object. There
# are three modes to open: "readonly" (the default), "append", and
# "update".
# Open a file read-only (the default mode), the content of the FITS
# file are read into memory.
r = fits.open(self.data('test0.fits')) # readonly
# data parts are lazily instantiated, so if we close the HDUList
# without touching the data, the data cannot be accessed.
r.close()
with pytest.raises(IndexError) as exc_info:
r[1].data[:2, :2]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
def test_open_2(self):
r = fits.open(self.data('test0.fits'))
info = ([(0, 'PRIMARY', 1, 'PrimaryHDU', 138, (), '', '')] +
[(x, 'SCI', x, 'ImageHDU', 61, (40, 40), 'int16', '')
for x in range(1, 5)])
try:
assert r.info(output=False) == info
finally:
r.close()
def test_open_3(self):
# Test that HDUs cannot be accessed after the file was closed
r = fits.open(self.data('test0.fits'))
r.close()
with pytest.raises(IndexError) as exc_info:
r[1]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
# Test that HDUs can be accessed with lazy_load_hdus=False
r = fits.open(self.data('test0.fits'), lazy_load_hdus=False)
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
with pytest.raises(IndexError) as exc_info:
r[6]
assert str(exc_info.value) == 'list index out of range'
# And the same with the global config item
assert fits.conf.lazy_load_hdus # True by default
fits.conf.lazy_load_hdus = False
try:
r = fits.open(self.data('test0.fits'))
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
finally:
fits.conf.lazy_load_hdus = True
def test_fortran_array(self):
# Test that files are being correctly written+read for "C" and "F" order arrays
a = np.arange(21).reshape(3, 7)
b = np.asfortranarray(a)
afits = self.temp('a_str.fits')
bfits = self.temp('b_str.fits')
# writing to str specified files
fits.PrimaryHDU(data=a).writeto(afits)
fits.PrimaryHDU(data=b).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a)
np.testing.assert_array_equal(fits.getdata(bfits), a)
# writing to fileobjs
aafits = self.temp('a_fileobj.fits')
bbfits = self.temp('b_fileobj.fits')
with open(aafits, mode='wb') as fd:
fits.PrimaryHDU(data=a).writeto(fd)
with open(bbfits, mode='wb') as fd:
fits.PrimaryHDU(data=b).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a)
np.testing.assert_array_equal(fits.getdata(bbfits), a)
def test_fortran_array_non_contiguous(self):
# Test that files are being correctly written+read for 'C' and 'F' order arrays
a = np.arange(105).reshape(3, 5, 7)
b = np.asfortranarray(a)
# writing to str specified files
afits = self.temp('a_str_slice.fits')
bfits = self.temp('b_str_slice.fits')
fits.PrimaryHDU(data=a[::2, ::2]).writeto(afits)
fits.PrimaryHDU(data=b[::2, ::2]).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bfits), a[::2, ::2])
# writing to fileobjs
aafits = self.temp('a_fileobj_slice.fits')
bbfits = self.temp('b_fileobj_slice.fits')
with open(aafits, mode='wb') as fd:
fits.PrimaryHDU(data=a[::2, ::2]).writeto(fd)
with open(bbfits, mode='wb') as fd:
fits.PrimaryHDU(data=b[::2, ::2]).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bbfits), a[::2, ::2])
def test_primary_with_extname(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151
Tests that the EXTNAME keyword works with Primary HDUs as well, and
interacts properly with the .name attribute. For convenience
hdulist['PRIMARY'] will still refer to the first HDU even if it has an
EXTNAME not equal to 'PRIMARY'.
"""
prihdr = fits.Header([('EXTNAME', 'XPRIMARY'), ('EXTVER', 1)])
hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)])
assert 'EXTNAME' in hdul[0].header
assert hdul[0].name == 'XPRIMARY'
assert hdul[0].name == hdul[0].header['EXTNAME']
info = [(0, 'XPRIMARY', 1, 'PrimaryHDU', 5, (), '', '')]
assert hdul.info(output=False) == info
assert hdul['PRIMARY'] is hdul['XPRIMARY']
assert hdul['PRIMARY'] is hdul[('XPRIMARY', 1)]
hdul[0].name = 'XPRIMARY2'
assert hdul[0].header['EXTNAME'] == 'XPRIMARY2'
hdul.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[0].name == 'XPRIMARY2'
def test_io_manipulation(self):
# Get a keyword value. An extension can be referred to by name or by
# number. Both extension and keyword names are case insensitive.
with fits.open(self.data('test0.fits')) as r:
assert r['primary'].header['naxis'] == 0
assert r[0].header['naxis'] == 0
# If there is more than one extension with the same EXTNAME value,
# the EXTVER can be used (as the second argument) to distinguish
# the extension.
assert r['sci', 1].header['detector'] == 1
# append (using "update()") a new card
r[0].header['xxx'] = 1.234e56
assert ('\n'.join(str(x) for x in r[0].header.cards[-3:]) ==
"EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n"
"FILENAME= 'vtest3.fits' / File name \n"
"XXX = 1.234E+56 ")
# rename a keyword
r[0].header.rename_keyword('filename', 'fname')
pytest.raises(ValueError, r[0].header.rename_keyword, 'fname',
'history')
pytest.raises(ValueError, r[0].header.rename_keyword, 'fname',
'simple')
r[0].header.rename_keyword('fname', 'filename')
# get a subsection of data
assert np.array_equal(r[2].data[:3, :3],
np.array([[349, 349, 348],
[349, 349, 347],
[347, 350, 349]], dtype=np.int16))
# We can create a new FITS file by opening a new file with "append"
# mode.
with fits.open(self.temp('test_new.fits'), mode='append') as n:
# Append the primary header and the 2nd extension to the new
# file.
n.append(r[0])
n.append(r[2])
# The flush method will write the current HDUList object back
# to the newly created file on disk. The HDUList is still open
# and can be operated on further.
n.flush()
assert n[1].data[1, 1] == 349
# modify a data point
n[1].data[1, 1] = 99
# When the file is closed, the most recent additions of
# extension(s) since the last flush() will be appended, but any HDU
# that already existed at the last flush will not be modified
del n
# If an existing file is opened with "append" mode, like the
# readonly mode, the HDUs will be read into the HDUList, which can
# be modified in memory but cannot be written back to the original
# file. A file opened with append mode can only add new HDUs.
os.rename(self.temp('test_new.fits'),
self.temp('test_append.fits'))
with fits.open(self.temp('test_append.fits'), mode='append') as a:
# The above change did not take effect since this was made
# after the flush().
assert a[1].data[1, 1] == 349
a.append(r[1])
del a
# When changes are made to an HDUList which was opened with
# "update" mode, they will be written back to the original file
# when a flush/close is called.
os.rename(self.temp('test_append.fits'),
self.temp('test_update.fits'))
with fits.open(self.temp('test_update.fits'), mode='update') as u:
# When the changes do not alter the size structures of the
# original (or since last flush) HDUList, the changes are
# written back "in place".
assert u[0].header['rootname'] == 'U2EQ0201T'
u[0].header['rootname'] = 'abc'
assert u[1].data[1, 1] == 349
u[1].data[1, 1] = 99
u.flush()
# If the changes affect the size structure, e.g. adding or
# deleting HDU(s), header was expanded or reduced beyond
# existing number of blocks (2880 bytes in each block), or
# change the data size, the HDUList is written to a temporary
# file, the original file is deleted, and the temporary file is
# renamed to the original file name and reopened in the update
# mode. To a user, these two kinds of updating writeback seem
# to be the same, unless the optional argument in flush or
# close is set to 1.
del u[2]
u.flush()
# The write method in HDUList class writes the current HDUList,
# with all changes made up to now, to a new file. This method
# works the same regardless of the mode the HDUList was opened
# with.
u.append(r[3])
u.writeto(self.temp('test_new.fits'))
del u
# Another useful new HDUList method is readall. It will "touch" the
# data parts in all HDUs, so even if the HDUList is closed, we can
# still operate on the data.
with fits.open(self.data('test0.fits')) as r:
r.readall()
assert r[1].data[1, 1] == 315
# create an HDU with data only
data = np.ones((3, 5), dtype=np.float32)
hdu = fits.ImageHDU(data=data, name='SCI')
assert np.array_equal(hdu.data,
np.array([[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.]],
dtype=np.float32))
# create an HDU with header and data
# notice that the header has the right NAXIS keywords since it is constructed
# with ImageHDU
hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2],
dtype='int32'))
assert ('\n'.join(str(x) for x in hdu2.header.cards[1:5]) ==
"BITPIX = 32 / array data type \n"
"NAXIS = 1 / number of array dimensions \n"
"NAXIS1 = 2 \n"
"PCOUNT = 0 / number of parameters ")
def test_memory_mapping(self):
# memory mapping
f1 = fits.open(self.data('test0.fits'), memmap=1)
f1.close()
def test_verification_on_output(self):
# verification on output
# make a defect HDUList first
x = fits.ImageHDU()
hdu = fits.HDUList(x)  # HDUList can take a list or a single HDU
with pytest.warns(AstropyUserWarning, match=r"HDUList's 0th element is not a primary HDU\.") as w:
hdu.verify()
assert len(w) == 3
with pytest.warns(AstropyUserWarning, match=r"HDUList's 0th element is not a primary HDU\. "
r"Fixed by inserting one as 0th HDU\.") as w:
hdu.writeto(self.temp('test_new2.fits'), 'fix')
assert len(w) == 3
def test_section(self):
# section testing
fs = fits.open(self.data('arange.fits'))
assert np.array_equal(fs[0].section[3, 2, 5], 357)
assert np.array_equal(
fs[0].section[3, 2, :],
np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, 4:],
np.array([356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, :8],
np.array([352, 353, 354, 355, 356, 357, 358, 359]))
assert np.array_equal(fs[0].section[3, 2, -8:8],
np.array([355, 356, 357, 358, 359]))
assert np.array_equal(
fs[0].section[3, 2:5, :],
np.array([[352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362],
[363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373],
[374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384]]))
assert np.array_equal(fs[0].section[3, :, :][:3, :3],
np.array([[330, 331, 332],
[341, 342, 343],
[352, 353, 354]]))
dat = fs[0].data
assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8])
assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3])
assert np.array_equal(fs[0].section[3:6, :, :][:3, :3, :3],
np.array([[[330, 331, 332],
[341, 342, 343],
[352, 353, 354]],
[[440, 441, 442],
[451, 452, 453],
[462, 463, 464]],
[[550, 551, 552],
[561, 562, 563],
[572, 573, 574]]]))
assert np.array_equal(fs[0].section[:, :, :][:3, :2, :2],
np.array([[[0, 1],
[11, 12]],
[[110, 111],
[121, 122]],
[[220, 221],
[231, 232]]]))
assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :])
assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :])
assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :])
assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :])
assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2])
assert np.array_equal(fs[0].section[:, [1, 2, 4], 3],
dat[:, [1, 2, 4], 3])
bool_index = np.array([True, False, True, True, False,
False, True, True, False, True])
assert np.array_equal(fs[0].section[:, bool_index, :],
dat[:, bool_index, :])
assert np.array_equal(
fs[0].section[3:6, 3, :, ...], dat[3:6, 3, :, ...])
assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2])
assert np.array_equal(fs[0].section[..., [1, 2, 4], 3],
dat[..., [1, 2, 4], 3])
# Can we use negative indices?
assert np.array_equal(fs[0].section[-1], dat[-1])
assert np.array_equal(fs[0].section[-9:-7], dat[-9:-7])
assert np.array_equal(fs[0].section[-4, -6:-3, -1], dat[-4, -6:-3, -1])
fs.close()
def test_section_data_single(self):
a = np.array([1])
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
sec = hdul[0].section
dat = hdul[0].data
assert np.array_equal(sec[0], dat[0])
assert np.array_equal(sec[...], dat[...])
assert np.array_equal(sec[..., 0], dat[..., 0])
assert np.array_equal(sec[0, ...], dat[0, ...])
hdul.close()
def test_section_data_square(self):
a = np.arange(4).reshape(2, 2)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
def test_section_data_cube(self):
a = np.arange(18).reshape(2, 3, 3)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:] == dat[:]).all()
assert (d.section[:, :] == dat[:, :]).all()
# Test that various combinations of indexing on the section are equal to
# indexing the data.
# Testing all combinations of scalar-index and [:] for each dimension.
for idx1 in [slice(None), 0, 1]:
for idx2 in [slice(None), 0, 1, 2]:
for idx3 in [slice(None), 0, 1, 2]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test all ways to slice the last dimension but keeping the first two.
for idx3 in [slice(0, 1), slice(0, 2), slice(0, 3),
slice(1, 2), slice(1, 3), slice(2, 3)]:
nd_idx = (slice(None), slice(None), idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test various combinations (not exhaustive) to slice all dimensions.
for idx1 in [slice(0, 1), slice(1, 2)]:
for idx2 in [slice(0, 1), slice(0, 2), slice(0, 3),
slice(1, 2), slice(1, 3)]:
for idx3 in [slice(0, 1), slice(0, 2), slice(0, 3),
slice(1, 2), slice(1, 3), slice(2, 3)]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
hdul.close()
def test_section_data_four(self):
a = np.arange(256).reshape(4, 4, 4, 4)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :, :, :] == dat[:, :, :, :]).all()
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :, :] == dat[0, :, :, :]).all()
assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all()
assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all()
assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all()
assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all()
hdul.close()
def test_section_data_scaled(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143
This is like test_section_data_square but uses a file containing scaled
image data, to test that sections can work correctly with scaled data.
"""
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
# Test without having accessed the full data first
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
assert not d._data_loaded
hdul.close()
def test_do_not_scale_image_data(self):
with fits.open(self.data('scale.fits'), do_not_scale_image_data=True) as hdul:
assert hdul[0].data.dtype == np.dtype('>i2')
with fits.open(self.data('scale.fits')) as hdul:
assert hdul[0].data.dtype == np.dtype('float32')
def test_append_uint_data(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56
(BZERO and BSCALE added in the wrong location when appending scaled
data)
"""
fits.writeto(self.temp('test_new.fits'), data=np.array([],
dtype='uint8'))
d = np.zeros([100, 100]).astype('uint16')
fits.append(self.temp('test_new.fits'), data=d)
with fits.open(self.temp('test_new.fits'), uint=True) as f:
assert f[1].data.dtype == 'uint16'
def test_scale_with_explicit_bzero_bscale(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6399
"""
hdu2 = fits.ImageHDU(np.random.rand(100, 100))
# The line below raised an exception in astropy 2.0, so if it does not
# raise an error here, that is progress.
hdu2.scale(type='uint8', bscale=1, bzero=0)
def test_uint_header_consistency(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2305
This ensures that an HDU containing unsigned integer data always has
the appropriate BZERO value in its header.
"""
for int_size in (16, 32, 64):
# Just make an array of some unsigned ints that wouldn't fit in a
# signed int array of the same bit width
max_uint = (2 ** int_size) - 1
if int_size == 64:
max_uint = np.uint64(max_uint)
dtype = f'uint{int_size}'
arr = np.empty(100, dtype=dtype)
arr.fill(max_uint)
arr -= np.arange(100, dtype=dtype)
uint_hdu = fits.PrimaryHDU(data=arr)
assert np.all(uint_hdu.data == arr)
assert uint_hdu.data.dtype.name == f'uint{int_size}'
assert 'BZERO' in uint_hdu.header
assert uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
filename = f'uint{int_size}.fits'
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename), uint=True) as hdul:
new_uint_hdu = hdul[0]
assert np.all(new_uint_hdu.data == arr)
assert new_uint_hdu.data.dtype.name == f'uint{int_size}'
assert 'BZERO' in new_uint_hdu.header
assert new_uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
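# Note: the BZERO values asserted above follow the FITS convention for storing
# unsigned integers as signed ones with an offset of 2**(bits - 1); e.g. a
# uint16 value v is written to disk as v - 32768 with BZERO = 32768.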
@pytest.mark.parametrize(('from_file'), (False, True))
@pytest.mark.parametrize(('do_not_scale'), (False,))
def test_uint_header_keywords_removed_after_bitpix_change(self,
from_file,
do_not_scale):
"""
Regression test for https://github.com/astropy/astropy/issues/4974
BZERO/BSCALE should be removed if data is converted to a floating
point type.
Currently excluding the case where do_not_scale_image_data=True
because it is not clear what the expectation should be.
"""
arr = np.zeros(100, dtype='uint16')
if from_file:
# To generate the proper input file we always want to scale the
# data before writing it...otherwise when we open it, it will be
# regular (signed) int data.
tmp_uint = fits.PrimaryHDU(arr)
filename = 'unsigned_int.fits'
tmp_uint.writeto(self.temp(filename))
with fits.open(self.temp(filename),
do_not_scale_image_data=do_not_scale) as f:
uint_hdu = f[0]
# Force a read before we close.
_ = uint_hdu.data
else:
uint_hdu = fits.PrimaryHDU(arr,
do_not_scale_image_data=do_not_scale)
# Make sure appropriate keywords are in the header. See
# https://github.com/astropy/astropy/pull/3916#issuecomment-122414532
# for discussion.
assert 'BSCALE' in uint_hdu.header
assert 'BZERO' in uint_hdu.header
assert uint_hdu.header['BSCALE'] == 1
assert uint_hdu.header['BZERO'] == 32768
# Convert data to floating point...
uint_hdu.data = uint_hdu.data * 1.0
# ...bitpix should be negative.
assert uint_hdu.header['BITPIX'] < 0
# BSCALE and BZERO should NOT be in header any more.
assert 'BSCALE' not in uint_hdu.header
assert 'BZERO' not in uint_hdu.header
# This is the main test...the data values should round trip
# as zero.
filename = 'test_uint_to_float.fits'
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert (hdul[0].data == 0).all()
def test_blanks(self):
"""Test image data with blank spots in it (which should show up as
NaNs in the data array).
"""
arr = np.zeros((10, 10), dtype=np.int32)
# One row will be blanks
arr[1] = 999
hdu = fits.ImageHDU(data=arr)
hdu.header['BLANK'] = 999
hdu.writeto(self.temp('test_new.fits'))
with fits.open(self.temp('test_new.fits')) as hdul:
assert np.isnan(hdul[1].data[1]).all()
def test_invalid_blanks(self):
"""
Test that invalid use of the BLANK keyword leads to an appropriate
warning, and that the BLANK keyword is ignored when returning the
HDU data.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
arr = np.arange(5, dtype=np.float64)
hdu = fits.PrimaryHDU(data=arr)
hdu.header['BLANK'] = 2
with pytest.warns(AstropyUserWarning, match="Invalid 'BLANK' keyword in header") as w:
hdu.writeto(self.temp('test_new.fits'))
# Allow the HDU to be written, but there should be a warning
# when writing a header with BLANK when the data is not
# int
assert len(w) == 1
# Should also get a warning when opening the file, and the BLANK
# value should not be applied
with pytest.warns(AstropyUserWarning, match="Invalid 'BLANK' keyword in header") as w:
with fits.open(self.temp('test_new.fits')) as h:
assert np.all(arr == h[0].data)
assert len(w) == 1
@pytest.mark.filterwarnings("ignore:Invalid 'BLANK' keyword in header")
def test_scale_back_with_blanks(self):
"""
Test that when auto-rescaling integer data with "blank" values (where
the blanks are replaced by NaN in the float data), that the "BLANK"
keyword is removed from the header.
Further, test that when using the ``scale_back=True`` option the blank
values are restored properly.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
# Make the sample file
arr = np.arange(5, dtype=np.int32)
hdu = fits.PrimaryHDU(data=arr)
hdu.scale('int16', bscale=1.23)
# Creating data that uses BLANK is currently kludgy--a separate issue
# TODO: Rewrite this test when scaling with BLANK values is better
# supported
# Let's just add a value to the data that should be converted to NaN
# when it is read back in:
filename = self.temp('test.fits')
hdu.data[0] = 9999
hdu.header['BLANK'] = 9999
hdu.writeto(filename)
with fits.open(filename) as hdul:
data = hdul[0].data
assert np.isnan(data[0])
with pytest.warns(fits.verify.VerifyWarning,
match=r"Invalid 'BLANK' keyword in header"):
hdul.writeto(self.temp('test2.fits'))
# Now reopen the newly written file. It should not have a 'BLANK'
# keyword
with fits.open(self.temp('test2.fits')) as hdul2:
assert 'BLANK' not in hdul2[0].header
data = hdul2[0].data
assert np.isnan(data[0])
# Finally, test that scale_back keeps the BLANKs correctly
with fits.open(filename, scale_back=True,
mode='update') as hdul3:
data = hdul3[0].data
# This emits warning that pytest cannot catch properly, so we
# catch it with pytest.mark.filterwarnings above.
assert np.isnan(data[0])
with fits.open(filename,
do_not_scale_image_data=True) as hdul4:
assert hdul4[0].header['BLANK'] == 9999
assert hdul4[0].header['BSCALE'] == 1.23
assert hdul4[0].data[0] == 9999
def test_bzero_with_floats(self):
"""Test use of the BZERO keyword in an image HDU containing float
data.
"""
arr = np.zeros((10, 10)) - 1
hdu = fits.ImageHDU(data=arr)
hdu.header['BZERO'] = 1.0
hdu.writeto(self.temp('test_new.fits'))
with fits.open(self.temp('test_new.fits')) as hdul:
arr += 1
assert (hdul[1].data == arr).all()
def test_rewriting_large_scaled_image(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101
"""
hdul = fits.open(self.data('fixed-1890.fits'))
orig_data = hdul[0].data
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.data('fixed-1890.fits'))
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), overwrite=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[0].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
def test_image_update_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105
Replacing the original header to an image HDU and saving should update
the NAXISn keywords appropriately and save the image data correctly.
"""
# Copy the original file before saving to it
self.copy_file('test0.fits')
with fits.open(self.temp('test0.fits'), mode='update') as hdul:
orig_data = hdul[1].data.copy()
hdr_copy = hdul[1].header.copy()
del hdr_copy['NAXIS*']
hdul[1].header = hdr_copy
with fits.open(self.temp('test0.fits')) as hdul:
assert (orig_data == hdul[1].data).all()
# The test below raised a `ResourceWarning: unclosed transport` exception
# due to a bug in Python <=3.10 (cf. cpython#90476)
@pytest.mark.filterwarnings("ignore:unclosed transport <asyncio.sslproto")
def test_open_scaled_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119
(Don't update scaled image data if the data is not read)
This ensures that merely opening and closing a file containing scaled
image data does not cause any change to the data (or the header).
Changes should only occur if the data is accessed.
"""
# Copy the original file before making any possible changes to it
self.copy_file('scale.fits')
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp('scale.fits'), 'update')
orig_data = hdul[0].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
assert (orig_data == hdul[0].data).all()
# Try reshaping the data, then closing and reopening the file; let's
# see if all the changes are preserved properly
hdul[0].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[0].shape == (42, 10)
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
hdul.close()
def test_scale_back(self):
"""A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120
The scale_back feature for image HDUs.
"""
self.copy_file('scale.fits')
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[0].header['BITPIX']
orig_bzero = hdul[0].header['BZERO']
orig_bscale = hdul[0].header['BSCALE']
orig_data = hdul[0].data.copy()
hdul[0].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[0].header['BITPIX'] == orig_bitpix
assert hdul[0].header['BZERO'] == orig_bzero
assert hdul[0].header['BSCALE'] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[0].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[0].data[1:] == orig_data[1:]).all()
def test_image_none(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('test0.fits')) as h:
h[1].data
h[1].data = None
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].data is None
assert h[1].header['NAXIS'] == 0
assert 'NAXIS1' not in h[1].header
assert 'NAXIS2' not in h[1].header
def test_invalid_blank(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2711
If the BLANK keyword contains an invalid value it should be ignored for
any calculations (though a warning should be issued).
"""
data = np.arange(100, dtype=np.float64)
hdu = fits.PrimaryHDU(data)
hdu.header['BLANK'] = 'nan'
with pytest.warns(fits.verify.VerifyWarning, match=r"Invalid value for "
r"'BLANK' keyword in header: 'nan'"):
hdu.writeto(self.temp('test.fits'))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[0].data == data)
assert len(w) == 2
msg = "Invalid value for 'BLANK' keyword in header"
assert msg in str(w[0].message)
msg = "Invalid 'BLANK' keyword"
assert msg in str(w[1].message)
def test_scaled_image_fromfile(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2710
"""
# Make some sample data
a = np.arange(100, dtype=np.float32)
hdu = fits.PrimaryHDU(data=a.copy())
hdu.scale(bscale=1.1)
hdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
file_data = f.read()
hdul = fits.HDUList.fromstring(file_data)
assert np.allclose(hdul[0].data, a)
def test_set_data(self):
"""
Test data assignment - issue #5087
"""
im = fits.ImageHDU()
ar = np.arange(12)
im.data = ar
def test_scale_bzero_with_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.PrimaryHDU(data=a.copy())
hdu2 = fits.PrimaryHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.PrimaryHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as (hdu,):
hdu.data[:] = 0
assert np.allclose(hdu.data, 0)
def test_hdu_creation_with_scalar(self):
msg = r'data object array\(1\) should have at least one dimension'
with pytest.raises(TypeError, match=msg):
fits.ImageHDU(data=1)
with pytest.raises(TypeError, match=msg):
fits.PrimaryHDU(data=1)
class TestCompressedImage(FitsTestCase):
def test_empty(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2595
"""
hdu = fits.CompImageHDU()
assert hdu.data is None
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode='update') as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert hdul[1].data is None
# Now test replacing the empty data with an array and see what
# happens
hdul[1].data = np.arange(100, dtype=np.int32)
with fits.open(self.temp('test.fits')) as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert np.all(hdul[1].data == np.arange(100, dtype=np.int32))
@pytest.mark.parametrize(
('data', 'compression_type', 'quantize_level'),
[(np.zeros((2, 10, 10), dtype=np.float32), 'RICE_1', 16),
(np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_1', -0.01),
(np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_2', -0.01),
(np.zeros((100, 100)) + 1, 'HCOMPRESS_1', 16),
(np.zeros((10, 10)), 'PLIO_1', 16)])
@pytest.mark.parametrize('byte_order', ['<', '>'])
def test_comp_image(self, data, compression_type, quantize_level,
byte_order):
data = data.newbyteorder(byte_order)
primary_hdu = fits.PrimaryHDU()
ofd = fits.HDUList(primary_hdu)
chdu = fits.CompImageHDU(data, name='SCI',
compression_type=compression_type,
quantize_level=quantize_level)
ofd.append(chdu)
ofd.writeto(self.temp('test_new.fits'), overwrite=True)
ofd.close()
with fits.open(self.temp('test_new.fits')) as fd:
assert (fd[1].data == data).all()
assert fd[1].header['NAXIS'] == chdu.header['NAXIS']
assert fd[1].header['NAXIS1'] == chdu.header['NAXIS1']
assert fd[1].header['NAXIS2'] == chdu.header['NAXIS2']
assert fd[1].header['BITPIX'] == chdu.header['BITPIX']
@pytest.mark.skipif(not HAS_SCIPY, reason='requires scipy')
@pytest.mark.remote_data
def test_comp_image_quantize_level(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5969
Test that quantize_level is used.
"""
import scipy
from astropy.utils import minversion
SCIPY_LT_1_10 = not minversion(scipy, '1.10dev0')
np.random.seed(42)
if SCIPY_LT_1_10:
import scipy.misc  # No lazy loading before scipy 1.10
scipy_data = scipy.misc.ascent()
else:
scipy_data = scipy.datasets.ascent()
data = scipy_data + np.random.randn(512, 512)*10
fits.ImageHDU(data).writeto(self.temp('im1.fits'))
fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
quantize_level=-1, dither_seed=5).writeto(self.temp('im2.fits'))
fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
quantize_level=-100, dither_seed=5).writeto(self.temp('im3.fits'))
im1 = fits.getdata(self.temp('im1.fits'))
im2 = fits.getdata(self.temp('im2.fits'))
im3 = fits.getdata(self.temp('im3.fits'))
assert not np.array_equal(im2, im3)
assert np.isclose(np.min(im1 - im2), -0.5, atol=1e-3)
assert np.isclose(np.max(im1 - im2), 0.5, atol=1e-3)
assert np.isclose(np.min(im1 - im3), -50, atol=1e-1)
assert np.isclose(np.max(im1 - im3), 50, atol=1e-1)
def test_comp_image_hcompression_1_invalid_data(self):
"""
Tests compression with the HCOMPRESS_1 algorithm with data that is
not 2D and has a non-2D tile size.
"""
pytest.raises(ValueError, fits.CompImageHDU,
np.zeros((2, 10, 10), dtype=np.float32), name='SCI',
compression_type='HCOMPRESS_1', quantize_level=16,
tile_size=[2, 10, 10])
def test_comp_image_hcompress_image_stack(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171
Tests that data containing more than two dimensions can be
compressed with HCOMPRESS_1 so long as the user-supplied tile size can
be flattened to two dimensions.
"""
cube = np.arange(300, dtype=np.float32).reshape(3, 10, 10)
hdu = fits.CompImageHDU(data=cube, name='SCI',
compression_type='HCOMPRESS_1',
quantize_level=16, tile_size=[5, 5, 1])
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
# HCOMPRESSed images are allowed to deviate from the original by
# about 1/quantize_level of the RMS in each tile.
assert np.abs(hdul['SCI'].data - cube).max() < 1./15.
def test_subtractive_dither_seed(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/32
Ensure that when floating point data is compressed with the
SUBTRACTIVE_DITHER_1 quantization method that the correct ZDITHER0 seed
is added to the header, and that the data can be correctly
decompressed.
"""
array = np.arange(100.0).reshape(10, 10)
csum = (array[0].view('uint8').sum() % 10000) + 1
hdu = fits.CompImageHDU(data=array,
quantize_method=SUBTRACTIVE_DITHER_1,
dither_seed=DITHER_SEED_CHECKSUM)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
assert 'ZQUANTIZ' in hdul[1]._header
assert hdul[1]._header['ZQUANTIZ'] == 'SUBTRACTIVE_DITHER_1'
assert 'ZDITHER0' in hdul[1]._header
assert hdul[1]._header['ZDITHER0'] == csum
assert np.all(hdul[1].data == array)
def test_disable_image_compression(self):
with fits.open(self.data('comp.fits'),
disable_image_compression=True) as hdul:
# The compressed image HDU should show up as a BinTableHDU, but
# *not* a CompImageHDU
assert isinstance(hdul[1], fits.BinTableHDU)
assert not isinstance(hdul[1], fits.CompImageHDU)
with fits.open(self.data('comp.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
def test_open_comp_image_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167
Similar to test_open_scaled_in_update_mode(), but specifically for
compressed images.
"""
# Copy the original file before making any possible changes to it
self.copy_file('comp.fits')
mtime = os.stat(self.temp('comp.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('comp.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('comp.fits')).st_mtime
@pytest.mark.slow
def test_open_scaled_in_update_mode_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2
Identical to test_open_scaled_in_update_mode() but with a compressed
version of the scaled image.
"""
# Copy+compress the original file before making any possible changes to
# it
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp('scale.fits'), 'update')
hdul[1].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
# Try reshaping the data, then closing and reopening the file; let's
# see if all the changes are preserved properly
hdul[1].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[1].shape == (42, 10)
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
hdul.close()
def test_write_comp_hdu_direct_from_existing(self):
with fits.open(self.data('comp.fits')) as hdul:
hdul[1].writeto(self.temp('test.fits'))
with fits.open(self.data('comp.fits')) as hdul1:
with fits.open(self.temp('test.fits')) as hdul2:
assert np.all(hdul1[1].data == hdul2[1].data)
assert comparerecords(hdul1[1].compressed_data,
hdul2[1].compressed_data)
def test_rewriting_large_scaled_image_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1
Identical to test_rewriting_large_scaled_image() but with a compressed
image.
"""
with fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('fixed-1890-z.fits'))
hdul = fits.open(self.temp('fixed-1890-z.fits'))
orig_data = hdul[1].data
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.temp('fixed-1890-z.fits'))
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.temp('fixed-1890-z.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), overwrite=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[1].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
def test_scale_back_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3
Identical to test_scale_back() but uses a compressed image.
"""
# Create a compressed version of the scaled image
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[1].header['BITPIX']
orig_bzero = hdul[1].header['BZERO']
orig_bscale = hdul[1].header['BSCALE']
orig_data = hdul[1].data.copy()
hdul[1].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[1].header['BITPIX'] == orig_bitpix
assert hdul[1].header['BZERO'] == orig_bzero
assert hdul[1].header['BSCALE'] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[1].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[1].data[1:] == orig_data[1:]).all()
# Extra test to ensure that after everything the data is still the
# same as in the original uncompressed version of the image
with fits.open(self.data('scale.fits')) as hdul2:
# Recall we made the same modification to the data in hdul
# above
hdul2[0].data[0] = 0
assert (hdul[1].data == hdul2[0].data).all()
def test_lossless_gzip_compression(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198"""
rng = np.random.default_rng(42)
noise = rng.normal(size=(20, 20))
chdu1 = fits.CompImageHDU(data=noise, compression_type='GZIP_1')
# First make a test image with lossy compression and make sure it
# wasn't compressed perfectly. This shouldn't happen ever, but just to
# make sure the test is non-trivial.
chdu1.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert np.abs(noise - h[1].data).max() > 0.0
del h
chdu2 = fits.CompImageHDU(data=noise, compression_type='GZIP_1',
quantize_level=0.0) # No quantization
chdu2.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as h:
assert (noise == h[1].data).all()
def test_compression_column_tforms(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/199"""
# Some interestingly tiled data so that some of it is quantized and
# some of it ends up just getting gzip-compressed
data2 = ((np.arange(1, 8, dtype=np.float32) * 10)[:, np.newaxis] +
np.arange(1, 7))
np.random.seed(1337)
data1 = np.random.uniform(size=(6 * 4, 7 * 4))
data1[:data2.shape[0], :data2.shape[1]] = data2
chdu = fits.CompImageHDU(data1, compression_type='RICE_1',
tile_size=(6, 7))
chdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'),
disable_image_compression=True) as h:
assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM1'])
assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM2'])
def test_compression_update_header(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/23
"""
self.copy_file('comp.fits')
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
hdul[1].header['test1'] = 'test'
hdul[1]._header['test2'] = 'test2'
with fits.open(self.temp('comp.fits')) as hdul:
assert 'test1' in hdul[1].header
assert hdul[1].header['test1'] == 'test'
assert 'test2' in hdul[1].header
assert hdul[1].header['test2'] == 'test2'
# Test update via index now:
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
hdr[hdr.index('TEST1')] = 'foo'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['TEST1'] == 'foo'
# Test slice updates
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header['TEST*'] = 'qux'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['qux', 'qux']
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
idx = hdr.index('TEST1')
hdr[idx:idx + 2] = 'bar'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['bar', 'bar']
# Test updating a specific COMMENT card duplicate
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header[('COMMENT', 1)] = 'I am fire. I am death!'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['COMMENT'][1] == 'I am fire. I am death!'
assert hdul[1]._header['COMMENT'][1] == 'I am fire. I am death!'
# Test deleting by keyword and by slice
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
del hdr['COMMENT']
idx = hdr.index('TEST1')
del hdr[idx:idx + 2]
with fits.open(self.temp('comp.fits')) as hdul:
assert 'COMMENT' not in hdul[1].header
assert 'COMMENT' not in hdul[1]._header
assert 'TEST1' not in hdul[1].header
assert 'TEST1' not in hdul[1]._header
assert 'TEST2' not in hdul[1].header
assert 'TEST2' not in hdul[1]._header
def test_compression_update_header_with_reserved(self):
"""
Ensure that setting reserved keywords related to the table data
structure on CompImageHDU image headers fails.
"""
def test_set_keyword(hdr, keyword, value):
with pytest.warns(UserWarning) as w:
hdr[keyword] = value
assert len(w) == 1
assert str(w[0].message).startswith(
f"Keyword {keyword!r} is reserved")
assert keyword not in hdr
with fits.open(self.data('comp.fits')) as hdul:
hdr = hdul[1].header
test_set_keyword(hdr, 'TFIELDS', 8)
test_set_keyword(hdr, 'TTYPE1', 'Foo')
test_set_keyword(hdr, 'ZCMPTYPE', 'ASDF')
test_set_keyword(hdr, 'ZVAL1', 'Foo')
def test_compression_header_append(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w:
imghdr.append('TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
imghdr.append(('FOO', 'bar', 'qux'), end=True)
assert 'FOO' in imghdr
assert imghdr[-1] == 'bar'
assert 'FOO' in tblhdr
assert tblhdr[-1] == 'bar'
imghdr.append(('CHECKSUM', 'abcd1234'))
assert 'CHECKSUM' in imghdr
assert imghdr['CHECKSUM'] == 'abcd1234'
assert 'CHECKSUM' not in tblhdr
assert 'ZHECKSUM' in tblhdr
assert tblhdr['ZHECKSUM'] == 'abcd1234'
def test_compression_header_append2(self):
"""
Regression test for issue https://github.com/astropy/astropy/issues/5827
"""
with fits.open(self.data('comp.fits')) as hdul:
header = hdul[1].header
while (len(header) < 1000):
header.append() # pad with grow room
# Append stats to header:
header.append(("Q1_OSAVG", 1, "[adu] quadrant 1 overscan mean"))
header.append(("Q1_OSSTD", 1, "[adu] quadrant 1 overscan stddev"))
header.append(("Q1_OSMED", 1, "[adu] quadrant 1 overscan median"))
def test_compression_header_insert(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
# First try inserting a restricted keyword
with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w:
imghdr.insert(1000, 'TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
assert tblhdr.count('TFIELDS') == 1
# First try keyword-relative insert
imghdr.insert('TELESCOP', ('OBSERVER', 'Phil Plait'))
assert 'OBSERVER' in imghdr
assert imghdr.index('OBSERVER') == imghdr.index('TELESCOP') - 1
assert 'OBSERVER' in tblhdr
assert tblhdr.index('OBSERVER') == tblhdr.index('TELESCOP') - 1
# Next let's see if an index-relative insert winds up being
# sensible
idx = imghdr.index('OBSERVER')
imghdr.insert('OBSERVER', ('FOO',))
assert 'FOO' in imghdr
assert imghdr.index('FOO') == idx
assert 'FOO' in tblhdr
assert tblhdr.index('FOO') == tblhdr.index('OBSERVER') - 1
def test_compression_header_set_before_after(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with pytest.warns(UserWarning, match="Keyword 'ZBITPIX' is reserved ") as w:
imghdr.set('ZBITPIX', 77, 'asdf', after='XTENSION')
assert len(w) == 1
assert 'ZBITPIX' not in imghdr
assert tblhdr.count('ZBITPIX') == 1
assert tblhdr['ZBITPIX'] != 77
# Move GCOUNT before PCOUNT (not that there's any reason you'd
# *want* to do that, but it's just a test...)
imghdr.set('GCOUNT', 99, before='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') - 1
assert imghdr['GCOUNT'] == 99
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') - 1
assert tblhdr['ZGCOUNT'] == 99
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
imghdr.set('GCOUNT', 2, after='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') + 1
assert imghdr['GCOUNT'] == 2
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') + 1
assert tblhdr['ZGCOUNT'] == 2
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
def test_compression_header_append_commentary(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2363
"""
hdu = fits.CompImageHDU(np.array([0], dtype=np.int32))
hdu.header['COMMENT'] = 'hello world'
assert hdu.header['COMMENT'] == ['hello world']
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['COMMENT'] == ['hello world']
def test_compression_with_gzip_column(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/71
"""
arr = np.zeros((2, 7000), dtype='float32')
# The first row (which will be the first compressed tile) has a very
# wide range of values that will be difficult to quantize, and should
# result in use of a GZIP_COMPRESSED_DATA column
arr[0] = np.linspace(0, 1, 7000)
arr[1] = np.random.normal(size=7000)
hdu = fits.CompImageHDU(data=arr)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
comp_hdu = hdul[1]
# GZIP-compressed tile should compare exactly
assert np.all(comp_hdu.data[0] == arr[0])
# The second tile uses lossy compression and may be somewhat off,
# so we don't bother comparing it exactly
def test_duplicate_compression_header_keywords(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2750
Tests that the fake header (for the compressed image) can still be read
even if the real header contained a duplicate ZTENSION keyword (the
issue applies to any keyword specific to the compression convention,
however).
"""
arr = np.arange(100, dtype=np.int32)
hdu = fits.CompImageHDU(data=arr)
header = hdu._header
# append the duplicate keyword
hdu._header.append(('ZTENSION', 'IMAGE'))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert header == hdul[1]._header
# There's no good reason to have a duplicate keyword, but
# technically it isn't invalid either :/
assert hdul[1]._header.count('ZTENSION') == 2
def test_scale_bzero_with_compressed_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
and https://github.com/astropy/astropy/issues/4588
Identical to test_scale_bzero_with_int_data() but uses a compressed
image.
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.CompImageHDU(data=a.copy())
hdu2 = fits.CompImageHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_compressed_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Identical to test_scale_back_uint_assignment() but uses a compressed
image.
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.CompImageHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as hdul:
hdul[1].data[:] = 0
assert np.allclose(hdul[1].data, 0)
def test_compressed_header_missing_znaxis(self):
a = np.arange(100, 200, dtype=np.uint16)
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZNAXIS')
with pytest.raises(KeyError):
comp_hdu.compressed_data
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZBITPIX')
with pytest.raises(KeyError):
comp_hdu.compressed_data
def test_compressed_header_double_extname(self):
"""Test that a double EXTNAME with one default value does not
mask the non-default value."""
with fits.open(self.data('double_ext.fits')) as hdul:
hdu = hdul[1]
# Raw header has 2 EXTNAME entries
indices = hdu._header._keyword_indices['EXTNAME']
assert len(indices) == 2
# The non-default name should be returned.
assert hdu.name == 'ccd00'
assert 'EXTNAME' in hdu.header
assert hdu.name == hdu.header['EXTNAME']
# There should be 1 non-default EXTNAME entry.
indices = hdu.header._keyword_indices['EXTNAME']
assert len(indices) == 1
# Test header sync from property set.
new_name = 'NEW_NAME'
hdu.name = new_name
assert hdu.name == new_name
assert hdu.header['EXTNAME'] == new_name
assert hdu._header['EXTNAME'] == new_name
assert hdu._image_header['EXTNAME'] == new_name
# Check that setting the header will change the name property.
hdu.header['EXTNAME'] = 'NEW2'
assert hdu.name == 'NEW2'
hdul.writeto(self.temp('tmp.fits'), overwrite=True)
with fits.open(self.temp('tmp.fits')) as hdul1:
hdu1 = hdul1[1]
assert len(hdu1._header._keyword_indices['EXTNAME']) == 1
assert hdu1.name == 'NEW2'
# Check that deleting EXTNAME and then setting the name again
# works properly.
del hdu.header['EXTNAME']
hdu.name = 'RE-ADDED'
assert hdu.name == 'RE-ADDED'
with pytest.raises(TypeError):
hdu.name = 42
def test_compressed_header_extname(self):
"""Test consistent EXTNAME / hdu name interaction."""
name = 'FOO'
hdu = fits.CompImageHDU(data=np.arange(10), name=name)
assert hdu._header['EXTNAME'] == name
assert hdu.header['EXTNAME'] == name
assert hdu.name == name
name = 'BAR'
hdu.name = name
assert hdu._header['EXTNAME'] == name
assert hdu.header['EXTNAME'] == name
assert hdu.name == name
assert len(hdu._header._keyword_indices['EXTNAME']) == 1
def test_compressed_header_minimal(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11694
Tests that CompImageHDU can be initialized with a Header that
contains few or no cards, and doesn't require specific cards
such as 'BITPIX' or 'NAXIS'.
"""
fits.CompImageHDU(data=np.arange(10), header=fits.Header())
header = fits.Header({'HELLO': 'world'})
hdu = fits.CompImageHDU(data=np.arange(10), header=header)
assert hdu.header['HELLO'] == 'world'
@pytest.mark.parametrize(
('keyword', 'dtype', 'expected'),
[('BSCALE', np.uint8, np.float32), ('BSCALE', np.int16, np.float32),
('BSCALE', np.int32, np.float64), ('BZERO', np.uint8, np.float32),
('BZERO', np.int16, np.float32), ('BZERO', np.int32, np.float64)])
def test_compressed_scaled_float(self, keyword, dtype, expected):
"""
If BSCALE or BZERO is set to a floating-point value, the image
should be floating-point.
https://github.com/astropy/astropy/pull/6492
Parameters
----------
keyword : `str`
Keyword to set to a floating-point value to trigger
floating-point pixels.
dtype : `numpy.dtype`
Type of original array.
expected : `numpy.dtype`
Expected type of uncompressed array.
"""
value = 1.23345 # A floating-point value
hdu = fits.CompImageHDU(np.arange(0, 10, dtype=dtype))
hdu.header[keyword] = value
hdu.writeto(self.temp('test.fits'))
del hdu
with fits.open(self.temp('test.fits')) as hdu:
assert hdu[1].header[keyword] == value
assert hdu[1].data.dtype == expected
@pytest.mark.parametrize('dtype', (np.uint8, np.int16, np.uint16, np.int32,
np.uint32))
def test_compressed_integers(self, dtype):
"""Test that the various integer dtypes are correctly written and read.
Regression test for https://github.com/astropy/astropy/issues/9072
"""
mid = np.iinfo(dtype).max // 2
data = np.arange(mid-50, mid+50, dtype=dtype)
testfile = self.temp('test.fits')
hdu = fits.CompImageHDU(data=data)
hdu.writeto(testfile, overwrite=True)
new = fits.getdata(testfile)
np.testing.assert_array_equal(data, new)
def test_write_non_contiguous_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2150
"""
orig = np.arange(100, dtype=float).reshape((10, 10), order='f')
assert not orig.flags.contiguous
primary = fits.PrimaryHDU()
hdu = fits.CompImageHDU(orig)
hdulist = fits.HDUList([primary, hdu])
hdulist.writeto(self.temp('test.fits'))
actual = fits.getdata(self.temp('test.fits'))
assert_equal(orig, actual)
def test_slice_and_write_comp_hdu(self):
"""
Regression test for https://github.com/astropy/astropy/issues/9955
"""
with fits.open(self.data('comp.fits')) as hdul:
hdul[1].data = hdul[1].data[:200, :100]
assert not hdul[1].data.flags.contiguous
hdul[1].writeto(self.temp('test.fits'))
with fits.open(self.data('comp.fits')) as hdul1:
with fits.open(self.temp('test.fits')) as hdul2:
assert_equal(hdul1[1].data[:200, :100], hdul2[1].data)
def test_comphdu_bscale(tmp_path):
"""
Regression test for a bug where extensions that used BZERO and BSCALE,
once converted to CompImageHDU, ended up with BZERO/BSCALE keywords
before TFIELDS.
"""
filename1 = tmp_path / '3hdus.fits'
filename2 = tmp_path / '3hdus_comp.fits'
x = np.random.random((100, 100))*100
x0 = fits.PrimaryHDU()
x1 = fits.ImageHDU(np.array(x-50, dtype=int), uint=True)
x1.header['BZERO'] = 20331
x1.header['BSCALE'] = 2.3
hdus = fits.HDUList([x0, x1])
hdus.writeto(filename1)
# fitsverify (based on cfitsio) should fail on this file, only seeing the
# first HDU.
with fits.open(filename1) as hdus:
hdus[1] = fits.CompImageHDU(data=hdus[1].data.astype(np.uint32),
header=hdus[1].header)
hdus.writeto(filename2)
# open again and verify
with fits.open(filename2) as hdus:
hdus[1].verify('exception')
def test_scale_implicit_casting():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations.
hdu = fits.ImageHDU(np.array([1], dtype=np.int32))
hdu.scale(bzero=1.3)
def test_bzero_implicit_casting_compressed():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations. Astropy is
# actually not able to produce a file that triggers the failure - the
# issue occurs when using unsigned integer types in the FITS file, in which
# case BZERO should be 32768. But if the keyword is stored as 32768.0, then
# it was possible to trigger the implicit casting error.
filename = get_pkg_data_filename('data/compressed_float_bzero.fits')
with fits.open(filename) as hdul:
hdu = hdul[1]
hdu.data
def test_bzero_mishandled_info(tmp_path):
# Regression test for #5507:
# Calling HDUList.info() on a dataset which applies a zeropoint
# from BZERO but which astropy.io.fits does not think it needs
# to resize to a new dtype results in an AttributeError.
filename = tmp_path / 'floatimg_with_bzero.fits'
hdu = fits.ImageHDU(np.zeros((10, 10)))
hdu.header['BZERO'] = 10
hdu.writeto(filename, overwrite=True)
with fits.open(filename) as hdul:
hdul.info()
def test_image_write_readonly(tmp_path):
# Regression test to make sure that we can write out read-only arrays (#5512)
x = np.array([1, 2, 3])
x.setflags(write=False)
ghdu = fits.ImageHDU(data=x)
ghdu.add_datasum()
filename = tmp_path / 'test.fits'
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1, 2, 3])
# Same for compressed HDU
x = np.array([1.0, 2.0, 3.0])
x.setflags(write=False)
ghdu = fits.CompImageHDU(data=x)
# add_datasum does not work for CompImageHDU
# ghdu.add_datasum()
filename = tmp_path / 'test2.fits'
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1.0, 2.0, 3.0])
def test_int8(tmp_path):
'''Test for int8 support, https://github.com/astropy/astropy/issues/11995'''
img = np.arange(-50, 50, dtype=np.int8).reshape(10, 10)
hdu = fits.PrimaryHDU(img)
hdu.writeto(tmp_path / "int8.fits")
with fits.open(tmp_path / "int8.fits") as hdul:
assert hdul[0].header['BITPIX'] == 8
assert hdul[0].header['BZERO'] == -128
assert hdul[0].header['BSCALE'] == 1.0
assert_equal(hdul[0].data, img)
assert hdul[0].data.dtype == img.dtype
|
{
"content_hash": "6d524b3371b41ced40a77b2508e1ff53",
"timestamp": "",
"source": "github",
"line_count": 2005,
"max_line_length": 106,
"avg_line_length": 40.467331670822944,
"alnum_prop": 0.5571933889594143,
"repo_name": "larrybradley/astropy",
"id": "5092316ce9ac671238f39bf27fb0401cdbfd3f05",
"size": "81201",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/io/fits/tests/test_image.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12335716"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import MoveTo
from cocos.sprite import Sprite
import pyglet
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
x,y = director.get_window_size()
self.sprite = Sprite( 'grossini.png' )
self.add( self.sprite )
self.sprite.do( MoveTo( (x,y), 10 ) )
if __name__ == "__main__":
director.init(width=300, height=300, do_not_scale=True)
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
|
{
"content_hash": "1d624743faca7edaf667ec899ea87b1b",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 65,
"avg_line_length": 25.464285714285715,
"alnum_prop": 0.6199158485273493,
"repo_name": "adamwiggins/cocos2d",
"id": "1b78bbb34d25fd41ea03bae810d158e0fdfda7d4",
"size": "786",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_unscaled_win_resize.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "825818"
},
{
"name": "Shell",
"bytes": "3018"
}
],
"symlink_target": ""
}
|
import os
import re
import sys
import shlex
import pprint
import signal
import threading
import traceback
from Queue import Queue
from collections import defaultdict
from ConfigParser import *
from cmd import *
from struct import *
from getopt import getopt
from UserDict import *
import vtrace
import vtrace.util as v_util
import vtrace.snapshot as vs_snap
import vtrace.notifiers as v_notif
import vdb
import vdb.stalker as v_stalker
import vdb.extensions as v_ext
import envi
import envi.cli as e_cli
import envi.bits as e_bits
import envi.memory as e_mem
import envi.config as e_config
import envi.memcanvas as e_canvas
import envi.symstore.resolver as e_resolv
import vstruct
import vstruct.primitives as vs_prims
vdb.basepath = vdb.__path__[0] + '/'
class VdbLookup(UserDict):
'''
Used for lookups by key or value.
'''
def __init__(self, initdict=None):
UserDict.__init__(self)
if initdict == None:
return
for key, val in initdict.items():
self.__setitem__(key, val)
def __setitem__(self, key, item):
UserDict.__setitem__(self, key, item)
UserDict.__setitem__(self, item, key)
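# A minimal usage sketch: because __setitem__ mirrors every pair,
# a single VdbLookup answers lookups in both directions.
#
#   sigs = VdbLookup({'SIGINT': 2})
#   assert sigs['SIGINT'] == 2
#   assert sigs[2] == 'SIGINT'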
class ScriptThread(threading.Thread):
def __init__(self, cobj, locals):
threading.Thread.__init__(self)
self.setDaemon(True)
self.cobj = cobj
self.locals = locals
def run(self):
try:
exec(self.cobj, self.locals)
except Exception, e:
traceback.print_exc()
print('Script Error: %s' % repr(e))
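# A minimal usage sketch (names are illustrative): ScriptThread expects a
# pre-compiled code object plus a locals dict, and runs as a daemon so a
# hung script never blocks interpreter shutdown.
#
#   cobj = compile("vprint('hello')", 'myscript', 'exec')
#   ScriptThread(cobj, {'vprint': vprint}).start()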
def setupBreakOnEntry(trace):
'''
Sets a one time breakpoint at the __entry symbol. Removes itself as a
notifier after a single NOTIFY_BREAK event.
'''
exefile = trace.normFileName(trace.getExe())
exesym = trace.getSymByName(exefile)
if exesym != None:
entrySym = exesym.getSymByName('__entry')
if entrySym != None:
entrySymExpr = '%s.__entry' % (exefile,)
otb = vtrace.OneTimeBreak(None, expression=entrySymExpr)
trace.addBreakpoint(otb)
class VdbTrace:
"""
Used to hand to things that need a persistent reference to a trace
when using vdb to manage tracers.
"""
def __init__(self, db):
self.db = db
def attach(self, pid):
# Create a new tracer for the debugger and attach.
trace = self.db.newTrace()
trace.attach(pid)
# Take over all notifier registration
def registerNotifier(self, event, notif):
self.db.registerNotifier(event, notif)
def deregisterNotifier(self, event, notif):
self.db.deregisterNotifier(event, notif)
#FIXME should we add modes to this?
def selectThread(self, threadid):
#FIXME perhaps a thread selected LOCAL event?
trace = self.db.getTrace()
trace.selectThread(threadid)
self.db.fireLocalNotifiers(vtrace.NOTIFY_BREAK, trace)
def __getattr__(self, name):
return getattr(self.db.getTrace(), name)
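# Delegation sketch: VdbTrace is a thin proxy, so any attribute it does
# not define falls through to the *current* trace via __getattr__. Code
# holding a VdbTrace keeps working across newTrace() calls, e.g.:
#
#   t = VdbTrace(db)
#   pc = t.getProgramCounter()  # resolved against db.getTrace() per call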
defconfig = {
'vdb':{
'BreakOnEntry':False,
'BreakOnMain':False,
'SymbolCacheActive':True,
'SymbolCachePath':e_config.gethomedir('.envi','symcache'),
'KillOnQuit': False,
},
'cli':{
'verbose':False,
'aliases': {
'<f1>':'stepi',
'<f2>':'go -I 1',
'<f5>':'go',
}
},
}
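# Config sketch (assumption: the EnviConfig loaded from ~/.vdb/vdb.json
# below accepts JSON overrides mirroring this layout):
#
#   { "vdb": { "KillOnQuit": true, "BreakOnMain": true } }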
docconfig = {
'vdb':{
'BreakOnMain':'Should the debugger break on main() if known?',
'BreakOnEntry':'Should the debugger break on the entry to the main module? (only works if you exec (and not attach to) the process)',
'SymbolCacheActive':'Should we cache symbols for subsequent loads?',
'SymbolCachePath':'Path elements ( ; separated) to search/cache symbols (filepath,cobra)',
}
}
class WrapExcThread(threading.Thread):
'''
Places the return value or exception information into a queue that can
be checked by the caller.
If the method calls exit(), then nothing will be in the queue.
'''
def __init__(self, target=None, args=tuple(), kwargs={}):
threading.Thread.__init__(self)
self.queue = Queue()
self.target = target
self.args = args
self.kwargs = kwargs
def run(self):
try:
retval = self.target(*self.args, **self.kwargs)
self.queue.put((retval,))
except Exception as e:
tb = traceback.format_exc()
self.queue.put((e, tb))
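# Usage sketch: the queue holds a 1-tuple on success and a 2-tuple of
# (exception, traceback string) on failure, so callers can tell the two
# apart by length.
#
#   wt = WrapExcThread(target=divmod, args=(10, 2))
#   wt.start()
#   wt.join()
#   res = wt.queue.get()
#   if len(res) == 1:
#       retval = res[0]      # normal return value
#   else:
#       exc, tb = res        # exception and formatted traceback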
class Vdb(e_cli.EnviMutableCli, v_notif.Notifier, v_util.TraceManager):
'''
A VDB object is a debugger object which may be used to embed full
debugger-like functionality into a python application. The
Vdb object contains a CLI implementation which extends envi.cli.
'''
def __init__(self, trace=None):
v_notif.Notifier.__init__(self)
v_util.TraceManager.__init__(self)
if trace == None:
trace = vtrace.getTrace()
arch = trace.getMeta('Architecture')
self.arch = envi.getArchModule(arch)
self.bpcmds = {}
self.waitlib = None
self.difftracks = {}
self.runcache = {}
self.runcachectors = {}
self.server = None
self.autoscript = None
self.runagain = False # A one-time thing for the cli
self.windows_jit_event = None
# We hang on to an opcode renderer instance
self.opcoderend = None
# If a VdbGui instance is present it will set this.
self.gui = None
self.setMode('NonBlocking', True)
self.manageTrace(trace)
self.registerNotifier(vtrace.NOTIFY_ALL, self)
# FIXME if config verbose
#self.registerNotifier(vtrace.NOTIFY_ALL, vtrace.VerboseNotifier())
self.vdbhome = e_config.gethomedir('.vdb')
# Load up the config
cfgfile = os.path.join(self.vdbhome, 'vdb.json')
self.config = e_config.EnviConfig(filename=cfgfile, defaults=defconfig)
self.setupSignalLookups()
# Ok... from here down we're handing everybody the crazy
# on-demand-resolved trace object.
trace = vdb.VdbTrace(self)
e_cli.EnviMutableCli.__init__(self, trace, self.config, symobj=trace)
self.prompt = 'vdb > '
self.banner = 'Welcome To VDB!\n'
self.addScriptPathEnvVar('VDB_SCRIPT_PATH')
self.loadDefaultRenderers(trace)
self.loadExtensions(trace)
def addRunCacheCtor(self, name, ctor):
'''
Add a "run cache constructor" which will be used if a RunCacheVar
is requested that is not currently cached. *All* RunCacheVar
entries are flushed automagically on run...
( Allows db caching of critical structs likely to be parsed
more than once by extensions )
'''
self.runcachectors[name] = ctor
def getRunCacheVar(self, cname):
'''
Retrieve a variable from the vdb "runcache". If not currently
cached, the object will be constructed and added to the cache
so that future references are fast.
'''
ret = self.runcache.get(cname)
if ret == None:
ret = self.runcachectors.get(cname)(self)
self.runcache[cname] = ret
return ret
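# Usage sketch ('heapmaps' and parseHeap are hypothetical): register a
# constructor once, then every getRunCacheVar() call reuses the cached
# object until the next run flushes it.
#
#   db.addRunCacheCtor('heapmaps', lambda db: parseHeap(db.getTrace()))
#   heap = db.getRunCacheVar('heapmaps')  # constructed once per stop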
def loadDefaultRenderers(self, trace):
import envi.memcanvas.renderers as e_render
import vdb.renderers as v_rend
# FIXME check endianness
self.canvas.addRenderer("bytes", e_render.ByteRend())
self.canvas.addRenderer("u_int_16", e_render.ShortRend())
self.canvas.addRenderer("u_int_32", e_render.LongRend())
self.canvas.addRenderer("u_int_64", e_render.QuadRend())
self.opcoderend = v_rend.OpcodeRenderer(self.trace)
self.canvas.addRenderer("asm", self.opcoderend)
stackrend = v_rend.StackRenderer(self.trace)
self.canvas.addRenderer('Stack View', stackrend)
drend = v_rend.DerefRenderer(self.trace)
self.canvas.addRenderer("Deref View", drend)
srend = v_rend.SymbolRenderer(self.trace)
self.canvas.addRenderer('Symbols View', srend)
for arch in envi.getArchModules():
if arch == None: # The "empty" default...
continue
archid = arch.getArchId()
archname = arch.getArchName()
archrend = v_rend.OpcodeRenderer( self.trace, arch=archid)
self.canvas.addRenderer('asm - %s' % archname, archrend)
def verror(self, msg, addnl=True):
if addnl:
msg += "\n"
sys.stderr.write(msg)
def fatalError(self, exception):
'''
Used for platform exceptions. This indicates something in the
underlying platform failed and continuing to debug is probably not a
good idea.
'''
self.vprint('%s: %s' % ('FATAL ERROR (you probably should restart the session)', exception))
def vdbUIEvent(self, event, einfo=None):
'''
Fire a UI event (mostly used by the GUI to force refresh)
Do *not* fire this API in a tight loop, rather, fire once when
changes are complete.
NOTE: Events should only be created for notification on
events *not* already emitted by the tracer.
'''
if self.gui != None:
self.gui.vdbUIEvent(event, einfo)
def loadExtensions(self, trace):
"""
Load up any extensions which are relevant for the current tracer's
platform/arch/etc...
"""
v_ext.loadExtensions(self, trace)
def getTrace(self):
return self.trace
def newTrace(self):
"""
Generate a new trace for this vdb instance. This fixes many of
the new attach/exec data munging issues because tracer re-use is
*very* sketchy...
"""
oldtrace = self.getTrace()
if oldtrace.isRunning():
oldtrace.sendBreak()
if oldtrace.isAttached():
oldtrace.detach()
self.trace = oldtrace.buildNewTrace()
oldtrace.release()
self.bpcmds = {}
self.manageTrace(self.trace)
return self.trace
def setupSignalLookups(self):
self.siglookup = VdbLookup()
self.siglookup[0] = 'None'
for name in dir(signal):
if name[:3] == 'SIG' and '_' not in name:
self.siglookup[name] = getattr(signal, name)
def getSignal(self, sig):
"""
If given an int, return the name, for a name, return the int ;)
"""
return self.siglookup.get(sig,None)
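# Example (posix values, for illustration): the VdbLookup built in
# setupSignalLookups() makes this symmetric:
#
#   db.getSignal('SIGINT')  ->  2
#   db.getSignal(2)         ->  'SIGINT'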
def parseExpression(self, exprstr):
return self.trace.parseExpression(exprstr)
def getExpressionLocals(self):
trace = vdb.VdbTrace(self)
r = vtrace.VtraceExpressionLocals(trace)
r['db'] = self
r['vprint'] = self.vprint
return r
def reprPointer(self, address):
"""
Return a string representing the best known name for
the given address
"""
if not address:
return "NULL"
# Do we have a symbol?
sym = self.trace.getSymByAddr(address, exact=False)
if sym != None:
return "%s + %d" % (repr(sym),address-long(sym))
# Check if it's a thread's stack
for tid,tinfo in self.trace.getThreads().items():
ctx = self.trace.getRegisterContext(tid)
sp = ctx.getStackCounter()
smap = self.trace.getMemoryMap(sp)
if not smap:
continue
stack,size,perms,fname = smap
if address >= stack and address < (stack+size):
off = address - sp
op = "+"
if off < 0:
op = "-"
off = abs(off)
return "tid:%d sp%s%s (stack)" % (tid,op,off)
map = self.trace.getMemoryMap(address)
if map:
return map[3]
return "Who knows?!?!!?"
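# Example outputs, for illustration only: a symbol hit looks like
# "kernel32.CreateFileA + 12", a stack hit like "tid:1234 sp+16 (stack)",
# a mapped address returns the backing file name from the memory map, and
# anything else falls through to the final catch-all string.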
def notify(self, event, trace):
pid = trace.getPid()
tid = trace.getCurrentThread()
# Any kind of event resets the runcache
self.runcache = {}
if event == vtrace.NOTIFY_ATTACH:
self.vprint("Attached to : %d" % pid)
self.waitlib = None
self.difftracks = {}
if self.windows_jit_event:
trace._winJitEvent(self.windows_jit_event)
self.windows_jit_event = None
# Initialize the tracer's symbol cache path
if self.config.vdb.SymbolCacheActive:
trace.setSymCachePath(self.config.vdb.SymbolCachePath)
# only respect BreakOnEntry if we exec'd something
if self.config.vdb.BreakOnEntry and trace.hasMeta('ExecCommand'):
self.runagain = True # skip initial break
if self.autoscript:
self.do_script(self.autoscript)
elif event == vtrace.NOTIFY_CONTINUE:
pass
elif event == vtrace.NOTIFY_DETACH:
self.difftracks = {}
self.vprint("Detached from %d" % pid)
elif event == vtrace.NOTIFY_SIGNAL:
# FIXME move all this code into a bolt on notifier!
thr = trace.getCurrentThread()
signo = trace.getCurrentSignal()
self.vprint("Process Received Signal %d (0x%.8x) (Thread: %d (0x%.8x))" % (signo, signo, thr, thr))
faddr,fperm = trace.getMemoryFault()
if faddr != None:
accstr = e_mem.getPermName(fperm)
self.vprint('Memory Fault: addr: 0x%.8x perm: %s' % (faddr, accstr))
elif event == vtrace.NOTIFY_BREAK:
trace.setMeta('PendingBreak', False)
bp = trace.getCurrentBreakpoint()
if bp:
self.vprint("Thread: %d Hit Break: %s" % (tid, repr(bp)))
cmdstr = self.bpcmds.get(bp.id, None)
if cmdstr != None:
self.onecmd(cmdstr)
else:
self.vprint("Thread: %d NOTIFY_BREAK" % tid)
if self.runagain: # One-time run-again behavior (for cli option)
if self.config.vdb.BreakOnEntry:
setupBreakOnEntry(trace)
trace.runAgain()
self.runagain = False
elif event == vtrace.NOTIFY_EXIT:
ecode = trace.getMeta('ExitCode')
self.vprint("PID %d exited: %d (0x%.8x)" % (pid,ecode,ecode))
elif event == vtrace.NOTIFY_LOAD_LIBRARY:
self.vprint("Loading Binary: %s" % trace.getMeta("LatestLibrary",None))
if self.waitlib != None:
normname = trace.getMeta('LatestLibraryNorm', None)
if self.waitlib == normname:
self.waitlib = None
trace.runAgain(False)
elif event == vtrace.NOTIFY_UNLOAD_LIBRARY:
self.vprint("Unloading Binary: %s" % trace.getMeta("LatestLibrary",None))
elif event == vtrace.NOTIFY_CREATE_THREAD:
self.vprint("New Thread: %d" % tid)
elif event == vtrace.NOTIFY_EXIT_THREAD:
ecode = trace.getMeta("ExitCode", 0)
self.vprint("Exit Thread: %d (ecode: 0x%.8x (%d))" % (tid,ecode,ecode))
elif event == vtrace.NOTIFY_DEBUG_PRINT:
s = "<unknown>"
win32 = trace.getMeta("Win32Event", None)
if win32:
s = win32.get("DebugString", "<unknown>")
self.vprint("DEBUG PRINT: %s" % s)
else:
pass
#self.vprint('unhandled event: %d' % event)
###################################################################
#
# All CLI extension commands start here
#
# FIXME this is duplicate, but... PUNT...
def do_writemem(self, args):
"""
Over-write some memory in the target address space.
Usage: writemem [options] <addr expression> <string>
-X The specified string is in hex (ie 414141 = AAA)
-U The specified string needs to be unicode in mem (AAA -> 410041004100)
"""
dohex = False
douni = False
try:
argv = e_cli.splitargs(args)
opts,args = getopt(argv, "XU")
except:
return self.do_help("writemem")
if len(args) != 2:
return self.do_help("writemem")
for opt,optarg in opts:
if opt == "-X":
dohex = True
elif opt == "-U":
douni = True
exprstr, memstr = args
if dohex: memstr = memstr.decode('hex')
if douni: memstr = ("\x00".join(memstr)) + "\x00"
addr = self.parseExpression(exprstr)
self.memobj.writeMemory(addr, memstr)
self.vdbUIEvent('vdb:writemem', (addr,memstr))
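# Illustrative invocations (addresses are made up):
#   writemem -X 0x00401000 90c3   # raw hex: NOP; RET
#   writemem -U 0x00402000 AAA    # widechar: 410041004100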
def do_vstruct(self, line):
"""
List the available structure modules and optionally
structure definitions from a particular module in the
current vstruct.
Usage: vstruct [modname]
"""
if len(line) == 0:
self.vprint("\nVStruct Namespaces:")
plist = self.trace.getStructNames()
else:
self.vprint("\nKnown Structures (from %s):" % line)
plist = self.trace.getStructNames(namespace=line)
plist.sort()
for n in plist:
self.vprint(str(n))
self.vprint("\n")
def do_dis(self, line):
"""
Print out the opcodes for a given address expression
Usage: dis <address expression> [<size expression>]
"""
argv = e_cli.splitargs(line)
size = 20
argc = len(argv)
if argc == 0:
addr = self.trace.getProgramCounter()
else:
addr = self.parseExpression(argv[0])
if argc > 1:
size = self.parseExpression(argv[1])
self.vprint("Disassembly:")
self.canvas.renderMemory(addr, size, rend=self.opcoderend)
def do_var(self, line):
"""
Set a variable in the expression parsing context. This allows
for scratchspace names (python compatible names) to be used in
expressions.
Usage: var <name> <addr_expression>
NOTE: The address expression *must* resolve at the time you set it.
"""
t = self.trace
if len(line):
argv = e_cli.splitargs(line)
if len(argv) == 1:
return self.do_help("var")
name = argv[0]
expr = " ".join(argv[1:])
addr = t.parseExpression(expr)
t.setVariable(name, addr)
vars = t.getVariables()
self.vprint("Current Variables:")
if not vars:
self.vprint("None.")
else:
vnames = vars.keys()
vnames.sort()
for n in vnames:
val = vars.get(n)
if type(val) in (int, long):
self.vprint("%20s = 0x%.8x" % (n,val))
else:
rstr = repr(val)
if len(rstr) > 30:
rstr = rstr[:30] + '...'
self.vprint("%20s = %s" % (n,rstr))
def do_alloc(self, args):
#"""
#Allocate a chunk of memory in the target process. You may
#optionally specify permissions and a suggested base address.
#Usage: alloc [-p rwx] [-s <base>] <size>
#"""
"""
Allocate a chunk of memory in the target process. It will be
allocated with rwx permissions.
Usage: alloc <size expr>
"""
if len(args) == 0:
return self.do_help("alloc")
t = self.trace
#argv = e_cli.splitargs(args)
try:
size = t.parseExpression(args)
base = t.allocateMemory(size)
self.vprint("Allocated %d bytes at: 0x%.8x" % (size, base))
except Exception, e:
traceback.print_exc()
self.vprint("Allocation Error: %s" % e)
def do_autoscript(self, line):
'''
Tell vdb to run a python script on every process attach.
Usage: autoscript <scriptfile>|clear
'''
argv = e_cli.splitargs(line)
if len(argv) != 1:
self.vprint('Current Autoscript: %s' % self.autoscript)
return
if argv[0] == 'clear':
self.vprint('clearing autoscript: %s' % self.autoscript)
self.autoscript = None
return
if not os.path.isfile(argv[0]):
self.vprint('Error: %s is not a valid file' % argv[0])
return
self.autoscript = argv[0]
def do_memload(self, line):
'''
Load a file into memory. (straight mapping, no parsing)
Usage: memload <filename>
'''
argv = e_cli.splitargs(line)
if len(argv) != 1:
return self.do_help('memload')
fname = argv[0]
if not os.path.isfile(fname):
self.vprint('Invalid File: %s' % fname)
return
fbytes = file(fname, 'rb').read()
memva = self.trace.allocateMemory(len(fbytes))
self.trace.writeMemory(memva, fbytes)
self.vprint('Loaded At: 0x%.8x (%d bytes)' % (memva, len(fbytes)))
def do_struct(self, line):
'''
Show and optionally apply a vstruct definition to memory.
Use the 'vstruct' command to find and display a structure of interest.
Usage: struct <vstruct name> [memory expression]
'''
argv = shlex.split(line)
if len(argv) not in (1, 2):
return self.do_help('struct')
clsname = argv[0]
expr = None
va = None
if len(argv) == 2:
expr = argv[1]
va = self.trace.parseExpression(expr)
sinfo = self.trace.getStruct(clsname, va=va)
if sinfo is None:
self.vprint('%s not found.' % clsname)
return
# yuck.
if len(argv) == 1:
va = 0
stree = sinfo.tree(va=va)
self.vprint(stree)
def do_signal(self, args):
"""
Show the current pending signal/exception code.
Usage: signal
"""
# FIXME -i do NOT pass the signal on to the target process.
t = self.trace
t.requireAttached()
cursig = t.getCurrentSignal()
if cursig == None:
self.vprint('No Pending Signals/Exceptions!')
else:
self.vprint("Current signal: %d (0x%.8x)" % (cursig, cursig))
def do_snapshot(self, line):
"""
Take a process snapshot of the current (stopped) trace and
save it to the specified file.
Usage: snapshot <filename>
"""
if len(line) == 0:
return self.do_help("snapshot")
alist = e_cli.splitargs(line)
if len(alist) != 1:
return self.do_help("snapshot")
t = self.trace
t.requireAttached()
self.vprint("Taking Snapshot...")
snap = vs_snap.takeSnapshot(t)
self.vprint("Saving To File")
snap.saveToFile(alist[0])
self.vprint("Done")
snap.release()
def do_ignore(self, args):
"""
Add the specified signal id (exception id for windows) to the ignored
signals list for the current trace. This keeps the performance impact
for that particular signal as small as possible, but will not alert
you that it has occurred.
Usage: ignore [options] [-c | <sigcode>...]
-d - Remove the specified signal codes.
-c - Include the *current* signal in the sigcode list
-C - Clear the list of ignored signals
Example: ignore -c # Ignore the currently posted signal
ignore -d 0x80000001 # Remove 0x80000001 from the ignores
"""
argv = e_cli.splitargs(args)
try:
opts,args = getopt(argv, 'Ccd')
except Exception, e:
return self.do_help('ignore')
remove = False
sigs = []
for opt,optarg in opts:
if opt == '-c':
sig = self.trace.getCurrentSignal()
if sig == None:
self.vprint('No current signal to ignore!')
return
sigs.append(sig)
elif opt == '-C':
self.vprint('Clearing ignore list...')
self.trace.setMeta('IgnoredSignals', [])
elif opt == '-d':
remove = True
for arg in args:
sigs.append(self.trace.parseExpression(arg))
for sig in sigs:
if remove:
self.vprint('Removing: 0x%.8x' % sig)
self.trace.delIgnoreSignal(sig)
else:
self.vprint('Adding: 0x%.8x' % sig)
self.trace.addIgnoreSignal(sig)
ilist = self.trace.getMeta("IgnoredSignals")
self.vprint("Currently Ignored Signals/Exceptions:")
for x in ilist:
self.vprint("0x%.8x (%d)" % (x, x))
def do_exec(self, cmd):
"""
Execute a program with the given command line and
attach to it.
Usage: exec </some/where and some args>
"""
t = self.newTrace()
t.execute(cmd)
def do_threads(self, line):
"""
List the current threads in the target process or select
the current thread context for the target tracer.
Usage: threads [thread id]
"""
self.trace.requireNotRunning()
if self.trace.isRunning():
self.vprint("Can't list threads while running!")
return
if len(line) > 0:
thrid = int(line, 0)
self.trace.selectThread(thrid)
self.vdbUIEvent('vdb:setthread', thrid)
self.vprint("Current Threads:")
self.vprint("[thrid] [thrinfo] [pc]")
curtid = self.trace.getMeta("ThreadId")
for tid, tinfo in self.trace.getThreads().items():
a = " "
if tid == curtid:
a = "*"
sus = ""
if self.trace.isThreadSuspended(tid):
sus = "(suspended)"
ctx = self.trace.getRegisterContext(tid)
pc = ctx.getProgramCounter()
self.vprint("%s%6d 0x%.8x 0x%.8x %s" % (a, tid, tinfo, pc, sus))
def do_suspend(self, line):
"""
Suspend a thread.
Usage: suspend <-A | <tid>[ <tid>...]>
"""
argv = e_cli.splitargs(line)
try:
opts,args = getopt(argv, "A")
except Exception, e:
return self.do_help("suspend")
for opt,optarg in opts:
if opt == "-A":
# hehe...
args = [str(tid) for tid in self.trace.getThreads().keys()]
if not len(args):
return self.do_help("suspend")
for arg in args:
tid = int(arg)
self.trace.suspendThread(tid)
self.vprint("Suspended Thread: %d" % tid)
def do_restart(self, line):
'''
Restart the current process.
Usage: restart
NOTE: This only works if the process was exec'd to begin with!
TODO: Plumb options for persisting bp's etc...
'''
t = self.trace
cmdline = t.getMeta('ExecCommand')
if cmdline == None:
self.vprint('This trace was not fired with exec! (cannot restart)')
return
if t.isRunning():
t.setMode("RunForever", False)
t.sendBreak()
if t.isAttached():
t.kill()
t = self.newTrace()
t.execute(cmdline)
def do_resume(self, line):
"""
Resume a thread.
Usage: resume <-A | <tid>[ <tid>...]>
"""
argv = e_cli.splitargs(line)
try:
opts,args = getopt(argv, "A")
except Exception, e:
return self.do_help("resume")
for opt,optarg in opts:
if opt == "-A":
# hehe...
args = [str(tid) for tid in self.trace.getThreads().keys()]
if not len(args):
return self.do_help("resume")
for arg in args:
tid = int(arg)
self.trace.resumeThread(tid)
self.vprint("Resumed Thread: %d" % tid)
#def do_inject(self, line):
def do_mode(self, args):
"""
Set modes in the tracers...
mode Foo=True/False
"""
if args:
mode,val = args.split("=")
newmode = eval(val)
self.setMode(mode, newmode)
else:
for key,val in self.trace.modes.items():
self.vprint("%s -> %d" % (key,val))
def do_reg(self, args):
"""
Show the current register values. Additionally, you may specify
name=<expression> to set a register
Usage: reg [regname=vtrace_expression]
"""
if len(args):
if args.find("=") == -1:
return self.do_help("reg")
regname,expr = args.split("=", 1)
val = self.trace.parseExpression(expr)
self.trace.setRegisterByName(regname, val)
self.vprint("%s = 0x%.8x" % (regname, val))
self.vdbUIEvent('vdb:setregs')
return
regs = self.trace.getRegisters()
rnames = regs.keys()
rnames.sort()
final = []
for r in rnames:
# Capital names are used for reg vals that we don't want to see
# (by default)
if r.lower() != r:
continue
val = regs.get(r)
vstr = e_bits.hex(val, 4)
final.append(("%12s:0x%.8x (%d)" % (r,val,val)))
self.columnize(final)
def complete_reg(self, text, line, begidx, endidx):
if '=' in line:
return []
regs = self.trace.getRegisters().keys()
if not text:
return regs
if text in regs:
return [ text + '=' ]
return [ i for i in regs if i.startswith(text) ]
def do_stepi(self, line):
"""
Single step the target tracer.
Usage: stepi [ options ]
-A <addr> - Step to <addr>
-B - Step past the next branch instruction
-C <count> - Step <count> instructions
-R - Step to return from this function
-V - Show operand values during single step (verbose!)
-U - Remainder of args is "step until" expression (stop on True)
-Q - Do not output to canvas
"""
t = self.trace
argv = e_cli.splitargs(line)
try:
opts,args = getopt(argv, "A:BC:RVUQ")
except Exception, e:
return self.do_help("stepi")
until = None
count = None
taddr = None
toret = False
tobrn = False
showop = False
quiet = False
for opt, optarg in opts:
if opt == '-A':
taddr = t.parseExpression(optarg)
elif opt == '-B':
tobrn = True
elif opt == '-C':
count = t.parseExpression(optarg)
elif opt == '-R':
toret = True
elif opt == '-V':
showop = True
elif opt == '-U':
until = ' '.join(args)
elif opt == '-Q':
quiet = True
if ( count == None
and taddr == None
and until == None
and toret == False
and tobrn == False):
count = 1
oldmode = self.getMode('FastStep')
self.setMode('FastStep', True)
hits = 0
depth = 0
try:
while True:
pc = t.getProgramCounter()
if pc == taddr:
break
op = t.parseOpcode(pc)
sym = t.getSymByAddr(pc)
if sym != None and not quiet:
self.canvas.addVaText(repr(sym), pc)
self.canvas.addText(':\n')
if not quiet:
self.canvas.addText(' ' * max(depth,0))
self.canvas.addVaText('0x%.8x' % pc, pc)
self.canvas.addText(': ')
op.render(self.canvas)
# these options are really mutually exclusive
if showop and not quiet:
self.canvas.addText(' ; ')
for oper in op.opers:
try:
val = oper.getOperValue(op, emu=t)
self.canvas.addText('0x%.8x ' % val)
except Exception, e:
self.canvas.addText(str(e))
if not quiet:
self.canvas.addText('\n')
if op.iflags & envi.IF_CALL:
depth += 1
elif op.iflags & envi.IF_RET:
depth -= 1
tid = t.getCurrentThread()
t.stepi()
if until and t.parseExpression(until):
break
# If we get an event from a different thread, get out!
if t.getCurrentThread() != tid:
break
# Break out if we have returned from the current function
if toret and depth < 0:
break
if depth < 0:
depth = 0
hits += 1
# If we have passed a conditional branch...
if tobrn == True and hits != 0:
if op.iflags & envi.IF_CALL:
break
if op.iflags & envi.IF_RET:
break
getout = False
for bva, bflags in op.getBranches():
if bflags & envi.BR_COND:
getout = True
break
if getout:
break
if count != None and hits >= count:
break
if t.getCurrentSignal() != None:
break
if t.getMeta('PendingSignal'):
break
finally:
self.setMode('FastStep', oldmode)
# We ate all the events, tell things we have updated...
t.fireNotifiers(vtrace.NOTIFY_STEP)
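# Illustrative invocations (the until-expression is made up):
#   stepi -C 10            # step exactly ten instructions
#   stepi -B               # step until a branching instruction is passed
#   stepi -U "eax == 0"    # step until the expression evaluates true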
def do_stepo(self, line):
'''
Step over current instruction.
Executes the current instruction unless it is a procedure call.
If it is a procedure call, sets a breakpoint on the instruction after
the call.
'''
op = self.trace.parseOpcode(self.trace.getProgramCounter())
if not op.isCall():
self.trace.stepi()
else:
# should we make this like stepout?
bp = vtrace.breakpoints.OneTimeBreak(op.va + op.size)
self.trace.addBreakpoint(bp)
self.trace.run()
def do_stepout(self, line):
'''
Step out of the current function. (stepi or stepover until return)
Single step (stepping over procedure calls) until a return
instruction. Breaks on the return instruction.
Usage: stepout [options]
-V verbose, print step instructions. (much slower)
'''
args = shlex.split(line)
verbose = False
if len(args) not in (0, 1):
return self.do_help('stepout')
if len(args) == 1:
if '-V' != args[0]:
return self.do_help('stepout')
verbose = True
nb = self.trace.getMode('NonBlocking')
self.trace.setMode('NonBlocking', False)
fs = self.trace.getMode('FastStep')
self.trace.setMode('FastStep', True)
tid = self.trace.getCurrentThread()
waitva = None
try:
while True:
op = self.trace.parseOpcode(self.trace.getProgramCounter())
if op.isReturn():
break
if self.trace.getCurrentSignal() != None:
self.vprint('do_stepout: received signal, stopping')
break
if self.trace.getMeta('PendingSignal'):
self.vprint('do_stepout: pending signal, stopping')
break
if self.trace.getCurrentThread() != tid:
self.vprint('do_stepout: event from different thread, stopping')
break
if op.isCall():
waitva = op.va + op.size
self.trace.run(until=waitva)
continue
if verbose:
self.do_stepi('')
else:
self.trace.stepi()
except Exception as e:
self.vprint('do_stepout: exception %s, stopping' % (str(e)))
finally:
self.trace.setMode('NonBlocking', nb)
self.trace.setMode('FastStep', fs)
# make sure the waitva breakpoint (if any) is gone
if waitva != None:
bpid = self.trace.getBreakpointByAddr(waitva)
if bpid != None:
self.trace.removeBreakpoint(bpid)
self.trace.fireNotifiers(vtrace.NOTIFY_STEP)
def do_go(self, line):
'''
Continue the target tracer.
-I go icount linear instructions forward (step over style)
-U go *out* of fcount frames (step out style)
<until addr> go until explicit address
Usage: go [-U <fcount> | -I <icount> | <until addr expression>]
'''
until = None
icount = None
fcount = None
argv = e_cli.splitargs(line)
try:
opts, args = getopt(argv, 'U:I:')
except:
return self.do_help('go')
for opt, optarg in opts:
if opt == '-U':
if len(optarg) == 0: return self.do_help('go')
fcount = self.trace.parseExpression(optarg)
elif opt == '-I':
if len(optarg) == 0: return self.do_help('go')
icount = self.trace.parseExpression(optarg)
if icount != None:
addr = self.trace.getProgramCounter()
for i in xrange(icount):
addr += len(self.trace.parseOpcode(addr))
until = addr
elif fcount != None:
until = self.trace.getStackTrace()[fcount][0]
elif len(args):
until = self.trace.parseExpression(' '.join(args))
if not until:
self.vprint("Running Tracer (use 'break' to stop it)")
self.trace.run(until=until)
def do_gui(self, line):
'''
Attempt to spawn the VDB gui.
'''
if self.gui != None:
self.vprint('Gui already running!')
return
import vqt.main as vq_main
import vdb.qt.main as vdb_q_main
import vqt.colors as vq_colors
vq_main.startup(css=vq_colors.qt_matrix)
qgui = vdb_q_main.VdbWindow(self)
qgui.show()
vq_main.main()
def do_waitlib(self, line):
'''
Run the target process until the specified library
(by normalized name such as 'kernel32' or 'libc')
is loaded. Disable waiting with -D.
Usage: waitlib [ -D | <libname> ]
'''
t = self.trace
pid = t.getPid()
t.requireAttached()
argv = e_cli.splitargs(line)
try:
opts,args = getopt(argv, "D")
except:
return self.do_help("waitlib")
for opt, optarg in opts:
if opt == '-D':
self.vprint('Disabling Wait On: %s' % self.waitlib)
self.waitlib = None
return
if len(args) != 1:
return self.do_help('waitlib')
libname = args[0]
if t.getMeta('LibraryBases').get(libname) != None:
self.vprint('Library Already Loaded: %s' % libname)
return
self.vprint('Setting Waitlib: %s' % libname)
self.waitlib = libname
def do_server(self, port):
"""
Start a vtrace server on the local box. If the server
is already running, show which processes are being remotely
debugged.
Usage: server [port]
"""
if port:
vtrace.port = int(port)
if self.server == None:
self.vprint('Starting vtrace server!')
self.server = vtrace.startVtraceServer()
return
self.vprint('Displaying remotely debugged traces:')
shared = [ t for (n,t) in self.server.getSharedObjects() if isinstance(t, vtrace.Trace) ]
if not shared:
self.vprint('None.')
return
for t in shared:
if not t.isAttached():
continue
runmsg = 'stopped'
if t.isRunning():
runmsg = 'running'
pid = t.getPid()
name = t.getMeta('ExeName', 'Unknown')
self.vprint('%6d %.8s - %s' % (pid, runmsg, name))
def do_syms(self, line):
'''
List symbols for loaded libraries. Use 'lm' to see loaded libraries.
-s <regex> a regular expression (case insensitive search)
<libname> the library name
Usage: syms [-s <regex>] [<libname> ...]
Example: show all symbols for library foobar
syms foobar
Example: show specific symbols for library foobar and bazfaz
syms -s .*?barfoo.* foobar bazfaz
Example: show specific symbols in any library
syms -s .*?barfoo.*
'''
argv = shlex.split(line)
if len(argv) < 1:
return self.do_help('syms')
rgx = None
if '-s' in argv:
idx = argv.index('-s')
argv.pop(idx)
rgx = argv.pop(idx)
s = set(argv)
libs = self.trace.getNormalizedLibNames()
if len(s) > 0:
libs = [lib for lib in libs if lib in s]
if len(libs) == 0 and rgx == None:
self.vprint('invalid library names: %s' % argv)
return self.do_help('syms')
for lib in sorted(libs):
for sym in self.trace.getSymsForFile(lib):
r = repr(sym)
if rgx != None:
match = re.search(rgx, r, re.IGNORECASE)
if match == None:
continue
self.vprint('0x%.8x %s' % (sym.value, r))
def do_call(self, line):
"""
Allows a C-like syntax for calling functions inside
the target process (from its context).
Example: call printf("yermom %d", 10)
"""
self.trace.requireAttached()
ind = line.index("(")
if ind == -1:
raise Exception('ERROR - call wants c-style syntax: ie call printf("yermom")')
funcaddr = self.trace.parseExpression(line[:ind])
try:
args = eval(line[ind:])
except:
raise Exception('ERROR - call wants c-style syntax: ie call printf("yermom")')
self.vprint("calling %s -> 0x%.8x" % (line[:ind], funcaddr))
self.trace.call(funcaddr, args)
def do_bestname(self, args):
"""
Return the "best name" string for an address.
Usage: bestname <vtrace expression>
"""
if len(args) == 0:
return self.do_help("bestname")
addr = self.trace.parseExpression(args)
self.vprint(self.reprPointer(addr))
def do_EOF(self, string):
'''
Prints how to exit VDB (use quit).
'''
self.vprint("No.. this is NOT a python interpreter... use quit ;)")
def do_quit(self, args):
"""
Quit VDB and terminate the process.
use "quit force" to hard-force a quit regardless of everything.
"""
if args == 'force':
print('Quitting by force!')
os._exit(0)
try:
if self.trace.isRunning():
self.trace.setMode('RunForever', False)
self.trace.sendBreak()
if self.trace.isAttached():
if self.config.vdb.KillOnQuit:
self.trace.kill()
else:
self.trace.detach()
self.vprint('Exiting...')
e_cli.EnviMutableCli.do_quit(self, args)
self.trace.release()
except Exception, e:
self.vprint('Exception during quit (may need: quit force): %s' % e)
def do_detach(self, line):
'''
Detach from the current tracer.
Detaching using -k terminates the process on detach.
Usage: detach [-k]
'''
self.trace.requireAttached()
argv = e_cli.splitargs(line)
if '-k' in argv:
self.trace.kill()
if not self.trace.isRunning():
self.trace.run()
else:
if self.trace.isRunning():
self.trace.setMode("RunForever", False)
self.trace.sendBreak()
self.trace.detach()
def do_attach(self, args):
"""
Attach to a process by PID or by process name. In
the event of more than one process by a given name,
attach to the last (most recently created) one in
the list.
Usage: attach [<pid>,<name>]
NOTE: This is *not* a regular expression. The given
string must be found as a substring of the process
name...
"""
pid = None
try:
pid = int(args)
except ValueError, e:
for mypid, pname in self.trace.ps():
if pname.find(args) != -1:
pid = mypid
if pid == None:
return self.do_help('attach')
self.vprint("Attaching to %d" % pid)
self.newTrace().attach(pid)
def complete_attach(self, text, line, begidx, endidx):
procs = self.trace.ps()
pidlist = [ str(x) for x,y in procs ]
proclist = [ y for x,y in procs ]
if not text:
return proclist
if text.isdigit():
return [ i for i in pidlist if i.startswith(text) ]
return [ i for i in proclist if i.find(text) != -1 ]
def do_autocont(self, line):
"""
Manipulate the auto-continue behavior for the trace. This
will cause particular event types to automagically continue
execution.
Usage: autocont [event name]
"""
argv = e_cli.splitargs(line)
acnames = ["attach",
"signal",
"break",
"loadlib",
"unloadlib",
"createthread",
"exitthread",
"dbgprint"]
acvals = [ vtrace.NOTIFY_ATTACH,
vtrace.NOTIFY_SIGNAL,
vtrace.NOTIFY_BREAK,
vtrace.NOTIFY_LOAD_LIBRARY,
vtrace.NOTIFY_UNLOAD_LIBRARY,
vtrace.NOTIFY_CREATE_THREAD,
vtrace.NOTIFY_EXIT_THREAD,
vtrace.NOTIFY_DEBUG_PRINT]
c = self.trace.getAutoContinueList()
if len(line):
try:
index = acnames.index(line)
except ValueError, e:
self.vprint("Unknown event name: %s" % line)
return
sig = acvals[index]
if sig in c:
self.trace.disableAutoContinue(sig)
c.remove(sig)
else:
self.trace.enableAutoContinue(sig)
c.append(sig)
self.vprint("Auto Continue Status:")
for i in range(len(acnames)):
name = acnames[i]
sig = acvals[i]
acont = False
if sig in c:
acont = True
self.vprint("%s %s" % (name.rjust(14),repr(acont)))
self.vdbUIEvent('vdb:setautocont')
def do_bt(self, line):
"""
Show a stack backtrace for the currently selected thread.
Usage: bt
"""
self.vprint(" [ PC ] [ Frame ] [ Location ]")
idx = 0
for pc,frame in self.trace.getStackTrace():
self.vprint("[%3d] 0x%.8x 0x%.8x %s" % (idx,pc,frame,self.reprPointer(pc)))
idx += 1
def do_lm(self, args):
"""
Show the loaded libraries and their base addresses.
Usage: lm [libname]
"""
bases = self.trace.getMeta("LibraryBases")
paths = self.trace.getMeta("LibraryPaths")
if len(args):
base = bases.get(args)
path = paths.get(base, "unknown")
if base == None:
self.vprint("Library %s is not found!" % args)
else:
self.vprint("0x%.8x - %s %s" % (base, args, path))
else:
self.vprint("Loaded Libraries:")
names = self.trace.getNormalizedLibNames()
names.sort()
names = e_cli.columnstr(names)
for libname in names:
base = bases.get(libname.strip(), -1)
path = paths.get(base, "unknown")
self.vprint("0x%.8x - %.30s %s" % (base, libname, path))
def do_guid(self, line):
"""
        Parse and display a Globally Unique Identifier (GUID) from memory
(eventually, use GUID db to lookup the name/meaning of the GUID).
Usage: guid <addr_exp>
"""
self.trace.requireNotRunning()
if not line:
return self.do_help("guid")
addr = self.parseExpression(line)
guid = vs_prims.GUID()
bytes = self.trace.readMemory(addr, len(guid))
guid.vsSetValue(bytes)
self.vprint("GUID 0x%.8x %s" % (addr, repr(guid)))
def do_bpfile(self, line):
"""
Set the python code for a breakpoint from the contents
of a file.
Usage: bpfile <bpid> <filename>
"""
argv = e_cli.splitargs(line)
if len(argv) != 2:
return self.do_help("bpfile")
bpid = int(argv[0])
pycode = file(argv[1], "rU").read()
self.trace.setBreakpointCode(bpid, pycode)
def do_bpedit(self, line):
"""
        Manipulate the python code that will be run for a given
        breakpoint by ID. (This is also the way to view the code.)
Usage: bpedit <id> ["optionally new code"]
NOTE: Your code must be surrounded by "s and may not
contain any "s
"""
argv = e_cli.splitargs(line)
if len(argv) == 0:
return self.do_help("bpedit")
bpid = int(argv[0])
if len(argv) == 2:
self.trace.setBreakpointCode(bpid, argv[1])
pystr = self.trace.getBreakpointCode(bpid)
self.vprint("[%d] Breakpoint code: %s" % (bpid,pystr))
def do_bp(self, line):
"""
Show, add, and enable/disable breakpoints
USAGE: bp [-d <addr>] [-a <addr>] [-o <addr>] [[-c pycode] <address> [vdb cmds]]
-C - Clear All Breakpoints
-c "py code" - Set the breakpoint code to the given python string
-d <id> - Disable Breakpoint
-e <id> - Enable Breakpoint
-r <id> - Remove Breakpoint
-o <addr> - Create a OneTimeBreak
-L <libname> - Add bp's to all functions in <libname>
-F <filename> - Load bpcode from file
-W perms:size - Set a hardware Watchpoint with perms/size (ie -W rw:4)
-f - Make added breakpoints from this command into "fastbreaks"
-S <libname>:<regex> - Add bp's to all matching funcs in <libname>
<address>... - Create Breakpoint
        [vdb cmds].. - (optional) vdb cli command to run on BP hit (separate
                       multiple commands with ;; )
        NOTE: -c adds python code to the breakpoint. The python code will
              be run with the following objects mapped into its namespace
              automagically:
vtrace - the vtrace package
trace - the tracer
bp - the breakpoint object
"""
self.trace.requireNotRunning()
argv = e_cli.splitargs(line)
try:
opts,args = getopt(argv, "fF:e:d:o:r:L:Cc:S:W:")
except Exception, e:
return self.do_help('bp')
pycode = None
wpargs = None
fastbreak = False
libsearch = None
for opt,optarg in opts:
if opt == "-e":
self.trace.setBreakpointEnabled(eval(optarg), True)
elif opt == "-c":
pycode = optarg
test = compile(pycode, "test","exec")
elif opt == "-F":
pycode = file(optarg, "rU").read()
elif opt == '-f':
fastbreak = True
elif opt == "-r":
self.bpcmds.pop(int(optarg), None)
self.trace.removeBreakpoint(int(optarg))
self.vdbUIEvent('vdb:delbreak', int(optarg))
elif opt == "-C":
for bp in self.trace.getBreakpoints():
self.bpcmds.pop(bp.id, None)
self.trace.removeBreakpoint(bp.id)
self.vdbUIEvent('vdb:delbreak', bp.id)
elif opt == "-d":
self.trace.setBreakpointEnabled(eval(optarg), False)
elif opt == "-o":
bpid = self.trace.addBreakpoint(vtrace.OneTimeBreak(None, expression=optarg))
self.vdbUIEvent('vdb:addbreak', bpid)
elif opt == "-L":
for sym in self.trace.getSymsForFile(optarg):
if not isinstance(sym, e_resolv.FunctionSymbol):
continue
try:
bp = vtrace.Breakpoint(None, expression=str(sym))
bp.setBreakpointCode(pycode)
bpid = self.trace.addBreakpoint(bp)
self.vdbUIEvent('vdb:addbreak', bpid)
self.vprint("Added: %s" % str(sym))
except Exception, msg:
self.vprint("WARNING: %s" % str(msg))
elif opt == "-W":
wpargs = optarg.split(":")
elif opt == '-S':
libname, regex = optarg.split(':')
try:
for sym in self.trace.searchSymbols(regex, libname=libname):
symstr = str(sym)
symval = long(sym)
if self.trace.getBreakpointByAddr(symval) != None:
self.vprint('Duplicate (0x%.8x) %s' % (symval, symstr))
continue
bp = vtrace.Breakpoint(None, expression=symstr)
self.trace.addBreakpoint(bp)
self.vprint('Added: %s' % symstr)
except re.error, e:
self.vprint('Invalid Regular Expression: %s' % regex)
return
cmdstr = None
if len(args) > 1:
cmdstr = ' '.join(args[1:])
if len(args) >= 1:
arg = args[0]
if wpargs != None:
size = int(wpargs[1])
bp = vtrace.Watchpoint(None, expression=arg, size=size, perms=wpargs[0])
else:
bp = vtrace.Breakpoint(None, expression=arg)
bp.setBreakpointCode(pycode)
bp.fastbreak = fastbreak
bpid = self.trace.addBreakpoint(bp)
self.vdbUIEvent('vdb:addbreak', bpid)
if cmdstr:
self.bpcmds[bpid] = cmdstr.replace(';;', '&&')
self.vprint(" [ Breakpoints ]")
for bp in self.trace.getBreakpoints():
self._print_bp(bp)
def _print_bp(self, bp):
cmdstr = self.bpcmds.get(bp.id, '')
self.vprint("%s enabled: %s fast: %s %s" % (bp, bp.isEnabled(), bp.fastbreak, cmdstr))
def do_fds(self, args):
"""
Show all the open Handles/FileDescriptors for the target process.
The "typecode" shown in []'s is the vtrace typecode for that kind of
fd/handle.
Usage: fds
"""
self.trace.requireAttached()
for id,fdtype,fname in self.trace.getFds():
self.vprint("0x%.8x [%d] %s" % (id,fdtype,fname))
def do_ps(self, args):
"""
Show the current process list.
Usage: ps
"""
self.vprint("[Pid]\t[ Name ]")
for ps in self.trace.ps():
self.vprint("%s\t%s" % (ps[0],ps[1]))
def do_break(self, args):
"""
        Send the break signal to the target tracer to stop
        its execution.
Usage: break
"""
if self.trace.getMeta('PendingBreak'):
self.vprint('Break already sent...')
return
self.trace.setMeta('PendingBreak', True)
self.trace.setMode("RunForever", False)
self.trace.sendBreak()
def do_meta(self, line):
"""
Show the metadata for the current trace.
Usage: meta
"""
argv = e_cli.splitargs(line)
if argv:
for name in argv:
mval = self.trace.getMeta(name)
self.vprint('%s: %r' % (name, mval))
else:
meta = self.trace.metadata
x = pprint.pformat(meta)
self.vprint(x)
def do_memdiff(self, line):
"""
Save and compare snapshots of memory to enumerate changes.
Usage: memdiff [options]
-C Clear all current memory diff snapshots.
-A <va:size> Add the given virtual address to the list.
-M <va> Add the entire memory map which contains VA to the list.
-D Compare currently tracked memory with the target process
and show any differences.
"""
argv = e_cli.splitargs(line)
opts,args = getopt(argv, "A:CDM:")
if len(opts) == 0:
return self.do_help('memdiff')
self.trace.requireNotRunning()
for opt,optarg in opts:
if opt == "-A":
if optarg.find(':') == -1:
return self.do_help('memdiff')
vastr,sizestr = optarg.split(':')
va = self.parseExpression(vastr)
size = self.parseExpression(sizestr)
bytez = self.trace.readMemory(va,size)
self.difftracks[va] = bytez
elif opt == '-C':
self.difftracks = {}
elif opt == '-D':
difs = self._getDiffs()
if len(difs) == 0:
self.vprint('No Differences!')
else:
for va,thenbytes,nowbytes in difs:
self.vprint('0x%.8x: %s %s' %
(va,
thenbytes.encode('hex'),
nowbytes.encode('hex')))
elif opt == '-M':
va = self.parseExpression(optarg)
mmap = self.trace.getMemoryMap(va)
if mmap == None:
self.vprint('No Memory Map At: 0x%.8x' % va)
return
mva,msize,mperm,mfile = mmap
bytez = self.trace.readMemory(mva, msize)
self.difftracks[mva] = bytez
def _getDiffs(self):
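        # Walk each tracked snapshot and collect contiguous runs of bytes
        # that differ between the saved copy and current process memory,
        # recording (address, then-bytes, now-bytes) for each run.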
ret = []
for va, bytez in self.difftracks.items():
nowbytez = self.trace.readMemory(va, len(bytez))
i = 0
while i < len(bytez):
thendiff = ""
nowdiff = ""
iva = va+i
while (i < len(bytez) and
bytez[i] != nowbytez[i]):
thendiff += bytez[i]
nowdiff += nowbytez[i]
i += 1
if thendiff:
ret.append((iva, thendiff, nowdiff))
continue
i += 1
return ret
def do_dope(self, line):
'''
Cli interface to the "stack doping" api inside recon. *BETA*
        (Basically, set all uninitialized stack memory to V's to tease
        out uninitialized stack bugs)
Usage: dope [ options ]
-E Enable automagic thread stack doping on all continue events
-D Disable automagic thread stack doping on all continue events
-A Dope all current thread stacks
'''
import vdb.recon.dopestack as vr_dopestack
argv = e_cli.splitargs(line)
if len(argv) == 0:
return self.do_help('dope')
opts,args = getopt(argv, 'ADE')
if len(opts) == 0:
return self.do_help('dope')
for opt, optarg in opts:
if opt == '-A':
self.vprint('Doping all thread stacks...')
vr_dopestack.dopeAllThreadStacks(self.trace)
self.vprint('...complete!')
elif opt == '-D':
self.vprint('Disabling thread doping...')
vr_dopestack.disableEventDoping(self.trace)
self.vprint('...complete!')
elif opt == '-E':
self.vprint('Enabling thread doping on CONTINUE events...')
vr_dopestack.enableEventDoping(self.trace)
self.vprint('...complete!')
def do_recon(self, line):
'''
Cli front end to the vdb recon subsystem which allows runtime
analysis of known API calls.
Usage: recon [options]
-A <sym_expr>:<recon_fmt> - Add a recon breakpoint with the given format
-C - Clear the current list of recon breakpoint hits.
-H - Print the current list of recon breakpoint hits.
-Q - Toggle "quiet" mode which prints nothing on bp hits.
-S <sym_expr>:<argidx> - Add a sniper break for arg index
NOTE: A "recon format" is a special format sequence which tells the
recon subsystem how to present the argument data for a given
breakpoint hit.
Recon Format:
C - A character
I - A decimal integer
P - A pointer (display symbol if possible)
S - An ascii string (up to 260 chars)
U - A unicode string (up to 260 chars)
X - A hex number
'''
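        # Illustrative example (symbol and format are hypothetical):
        #   recon -A kernel32.CreateFileA:SXXXXXX
        # would print the first argument as an ascii string and the
        # remaining six arguments as hex on each call.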
import vdb.recon as v_recon
import vdb.recon.sniper as v_sniper
argv = e_cli.splitargs(line)
if len(argv) == 0:
return self.do_help('recon')
if self.trace.getMeta('Architecture') != 'i386':
self.vprint('FIXME: recon only works on i386 right now...')
return
opts,args = getopt(argv, 'A:CHQS:')
for opt, optarg in opts:
if opt == '-A':
symname, reconfmt = optarg.split(':', 1)
v_recon.addReconBreak(self.trace, symname, reconfmt)
elif opt == '-C':
v_recon.clearReconHits(self.trace)
elif opt == '-H':
self.vprint('Recon Hits:')
hits = v_recon.getReconHits(self.trace)
for hit in hits:
thrid, savedeip, symname, args, argrep = hit
argstr = '(%s)' % ', '.join(argrep)
self.vprint('[%6d] 0x%.8x %s%s' % (thrid, savedeip, symname, argstr))
self.vprint('%d total hits' % len(hits))
elif opt == '-Q':
newval = not self.trace.getMeta('recon_quiet', False)
self.trace.setMeta('recon_quiet', newval)
self.vprint('Recon Quiet: %s' % newval)
elif opt == '-S':
symname, idxstr = optarg.split(':')
argidx = self.trace.parseExpression(idxstr)
v_sniper.snipeDynArg(self.trace, symname, argidx)
def do_stalker(self, line):
'''
Cli front end to the VDB code coverage subsystem. FIXME MORE DOCS!
Usage: stalker [options]
-C - Cleanup stalker breaks and hit info
-c - Clear the current hits (so you can make more ;)
-E <addr_expr> - Add the specified entry point for tracking
-H - Show the current hits
-L <lib>:<regex> - Add stalker breaks to all matching library symbols
-R - Reset all breakpoints to enabled and clear hit info
'''
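        # Illustrative session (library/symbol names are hypothetical):
        #   stalker -L ws2_32:recv.*  - break on matching exported functions
        #   stalker -H                - list code addresses hit so far
        #   stalker -R                - re-arm breaks and clear hit info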
argv = e_cli.splitargs(line)
if len(argv) == 0:
return self.do_help('stalker')
try:
opts,args = getopt(argv, 'cCE:HIL:R')
        except Exception, e:
return self.do_help('stalker')
trace = self.trace
for opt, optarg in opts:
if opt == '-c':
v_stalker.clearStalkerHits(trace)
self.vprint('Clearing Stalker Hits...')
elif opt == '-C':
v_stalker.clearStalkerBreaks(trace)
v_stalker.clearStalkerHits(trace)
self.vprint('Cleaning up stalker breaks and hits')
elif opt == '-E':
addr = trace.parseExpression(optarg)
v_stalker.addStalkerEntry(trace, addr)
self.vprint('Added 0x%.8x' % addr)
elif opt == '-H':
self.vprint('Current Stalker Hits:')
for hitva in v_stalker.getStalkerHits(trace):
self.vprint('0x%.8x' % hitva)
elif opt == '-L':
libname, regex = optarg.split(':', 1)
for sym in trace.searchSymbols(regex, libname=libname):
v_stalker.addStalkerEntry(trace, long(sym))
self.vprint('Stalking %s' % str(sym))
elif opt == '-R':
self.vprint('Resetting all breaks and hit info')
v_stalker.clearStalkerHits(trace)
v_stalker.resetStalkerBreaks(trace)
def do_status(self, line):
'''
Print out the status of the debugger / trace...
'''
t = self.getTrace()
if not t.isAttached():
self.vprint('Trace Not Attached...')
return
runmsg = 'stopped'
if t.isRunning():
runmsg = 'running'
pid = t.getPid()
self.vprint('Attached to pid: %d (%s)' % (pid, runmsg))
def _getFirstLine(self, line):
'''
Returns the first non-empty line in a (potentially) empty or
multiline string. An empty line is returned for a None string or
if all lines are empty.
'''
if line == None:
return ''
lines = line.split('\n')
lines = [line.strip() for line in lines]
for line in lines:
if line != '':
return line
return ''
def _getCommandHelp(self):
'''
Returns a list of command name, doc first line, and doc string tuples.
(sorted by command name)
We'll need this later anyway when we implement our own groups of
commands.
'''
# commands can be docstrings or have help_<cmd> methods.
HELP_DOCS = 0 # help docstring
HELP_FUNC = 1 # help function (precedence over docstring)
cmds = defaultdict(list)
for name in dir(self):
if name.startswith('help_'):
hstr = getattr(self, name)()
cmds[name[5:]].append( (HELP_FUNC, hstr) )
elif name.startswith('do_'):
hstr = getattr(self, name).__doc__
cmds[name[3:]].append( (HELP_DOCS, hstr) )
else:
pass
rcmds = []
for cname, clist in cmds.iteritems():
if len(clist) > 2:
raise Exception('how do we handle inherited overridden help')
# find the right help string.
# pull out HELP_FUNC if it exists, otherwise use HELP_DOCS
if len(clist) == 2:
chelp = max(clist, key=lambda x: x[0])[1].strip()
else:
chelp = clist[0][1].strip()
# maybe change to first line or first sentence in line?
fline = self._getFirstLine(chelp)
rcmds.append( (cname, fline, chelp) )
rcmds.sort()
return rcmds
def do_help(self, line):
'''
Prints a list of commands and further help depending on the options.
Usage: help [options] [string]
Usage: ? [options] [string]
no opts/args prints all commands
<command> help for the command
-s one line of help for each command
-s <string> one line of help per command for commands that contain
string
-k <string> all help per command for commands that contain <string>
'''
argv = shlex.split(line)
if len(argv) == 0:
return e_cli.EnviMutableCli.do_help(self, line)
if argv[0] not in ('-k', '-s'):
return e_cli.EnviMutableCli.do_help(self, line)
# this gets a tad messy.
if argv[0] == '-s' and len(argv) == 1:
ctups = self._getCommandHelp()
for ctup in ctups:
self.vprint('%15s: %s' % (ctup[0], ctup[1]))
elif argv[0] == '-s' and len(argv) == 2:
ctups = self._getCommandHelp()
for ctup in ctups:
if (argv[1] in ctup[0]) or (argv[1] in ctup[2]):
self.vprint('%s: %s' % (ctup[0], ctup[1]))
elif argv[0] == '-k' and len(argv) == 2:
ctups = self._getCommandHelp()
for ctup in ctups:
if (argv[1] in ctup[0]) or (argv[1] in ctup[2]):
self.vprint('> help %s\n' % ctup[0])
self.vprint(' %s\n' % ctup[2])
else:
self.vprint(self.do_help.__doc__)
def FIXME_do_remote(self, line):
"""
Act as a remote debugging client to the server running on
the specified host/ip.
Usage: remote <host>
"""
vtrace.remote = line
# FIXME how do we re-init the debugger?
# Some helper functions for tab completion
def _complete_libname(self, text, line, begidx, endidx):
libnames = self.trace.getNormalizedLibNames()
if not text:
return libnames
return [ i for i in libnames if i.startswith( text ) ]
|
{
"content_hash": "559543c8e52ebc32b1d3bbb713a49224",
"timestamp": "",
"source": "github",
"line_count": 2245,
"max_line_length": 141,
"avg_line_length": 31.564810690423162,
"alnum_prop": 0.5198058225025753,
"repo_name": "HackerTool/vivisect",
"id": "ce5ada3c26b2086c9b370e95595cb9aca12cc6a6",
"size": "70863",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vdb/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "167795"
},
{
"name": "CSS",
"bytes": "15980"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "11384786"
},
{
"name": "Shell",
"bytes": "476"
}
],
"symlink_target": ""
}
|
import logging
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from scipy.ndimage.interpolation import rotate
class Plotter(object):
def __init__(self,toption,input_type,u,v,A,T,div=None,q=None,angle=0.0,fig_size=(8,8),print_format='eps',print_dpi=1000,**kwargs):
self.toption = toption
self.input_type = input_type
self.u = u
self.v = v
self.A = A
self.T = T
#Configure logger
self.logger = logging.getLogger(type(self).__name__)
if div is not None:
self.div = div
if not q:
self.q = self.u.shape[1]
else:
self.q = q
if self.input_type == 'matrix':
self.ny,self.nx = np.shape(T)
else:
self.ny,self.nx = np.shape(T)[0],np.shape(T)[0]
#set optional member variables
self.angle = angle
self.print_format = print_format
self.print_dpi = print_dpi
self.fs = 18
self.cm = 'Blues'
self.zero_tol = 1.e-5
self.fig_size = fig_size
self.yaxis_format = FormatStrFormatter('%3.1f')
#Preprocessing
self.A = self.rotate_back(A)
self.get_components()
#Check data type
if self.toption == 'simulation':
try:
self.target = kwargs['target']
            except KeyError:
raise ValueError("Please specify list of target sources when plotting simulation results.")
if self.input_type == 'timeseries':
try:
self.ny = kwargs['Tmat'].shape[0]
            except KeyError:
raise ValueError("Please specify matrix representation of time series when using 1D representation.")
def rotate_back(self,mat):
"""Rotate back and reshape"""
#rotate matrix back
if self.angle != 0.0:
mat_rot = rotate(mat,-self.angle)
else:
mat_rot = mat
#get indices of rotated matrix
ny_r,nx_r = mat_rot.shape
#calculate differences
delta_y = int(np.round((ny_r - self.ny)/2.0))
delta_x = int(np.round((nx_r - self.nx)/2.0))
#Return cut and rotated matrix
res = mat_rot[delta_y:((ny_r - delta_y)),delta_x:((nx_r - delta_x))]
if not np.shape(res) == (self.ny,self.nx):
self.logger.warning("Rotated dimensions do not match original dimensions; (%d,%d) != (ny=%d,nx=%d)"%(np.shape(res)[0],np.shape(res)[1],self.ny,self.nx))
self.logger.warning("Adjusting dimensions for compatibility.")
diff_row = self.ny - np.shape(res)[0]
diff_col = self.nx - np.shape(res)[1]
#adjust row dim
if diff_row < 0:
#remove last row
res = res[:self.ny,:]
elif diff_row > 0:
#add last row below zero tol
res = np.vstack([res,0.9*self.zero_tol*np.ones(np.shape(res)[1])])
else:
self.logger.warning("No adjustment on rows, %d==%d"%(np.shape(res)[0],self.ny))
#adjust col dim
if diff_col < 0:
#remove last col
res = res[:,:self.nx]
elif diff_col > 0:
res = np.hstack([res,0.9*self.zero_tol*np.ones([np.shape(res)[0],1])])
else:
self.logger.warning("No adjustment on columns, %d==%d"%(np.shape(res)[1],self.nx))
return res
def get_components(self):
"""Separate A matrix into components"""
self.components = []
for i in range(self.q):
self.components.append(self.rotate_back(np.outer(self.u[:,i],self.v[i,:])))
def plot_obs_pred_total(self,**kwargs):
"""Plot original observation and recovered result"""
if self.input_type == 'matrix':
fig,ax = plt.subplots(1,2,figsize=self.fig_size)
plt.tight_layout()
imT = ax[0].imshow(np.ma.masked_where(self.T<self.zero_tol*np.max(self.T),self.T),cmap=self.cm)
imA = ax[1].imshow(np.ma.masked_where(self.A<self.zero_tol*np.max(self.A),self.A),cmap=self.cm)
cbar1 = fig.colorbar(imT,cax=make_axes_locatable(ax[0]).append_axes("right","5%",pad="3%"),ticks=[np.min(self.T),(np.max(self.T)-np.min(self.T))/2.0,np.max(self.T)],format=self.yaxis_format)
cbar2 = fig.colorbar(imA,cax=make_axes_locatable(ax[1]).append_axes("right","5%",pad="3%"),ticks=[np.min(self.A),(np.max(self.A)-np.min(self.A))/2.0,np.max(self.A)],format=self.yaxis_format)
if 'peak_id' in kwargs and kwargs['peak_id']:
self.logger.info("Finding peaks from separated images")
self.source_id()
ax[0].scatter(x=self.peak_id[:,1],y=self.peak_id[:,0],c='white',marker='x',s=15)
ax[0].set_title(r'$T$, Observation',fontsize=self.fs)
ax[1].set_title(r'$A$, Prediction',fontsize=self.fs)
ax[0].set_xlim([0,np.shape(self.T)[1]])
ax[0].set_ylim([0,np.shape(self.T)[0]])
ax[1].set_xlim([0,np.shape(self.A)[1]])
ax[1].set_ylim([0,np.shape(self.A)[0]])
ax[0].set_yticks([])
ax[0].set_xticks([])
ax[1].set_yticks([])
ax[1].set_xticks([])
elif self.input_type == 'timeseries':
fig = plt.figure(figsize=self.fig_size)
ax = fig.gca()
plt.tight_layout()
ax.plot(self.T,'.k',label='Observation')
ax.plot(self.A[self.timeseries_cut(self.A),:],'r',label='Prediction')
ax.set_xlabel(r'$t$ (au)',fontsize=self.fs)
ax.set_ylabel(r'$I$ (au)',fontsize=self.fs)
ax.set_ylim([0,1])
ax.set_xlim([0,len(self.T)])
ax.set_yticks([np.min(self.T),(np.max(self.T)-np.min(self.T))/2.0,np.max(self.T)])
ax.yaxis.set_major_formatter(self.yaxis_format)
ax.legend(loc='best')
else:
raise ValueError("Invalid input type option.")
if 'print_fig_filename' in kwargs:
plt.savefig(kwargs['print_fig_filename']+'.'+self.print_format,format=self.print_format,dpi=self.print_dpi)
plt.close('all')
else:
plt.show()
def plot_obs_pred_sources(self,**kwargs):
"""Plot recovered sources against the original components"""
try:
rows = max(self.q,len(self.target))
pairs = self.match_closest()
except:
self.logger.exception("Cannot match pairs.")
rows = self.q
pairs = []
[pairs.append((i,i)) for i in range(self.q)]
if self.input_type == 'matrix':
fig,ax = plt.subplots(2,rows,figsize=self.fig_size,sharex=True,sharey=True)
plt.tight_layout()
ax[0,0].set_ylabel(r'Sources',fontsize=self.fs)
ax[1,0].set_ylabel(r'Predictions',fontsize=self.fs)
for i in range(rows):
try:
tmp_mask = np.ma.masked_where(self.target[pairs[i][0]]<self.zero_tol*np.max(self.target[pairs[i][0]]),self.target[pairs[i][0]])
im = ax[0,i].imshow(tmp_mask,cmap=self.cm)
ax[0,i].set_xlim([0,np.shape(self.target[pairs[i][0]])[1]])
ax[0,i].set_ylim([0,np.shape(self.target[pairs[i][0]])[0]])
ax[0,i].set_xticks([0,int(np.shape(self.target[pairs[i][0]])[1]/2),np.shape(self.target[pairs[i][0]])[1]])
ax[0,i].set_yticks([0,int(np.shape(self.target[pairs[i][0]])[0]/2),np.shape(self.target[pairs[i][0]])[0]])
except:
self.logger.exception("Skipping source entry %d, out of range."%i)
try:
tmp_mask = np.ma.masked_where(self.components[pairs[i][1]]<self.zero_tol*np.max(self.components[pairs[i][1]]),self.components[pairs[i][1]])
im = ax[1,i].imshow(tmp_mask,cmap=self.cm)
                    # axes limits follow the plotted component's shape
                    ax[1,i].set_xlim([0,np.shape(self.components[pairs[i][1]])[1]])
                    ax[1,i].set_ylim([0,np.shape(self.components[pairs[i][1]])[0]])
                    ax[1,i].set_xticks([0,int(np.shape(self.components[pairs[i][1]])[1]/2),np.shape(self.components[pairs[i][1]])[1]])
                    ax[1,i].set_yticks([0,int(np.shape(self.components[pairs[i][1]])[0]/2),np.shape(self.components[pairs[i][1]])[0]])
except:
self.logger.exception("Skipping source entry %d, out of range."%i)
elif self.input_type == 'timeseries':
fig,ax = plt.subplots(rows,1,figsize=self.fig_size,sharex=True,sharey=True)
plt.tight_layout()
for i in range(rows):
try:
ax[i].plot(self.target[pairs[i][0]],'.k',label='source')
except:
self.logger.exception("Skipping source entry %d, out of range."%i)
try:
ax[i].plot(self.components[pairs[i][1]][self.timeseries_cut(self.components[pairs[i][1]]),:],'r',label='prediction')
#ax[i].set_yticks([0.0,(np.max(self.components[pairs[i][1]][self.ts_cut,:]) - np.min(self.components[pairs[i][1]][self.ts_cut,:]))/2.0,np.max(self.components[pairs[i][1]][self.ts_cut,:])])
ax[i].yaxis.set_major_formatter(self.yaxis_format)
ax[i].set_ylim([0,1])
except:
self.logger.exception("Skipping source entry %d, out of range."%i)
fig.text(0.001, 0.5, r'$I$ $\mathrm{(au)}$', ha='center',
va='center', rotation='vertical',fontsize=self.fs)
ax[-1].set_xlabel(r'$t$ $\mathrm{(au)}$',fontsize=self.fs)
ax[0].legend(loc='best')
else:
raise ValueError("Invalid input type option")
if 'print_fig_filename' in kwargs:
plt.savefig(kwargs['print_fig_filename']+'.'+self.print_format,format=self.print_format,dpi=self.print_dpi)
plt.close('all')
else:
plt.show()
def plot_obs_pred_total_sources_ts(self,**kwargs):
"""Plot sources + total for observation and prediction"""
fig = plt.figure(figsize=self.fig_size)
ax = fig.gca()
ax.plot(self.T,'.k',label='Observation')
ax.plot(self.A[self.timeseries_cut(self.A),:],'r',label='Prediction')
for i in range(self.q):
ax.plot(self.components[i][self.timeseries_cut(self.components[i]),:],'--b')
ax.set_xlabel(r'$t$ (au)',fontsize=self.fs)
ax.set_ylabel(r'$I$ (au)',fontsize=self.fs)
ax.yaxis.set_major_formatter(self.yaxis_format)
ax.set_ylim([0,1])
ax.set_xlim([0,len(self.T)])
ax.legend(loc='best')
if 'print_fig_filename' in kwargs:
plt.savefig(kwargs['print_fig_filename']+'.'+self.print_format,format=self.print_format,dpi=self.print_dpi)
plt.close('all')
else:
plt.show()
def plot_div(self,**kwargs):
"""Plot divergence metric as function of iteration"""
try:
fig = plt.figure(figsize=self.fig_size)
ax = fig.gca()
ax.plot(self.div)
ax.set_yscale('log')
ax.set_xlim([0,len(self.div)])
ax.set_ylim([np.min(self.div),np.max(self.div)])
ax.set_title(r'Divergence Measure',fontsize=self.fs)
ax.set_xlabel(r'iteration',fontsize=self.fs)
ax.set_ylabel(r'$d(T,A)$',fontsize=self.fs)
except AttributeError:
self.logger.exception("Cannot plot divergence metric. self.div not set.")
return
if 'print_fig_filename' in kwargs:
plt.savefig(kwargs['print_fig_filename']+'.'+self.print_format,format=self.print_format,dpi=self.print_dpi)
plt.close('all')
else:
plt.show()
def match_closest(self):
"""Create list of pairs of components and targets so that the plots correspond"""
sources = []
[sources.append(k) for k in range(self.q)]
pairs = []
i_target = 0
while sources != []:
min_diff = 1.0e+300
pairs.append(([],[]))
for i in sources:
if self.input_type == 'matrix':
diff = np.mean(np.fabs(self.target[i_target] - self.components[i]))
else:
diff = np.mean(np.fabs(self.target[i_target] - self.components[i][self.timeseries_cut(self.components[i]),:]))
if diff < min_diff:
pairs.pop()
pairs.append((i_target,i))
min_diff = diff
sources.remove(pairs[i_target][1])
i_target += 1
return pairs
def source_id(self):
"""Find sources in image by picking out peak coordinates in target image"""
tmp = []
for c in self.components:
centers = np.unravel_index(np.argmax(c),np.shape(c))
self.logger.debug("(%d,%d)"%centers)
tmp.append(centers)
self.peak_id = np.array(tmp)
def timeseries_cut(self,mat):
"""Get row index for which matrix is maximum."""
return np.unravel_index(mat.argmax(),mat.shape)[0]
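# --- Illustrative usage sketch (not part of the original module) ---
# Builds a synthetic rank-2 factorization so the Plotter can be exercised
# without an NMF solver; all names and data below are made up for the demo.
if __name__ == '__main__':
    ny, nx, q = 64, 64, 2
    np.random.seed(0)
    u = np.random.rand(ny, q)                 # column factors
    v = np.random.rand(q, nx)                 # row factors
    A = u.dot(v)                              # "prediction"
    T = A + 0.01*np.random.rand(ny, nx)       # noisy "observation"
    plotter = Plotter('observation', 'matrix', u, v, A, T, q=q)
    plotter.plot_obs_pred_total(print_fig_filename='obs_pred_demo')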
|
{
"content_hash": "073a32ee6e9344ec73fde0dbba3c65ee",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 208,
"avg_line_length": 44.250803858520904,
"alnum_prop": 0.5288475512280192,
"repo_name": "wtbarnes/solarnmf",
"id": "ca784fb04fb207ddaa0416b4fef2ed596d51d7d1",
"size": "13813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solarnmf/solarnmf_plotting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44343"
}
],
"symlink_target": ""
}
|
import tkinter
from tkinter.filedialog import askdirectory
class File_dialog2():
def file(self):
root = tkinter.Tk()
root.withdraw()
with open("path_output.txt", "w") as file:
filename = askdirectory()
file.write(filename)
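# Illustrative usage (not part of the original module; requires a display
# for the Tk directory chooser):
#
#     File_dialog2().file()   # writes the chosen directory to path_output.txt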
|
{
"content_hash": "8f934af8f2a2f6235f0711b014eed0d6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 50,
"avg_line_length": 19.733333333333334,
"alnum_prop": 0.5709459459459459,
"repo_name": "vetlehjelmtvedt/TranscriptApp",
"id": "6071aa623c08d1569fe04940de9a2351a0358933",
"size": "296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "file_dialog2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20192"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('satsound', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='satellitetrajectory',
name='observer',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='satsound.Observer'),
preserve_default=False,
),
migrations.AlterField(
model_name='satellite',
name='tle',
field=models.CharField(blank=True, max_length=164, verbose_name='two-line element'),
),
]
|
{
"content_hash": "9fbbfa94cc4be17595dfab2068905cba",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 116,
"avg_line_length": 29.75,
"alnum_prop": 0.6162464985994398,
"repo_name": "saanobhaai/apman",
"id": "47de195050b2ce241f5bd06bf41a7f034574533c",
"size": "787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "satsound/migrations/0002_auto_20161110_1753.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "11257"
},
{
"name": "Python",
"bytes": "55713"
}
],
"symlink_target": ""
}
|
"""
Module for implementing a CTRL file object class for the Stuttgart
LMTO-ASA code. It will primarily be used to generate a pymatgen
Structure object in the pymatgen.electronic_structure.cohp.py module.
"""
import re
import numpy as np
from monty.io import zopen
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ry_to_eV, bohr_to_angstrom
from pymatgen.electronic_structure.core import Spin
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.num import round_to_sigfigs
__author__ = "Marco Esters"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Marco Esters"
__email__ = "esters@uoregon.edu"
__date__ = "Nov 30, 2017"
class LMTOCtrl:
"""
Class for parsing CTRL files from the Stuttgart LMTO-ASA code.
Currently, only HEADER, VERS and the structure can be used.
"""
def __init__(self, structure, header=None, version="LMASA-47"):
"""
Args:
structure: The structure as a pymatgen Structure object.
            header: The header for the CTRL file.
Defaults to None.
version: The LMTO version that is used for the VERS category.
Defaults to the newest version (4.7).
"""
self.structure = structure
self.header = header
self.version = version
def __eq__(self, other):
return self.get_string() == other.get_string()
def __repr__(self):
"""
Representation of the CTRL file is as a string.
"""
return self.get_string()
def __str__(self):
"""
String representation of the CTRL file.
"""
return self.get_string()
def get_string(self, sigfigs=8):
"""
Generates the string representation of the CTRL file. This is
        the minimal CTRL file necessary to execute lmhart.run.
"""
ctrl_dict = self.as_dict()
lines = [] if "HEADER" not in ctrl_dict else ["HEADER".ljust(10) + self.header]
if "VERS" in ctrl_dict:
lines.append("VERS".ljust(10) + self.version)
lines.append("STRUC".ljust(10) + "ALAT=" + str(round(ctrl_dict["ALAT"], sigfigs)))
for l, latt in enumerate(ctrl_dict["PLAT"]):
if l == 0:
line = "PLAT=".rjust(15)
else:
line = " ".ljust(15)
line += " ".join([str(round(v, sigfigs)) for v in latt])
lines.append(line)
for cat in ["CLASS", "SITE"]:
for a, atoms in enumerate(ctrl_dict[cat]):
if a == 0:
line = [cat.ljust(9)]
else:
line = [" ".ljust(9)]
for token, val in sorted(atoms.items()):
if token == "POS":
line.append("POS=" + " ".join([str(round(p, sigfigs)) for p in val]))
else:
line.append(token + "=" + str(val))
line = " ".join(line)
lines.append(line)
return "\n".join(lines) + "\n"
def as_dict(self):
"""
Returns the CTRL as a dictionary. "SITE" and "CLASS" are of
the form {'CATEGORY': {'TOKEN': value}}, the rest is of the
form 'TOKEN'/'CATEGORY': value. It gets the conventional standard
structure because primitive cells use the conventional
a-lattice parameter as the scaling factor and not the a-lattice
parameter of the primitive cell.
"""
ctrl_dict = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
}
if self.header is not None:
ctrl_dict["HEADER"] = self.header
if self.version is not None:
ctrl_dict["VERS"] = self.version
sga = SpacegroupAnalyzer(self.structure)
alat = sga.get_conventional_standard_structure().lattice.a
plat = self.structure.lattice.matrix / alat
"""
The following is to find the classes (atoms that are not symmetry
equivalent, and create labels. Note that LMTO only attaches
numbers with the second atom of the same species, e.g. "Bi", "Bi1",
"Bi2", etc.
"""
eq_atoms = sga.get_symmetry_dataset()["equivalent_atoms"]
ineq_sites_index = list(set(eq_atoms))
sites = []
classes = []
num_atoms = {}
for s, site in enumerate(self.structure.sites):
atom = site.specie
label_index = ineq_sites_index.index(eq_atoms[s])
if atom.symbol in num_atoms:
if label_index + 1 > sum(num_atoms.values()):
num_atoms[atom.symbol] += 1
atom_label = atom.symbol + str(num_atoms[atom.symbol] - 1)
classes.append({"ATOM": atom_label, "Z": atom.Z})
else:
num_atoms[atom.symbol] = 1
classes.append({"ATOM": atom.symbol, "Z": atom.Z})
sites.append({"ATOM": classes[label_index]["ATOM"], "POS": site.coords / alat})
ctrl_dict.update(
{
"ALAT": alat / bohr_to_angstrom,
"PLAT": plat,
"CLASS": classes,
"SITE": sites,
}
)
return ctrl_dict
def write_file(self, filename="CTRL", **kwargs):
"""
Writes a CTRL file with structure, HEADER, and VERS that can be
used as input for lmhart.run.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
@classmethod
def from_file(cls, filename="CTRL", **kwargs):
"""
Creates a CTRL file object from an existing file.
Args:
filename: The name of the CTRL file. Defaults to 'CTRL'.
Returns:
An LMTOCtrl object.
"""
with zopen(filename, "rt") as f:
contents = f.read()
return LMTOCtrl.from_string(contents, **kwargs)
@classmethod
def from_string(cls, data, sigfigs=8):
"""
Creates a CTRL file object from a string. This will mostly be
used to read an LMTOCtrl object from a CTRL file. Empty spheres
are ignored.
Args:
data: String representation of the CTRL file.
Returns:
An LMTOCtrl object.
"""
lines = data.split("\n")[:-1]
struc_lines = {
"HEADER": [],
"VERS": [],
"SYMGRP": [],
"STRUC": [],
"CLASS": [],
"SITE": [],
}
for line in lines:
if line != "" and not line.isspace():
if not line[0].isspace():
cat = line.split()[0]
if cat in struc_lines:
struc_lines[cat].append(line)
else:
pass
for cat in struc_lines:
struc_lines[cat] = " ".join(struc_lines[cat]).replace("= ", "=")
structure_tokens = {"ALAT": None, "PLAT": [], "CLASS": [], "SITE": []}
for cat in ["STRUC", "CLASS", "SITE"]:
fields = struc_lines[cat].split("=") # pylint: disable=E1101
for f, field in enumerate(fields):
token = field.split()[-1]
if token == "ALAT":
alat = round(float(fields[f + 1].split()[0]), sigfigs)
structure_tokens["ALAT"] = alat
elif token == "ATOM":
atom = fields[f + 1].split()[0]
if not bool(re.match("E[0-9]*$", atom)):
if cat == "CLASS":
structure_tokens["CLASS"].append(atom)
else:
structure_tokens["SITE"].append({"ATOM": atom})
else:
pass
elif token in ["PLAT", "POS"]:
try:
arr = np.array([round(float(i), sigfigs) for i in fields[f + 1].split()])
except ValueError:
arr = np.array([round(float(i), sigfigs) for i in fields[f + 1].split()[:-1]])
if token == "PLAT":
structure_tokens["PLAT"] = arr.reshape([3, 3])
elif not bool(re.match("E[0-9]*$", atom)):
structure_tokens["SITE"][-1]["POS"] = arr
else:
pass
else:
pass
try:
spcgrp_index = struc_lines["SYMGRP"].index("SPCGRP")
spcgrp = struc_lines["SYMGRP"][spcgrp_index : spcgrp_index + 12]
structure_tokens["SPCGRP"] = spcgrp.split("=")[1].split()[0]
except ValueError:
pass
for token in ["HEADER", "VERS"]:
try:
value = re.split(token + r"\s*", struc_lines[token])[1]
structure_tokens[token] = value.strip()
except IndexError:
pass
return LMTOCtrl.from_dict(structure_tokens)
@classmethod
def from_dict(cls, d):
"""
Creates a CTRL file object from a dictionary. The dictionary
must contain the items "ALAT", PLAT" and "SITE".
Valid dictionary items are:
ALAT: the a-lattice parameter
PLAT: (3x3) array for the lattice vectors
SITE: list of dictionaries: {'ATOM': class label,
'POS': (3x1) array of fractional
coordinates}
CLASS (optional): list of unique atom labels as str
SPCGRP (optional): space group symbol (str) or number (int)
HEADER (optional): HEADER text as a str
VERS (optional): LMTO version as a str
Args:
d: The CTRL file as a dictionary.
Returns:
An LMTOCtrl object.
"""
for cat in ["HEADER", "VERS"]:
if cat not in d:
d[cat] = None
alat = d["ALAT"] * bohr_to_angstrom
plat = d["PLAT"] * alat
species = []
positions = []
for site in d["SITE"]:
species.append(re.split("[0-9*]", site["ATOM"])[0])
positions.append(site["POS"] * alat)
# Only check if the structure is to be generated from the space
# group if the number of sites is the same as the number of classes.
# If lattice and the spacegroup don't match, assume it's primitive.
if "CLASS" in d and "SPCGRP" in d and len(d["SITE"]) == len(d["CLASS"]):
try:
structure = Structure.from_spacegroup(d["SPCGRP"], plat, species, positions, coords_are_cartesian=True)
except ValueError:
structure = Structure(
plat,
species,
positions,
coords_are_cartesian=True,
to_unit_cell=True,
)
else:
structure = Structure(plat, species, positions, coords_are_cartesian=True, to_unit_cell=True)
return cls(structure, header=d["HEADER"], version=d["VERS"])
class LMTOCopl:
"""
Class for reading COPL files, which contain COHP data.
.. attribute: cohp_data
Dict that contains the COHP data of the form:
{bond: {"COHP": {Spin.up: cohps, Spin.down:cohps},
"ICOHP": {Spin.up: icohps, Spin.down: icohps},
"length": bond length}
.. attribute: efermi
The Fermi energy in Ry or eV.
.. attribute: energies
Sequence of energies in Ry or eV.
.. attribute: is_spin_polarized
Boolean to indicate if the calculation is spin polarized.
"""
def __init__(self, filename="COPL", to_eV=False):
"""
Args:
filename: filename of the COPL file. Defaults to "COPL".
to_eV: LMTO-ASA gives energies in Ry. To convert energies into
eV, set to True. Defaults to False for energies in Ry.
"""
# COPL files have an extra trailing blank line
with zopen(filename, "rt") as f:
contents = f.read().split("\n")[:-1]
# The parameters line is the second line in a COPL file. It
# contains all parameters that are needed to map the file.
parameters = contents[1].split()
num_bonds = int(parameters[0])
if int(parameters[1]) == 2:
spins = [Spin.up, Spin.down]
self.is_spin_polarized = True
else:
spins = [Spin.up]
self.is_spin_polarized = False
# The COHP data start in row num_bonds + 3
data = np.array([np.array(row.split(), dtype=float) for row in contents[num_bonds + 2 :]]).transpose()
if to_eV:
# LMTO energies have 5 sig figs
self.energies = np.array(
[round_to_sigfigs(energy, 5) for energy in data[0] * Ry_to_eV],
dtype=float,
)
self.efermi = round_to_sigfigs(float(parameters[-1]) * Ry_to_eV, 5)
else:
self.energies = data[0]
self.efermi = float(parameters[-1])
cohp_data = {}
for bond in range(num_bonds):
label, length, sites = self._get_bond_data(contents[2 + bond])
cohp = {spin: data[2 * (bond + s * num_bonds) + 1] for s, spin in enumerate(spins)}
if to_eV:
icohp = {
spin: np.array([round_to_sigfigs(i, 5) for i in data[2 * (bond + s * num_bonds) + 2] * Ry_to_eV])
for s, spin in enumerate(spins)
}
else:
icohp = {spin: data[2 * (bond + s * num_bonds) + 2] for s, spin in enumerate(spins)}
# This takes care of duplicate labels
if label in cohp_data:
i = 1
lab = "%s-%d" % (label, i)
while lab in cohp_data:
i += 1
lab = "%s-%d" % (label, i)
label = lab
cohp_data[label] = {
"COHP": cohp,
"ICOHP": icohp,
"length": length,
"sites": sites,
}
self.cohp_data = cohp_data
@staticmethod
def _get_bond_data(line):
"""
Subroutine to extract bond label, site indices, and length from
a COPL header line. The site indices are zero-based, so they
can be easily used with a Structure object.
Example header line: Fe-1/Fe-1-tr(-1,-1,-1) : 2.482 Ang.
Args:
line: line in the COHPCAR header describing the bond.
Returns:
The bond label, the bond length and a tuple of the site
indices.
"""
line = line.split()
length = float(line[2])
# Replacing "/" with "-" makes splitting easier
sites = line[0].replace("/", "-").split("-")
site_indices = tuple(int(ind) - 1 for ind in sites[1:4:2])
species = tuple(re.split(r"\d+", spec)[0] for spec in sites[0:3:2])
label = "%s%d-%s%d" % (
species[0],
site_indices[0] + 1,
species[1],
site_indices[1] + 1,
)
return label, length, site_indices
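# --- Illustrative usage sketch (assumes "CTRL"/"COPL" output from an actual
# LMTO-ASA run exists in the working directory; not part of this module) ---
#
#     ctrl = LMTOCtrl.from_file("CTRL")       # parse structure, HEADER, VERS
#     ctrl.write_file("CTRL.copy")            # write the minimal CTRL back out
#
#     copl = LMTOCopl("COPL", to_eV=True)     # read COHP data, converted to eV
#     for label, data in copl.cohp_data.items():
#         print(label, data["length"], data["sites"])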
|
{
"content_hash": "3aa449094065fd875b62af0c2d0a07f2",
"timestamp": "",
"source": "github",
"line_count": 428,
"max_line_length": 119,
"avg_line_length": 36.058411214953274,
"alnum_prop": 0.5095574418453962,
"repo_name": "vorwerkc/pymatgen",
"id": "53de0b399a6faaf2c4e03fe9a91295d92e09e1e8",
"size": "15527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/io/lmto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "87"
},
{
"name": "CSS",
"bytes": "7572"
},
{
"name": "Cython",
"bytes": "38792"
},
{
"name": "HTML",
"bytes": "12642493"
},
{
"name": "Python",
"bytes": "8941675"
},
{
"name": "Roff",
"bytes": "1407429"
},
{
"name": "Shell",
"bytes": "12010"
}
],
"symlink_target": ""
}
|
import pbr.version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'oslosphinx',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
author = 'OpenStack-Ansible Contributors'
category = 'Miscellaneous'
copyright = '2014-2016, OpenStack-Ansible Contributors'
description = 'OpenStack-Ansible deploys OpenStack environments using Ansible.'
project = 'OpenStack-Ansible'
role_name = 'pip_install'
target_name = 'openstack-ansible-' + role_name
title = 'OpenStack-Ansible Release Notes: ' + role_name + ' role'
# The link to the browsable source code (for the left hand menu)
oslosphinx_cgit_link = 'https://git.openstack.org/cgit/openstack/' + target_name
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version_info = pbr.version.VersionInfo(target_name)
# The full version, including alpha/beta/rc tags.
release = version_info.version_string_with_vcs()
# The short X.Y version.
version = version_info.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = target_name + '-docs'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, target_name + '.tex',
title, author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, target_name,
title, [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, target_name,
title, author, project,
description, category),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
|
{
"content_hash": "6b5f2bcad546a1db14800e01779d71a0",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 80,
"avg_line_length": 32.526923076923076,
"alnum_prop": 0.7008395412084664,
"repo_name": "os-cloud/openstack-ansible-pip_install",
"id": "9eb34abc7b12c111487befecf1c079360d78f4b0",
"size": "9309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "releasenotes/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10339"
},
{
"name": "Shell",
"bytes": "1927"
}
],
"symlink_target": ""
}
|
"""
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
import re
import sys
try:
import MySQLdb as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1,2,1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions
from MySQLdb.constants import FIELD_TYPE, FLAG, CLIENT
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.safestring import SafeString, SafeUnicode
# Raise exceptions for database warnings if DEBUG is on
from django.conf import settings
if settings.DEBUG:
from warnings import filterwarnings
filterwarnings("error", category=Database.Warning)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeUnicode and SafeString as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: util.typecast_time,
FIELD_TYPE.DECIMAL: util.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard util.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
"""
A thin wrapper around MySQLdb's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
Implemented as a wrapper, rather than a subclass, so that we aren't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (1048,)
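    # (1048 is MySQL's ER_BAD_NULL_ERROR: NOT NULL violations surface as
    # OperationalError, so they are remapped to IntegrityError below.)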
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
return self.cursor.execute(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.OperationalError, e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e[0] in self.codes_for_integrityerror:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
raise
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.OperationalError, e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e[0] in self.codes_for_integrityerror:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
raise
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
update_can_self_select = False
allows_group_by_pk = True
related_fields_match_type = True
allow_sliced_subqueries = False
has_select_for_update = True
has_select_for_update_nowait = False
supports_forward_references = False
supports_long_model_names = False
supports_microsecond_precision = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
allows_primary_key_0 = False
def _can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE INTROSPECT_TEST (X INT)')
# This command is MySQL specific; the second column
# will tell you the default table type of the created
# table. Since all Django's test tables will have the same
# table type, that's enough to evaluate the feature.
cursor.execute('SHOW TABLE STATUS WHERE Name="INTROSPECT_TEST"')
result = cursor.fetchone()
cursor.execute('DROP TABLE INTROSPECT_TEST')
return result[1] != 'MyISAM'
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.mysql.compiler"
def date_extract_sql(self, lookup_type, field_name):
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK(%s)" % field_name
else:
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
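        # e.g. lookup_type='month' produces
        # "CAST(DATE_FORMAT(<field>, '%%Y-%%m-01 00:00:00') AS DATETIME)"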
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
            format_str = ''.join(format[:i] + format_def[i:])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
def date_interval_sql(self, sql, connector, timedelta):
return "(%s %s INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND)" % (sql, connector,
timedelta.days, timedelta.seconds, timedelta.microseconds)
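    # Example (illustrative): sql='col', connector='+' and
    # timedelta(days=1, seconds=30) yield
    # "(col + INTERVAL '1 0:0:30:0' DAY_MICROSECOND)".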
def drop_foreignkey_sql(self):
return "DROP FOREIGN KEY"
def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return ["NULL"]
def fulltext_search_sql(self, field_name):
return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615L
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences):
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('%s %s;' % (style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table))))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
# 'ALTER TABLE table AUTO_INCREMENT = 1;'... style SQL statements
# to reset sequence indices
sql.extend(["%s %s %s %s %s;" % \
(style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_TABLE(self.quote_name(sequence['table'])),
style.SQL_KEYWORD('AUTO_INCREMENT'),
style.SQL_FIELD('= 1'),
) for sequence in sequences])
return sql
else:
return []
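    # Example (illustrative): tables=['a'] with one sequence on table 'a'
    # returns roughly ['SET FOREIGN_KEY_CHECKS = 0;', 'TRUNCATE `a`;',
    # 'SET FOREIGN_KEY_CHECKS = 1;', 'ALTER TABLE `a` AUTO_INCREMENT = 1;'].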
def value_to_db_datetime(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware datetimes
if value.tzinfo is not None:
raise ValueError("MySQL backend does not support timezone-aware datetimes.")
# MySQL doesn't support microseconds
return unicode(value.replace(microsecond=0))
def value_to_db_time(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware datetimes
if value.tzinfo is not None:
raise ValueError("MySQL backend does not support timezone-aware datetimes.")
# MySQL doesn't support microseconds
return unicode(value.replace(microsecond=0))
def year_lookup_bounds(self, value):
# Again, no microseconds
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.99'
return [first % value, second % value]
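    # Example (illustrative): value=2005 returns
    # ['2005-01-01 00:00:00', '2005-12-31 23:59:59.99'].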
def max_name_length(self):
return 64
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql'
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'regex': 'REGEXP BINARY %s',
'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.server_version = None
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations()
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = DatabaseValidation(self)
def _valid_connection(self):
if self.connection is not None:
try:
self.connection.ping()
return True
except DatabaseError:
self.connection.close()
self.connection = None
return False
def _cursor(self):
if not self._valid_connection():
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
'use_unicode': True,
}
settings_dict = self.settings_dict
if settings_dict['USER']:
kwargs['user'] = settings_dict['USER']
if settings_dict['NAME']:
kwargs['db'] = settings_dict['NAME']
if settings_dict['PASSWORD']:
kwargs['passwd'] = settings_dict['PASSWORD']
if settings_dict['HOST'].startswith('/'):
kwargs['unix_socket'] = settings_dict['HOST']
elif settings_dict['HOST']:
kwargs['host'] = settings_dict['HOST']
if settings_dict['PORT']:
kwargs['port'] = int(settings_dict['PORT'])
# We need the number of potentially affected rows after an
# "UPDATE", not the number of changed rows.
kwargs['client_flag'] = CLIENT.FOUND_ROWS
kwargs.update(settings_dict['OPTIONS'])
self.connection = Database.connect(**kwargs)
self.connection.encoders[SafeUnicode] = self.connection.encoders[unicode]
self.connection.encoders[SafeString] = self.connection.encoders[str]
connection_created.send(sender=self.__class__, connection=self)
cursor = CursorWrapper(self.connection.cursor())
return cursor
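    # Example (illustrative settings): NAME='mydb', USER='u',
    # HOST='localhost', PORT='3306' produce connect kwargs roughly like
    # {'db': 'mydb', 'user': 'u', 'host': 'localhost', 'port': 3306,
    #  'client_flag': CLIENT.FOUND_ROWS, ...} on top of the conv/charset
    # defaults above.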
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except Database.NotSupportedError:
pass
def get_server_version(self):
if not self.server_version:
if not self._valid_connection():
self.cursor()
m = server_version_re.match(self.connection.get_server_info())
if not m:
raise Exception('Unable to determine MySQL version from version string %r' % self.connection.get_server_info())
self.server_version = tuple([int(x) for x in m.groups()])
return self.server_version
|
{
"content_hash": "92be1c6ac6ae7fe8491666b52137aa22",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 127,
"avg_line_length": 40.48695652173913,
"alnum_prop": 0.6280068728522337,
"repo_name": "softak/webfaction_demo",
"id": "4f5f329b18e5224868ac49b784204a8f50180524",
"size": "13968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/django/db/backends/mysql/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "33283"
},
{
"name": "JavaScript",
"bytes": "984889"
},
{
"name": "Python",
"bytes": "8055804"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
}
|
import contextlib
import json
from .common import (assert_command, assert_lines, exec_command, show_app,
watch_all_deployments)
GOOD_GROUP = 'tests/data/marathon/groups/good.json'
def test_deploy_group():
_deploy_group(GOOD_GROUP)
_remove_group('test-group')
def test_group_list_table():
with _group(GOOD_GROUP, 'test-group'):
assert_lines(['dcos', 'marathon', 'group', 'list'], 3)
def test_validate_complicated_group_and_app():
_deploy_group('tests/data/marathon/groups/complicated.json')
_remove_group('test-group')
def test_optional_deploy_group():
_deploy_group(GOOD_GROUP, False)
_remove_group('test-group')
def test_add_existing_group():
with _group(GOOD_GROUP, 'test-group'):
with open(GOOD_GROUP) as fd:
stderr = b"Group '/test-group' already exists\n"
assert_command(['dcos', 'marathon', 'group', 'add'],
returncode=1,
stderr=stderr,
stdin=fd)
def test_show_group():
with _group(GOOD_GROUP, 'test-group'):
_show_group('test-group')
def test_add_bad_app():
with open('tests/data/marathon/groups/bad_app.json') as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'group', 'add'],
stdin=fd)
expected = "Error: Additional properties are not allowed" + \
" ('badtype' was unexpected)"
assert returncode == 1
assert stdout == b''
assert stderr.decode('utf-8').startswith(expected)
def test_add_bad_group():
with open('tests/data/marathon/groups/bad_group.json') as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'group', 'add'],
stdin=fd)
expected = "Error: Additional properties are not allowed" + \
" ('fakeapp' was unexpected)"
assert returncode == 1
assert stdout == b''
assert stderr.decode('utf-8').startswith(expected)
def test_add_bad_complicated_group():
with open('tests/data/marathon/groups/complicated_bad.json') as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'group', 'add'],
stdin=fd)
err = "Error: missing required property 'id'"
assert returncode == 1
assert stdout == b''
assert err in stderr.decode('utf-8')
def test_update_group():
with _group(GOOD_GROUP, 'test-group'):
newapp = json.dumps([{"id": "appadded", "cmd": "sleep 0"}])
appjson = "apps={}".format(newapp)
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'group', 'update', 'test-group/sleep',
appjson])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
watch_all_deployments()
show_app('test-group/sleep/appadded')
def test_update_group_from_stdin():
with _group(GOOD_GROUP, 'test-group'):
_update_group(
'test-group',
'tests/data/marathon/groups/update_good.json')
show_app('test-group/updated')
def test_update_missing_group():
assert_command(['dcos', 'marathon', 'group', 'update', 'missing-id'],
stderr=b"Error: Group '/missing-id' does not exist\n",
returncode=1)
def test_update_missing_field():
with _group(GOOD_GROUP, 'test-group'):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'group', 'update',
'test-group/sleep', 'missing="a string"'])
assert returncode == 1
assert stdout == b''
assert stderr.decode('utf-8').startswith(
"Error: 'missing' is not a valid property. "
"Possible properties are: ")
def test_scale_group():
_deploy_group('tests/data/marathon/groups/scale.json')
returncode, stdout, stderr = exec_command(['dcos', 'marathon', 'group',
'scale', 'scale-group', '2'])
assert stderr == b''
assert returncode == 0
watch_all_deployments()
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'group', 'show',
'scale-group'])
res = json.loads(stdout.decode('utf-8'))
assert res['groups'][0]['apps'][0]['instances'] == 2
_remove_group('scale-group')
def test_scale_group_not_exist():
returncode, stdout, stderr = exec_command(['dcos', 'marathon', 'group',
'scale', 'scale-group', '2'])
assert stderr == b''
watch_all_deployments()
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'group', 'show',
'scale-group'])
res = json.loads(stdout.decode('utf-8'))
assert len(res['apps']) == 0
_remove_group('scale-group')
def test_scale_group_when_scale_factor_negative():
_deploy_group('tests/data/marathon/groups/scale.json')
returncode, stdout, stderr = exec_command(['dcos', 'marathon', 'group',
'scale', 'scale-group', '-2'])
assert b'Command not recognized' in stdout
assert returncode == 1
watch_all_deployments()
_remove_group('scale-group')
def test_scale_group_when_scale_factor_not_float():
_deploy_group('tests/data/marathon/groups/scale.json')
returncode, stdout, stderr = exec_command(['dcos', 'marathon', 'group',
'scale', 'scale-group', '1.a'])
assert stderr == b'Error parsing string as float\n'
assert returncode == 1
watch_all_deployments()
_remove_group('scale-group')
def _remove_group(group_id):
assert_command(['dcos', 'marathon', 'group', 'remove', group_id])
# Let's make sure that we don't return until the deployment has finished
watch_all_deployments()
def _deploy_group(file_path, stdin=True):
if stdin:
with open(file_path) as fd:
assert_command(['dcos', 'marathon', 'group', 'add'], stdin=fd)
else:
assert_command(['dcos', 'marathon', 'group', 'add', file_path])
# Let's make sure that we don't return until the deployment has finished
watch_all_deployments()
def _show_group(group_id, version=None):
if version is None:
cmd = ['dcos', 'marathon', 'group', 'show', group_id]
else:
cmd = ['dcos', 'marathon', 'group', 'show',
'--group-version={}'.format(version), group_id]
returncode, stdout, stderr = exec_command(cmd)
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert isinstance(result, dict)
assert result['id'] == '/' + group_id
assert stderr == b''
return result
def _update_group(group_id, file_path):
with open(file_path) as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'group', 'update', group_id],
stdin=fd)
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
# Let's make sure that we don't return until the deployment has finished
watch_all_deployments()
@contextlib.contextmanager
def _group(path, group_id):
"""Context manager that deploys a group on entrance, and removes it on
exit.
:param path: path to group's json definition
:type path: str
:param group_id: group id
:type group_id: str
:rtype: None
"""
_deploy_group(path)
try:
yield
finally:
_remove_group(group_id)
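# Usage sketch (mirrors the tests above):
#   with _group(GOOD_GROUP, 'test-group'):
#       _show_group('test-group')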
|
{
"content_hash": "3ff63283f823fd38c6b8470da0eba2a6",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 78,
"avg_line_length": 31.374485596707817,
"alnum_prop": 0.5853882476390346,
"repo_name": "Yhgenomics/dcos-cli",
"id": "783afaa6768753542349ad19911cf18187fdba53",
"size": "7624",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cli/tests/integrations/test_marathon_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "282"
},
{
"name": "PowerShell",
"bytes": "6623"
},
{
"name": "Python",
"bytes": "442845"
},
{
"name": "Shell",
"bytes": "6015"
}
],
"symlink_target": ""
}
|
import pifacecad
from carpu.events import GetNextSongEvent
from carpu.events import VolumeUPEvent
from carpu.events import VolumeDOWNEvent
class Face(object):
def __init__(self, server):
self.events = server
self.cad = pifacecad.PiFaceCAD()
self.cad.lcd.set_cursor(3, 0)
self.cad.lcd.write("started")
self.cad.lcd.blink_off()
self.cad.lcd.backlight_on()
self.switchlistener = pifacecad.SwitchEventListener(chip=self.cad)
for x in range(8):
self.switchlistener.register(x, pifacecad.IODIR_ON, self.test)
self.switchlistener.activate()
def stop(self):
self.switchlistener.deactivate()
def test(self, event):
if event.pin_num == 0:
self.events.fire(GetNextSongEvent(None))
elif event.pin_num == 1:
print("Pause")
elif event.pin_num == 6:
self.events.fire(VolumeDOWNEvent(None))
elif event.pin_num == 7:
self.events.fire(VolumeUPEvent(None))
print(repr(event.pin_num))
|
{
"content_hash": "3791f931792cad77905b92a72f1e5ebd",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 75,
"avg_line_length": 27.846153846153847,
"alnum_prop": 0.6151012891344383,
"repo_name": "duct-tape/taped-car-stereo",
"id": "90a5d551173936447af3d225a28ff2133463eb1c",
"size": "1086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "carpu/face.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6904"
},
{
"name": "Shell",
"bytes": "776"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import bluebottle.fsm
import bluebottle.utils.fields
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
import djmoney.models.fields
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('funding', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', bluebottle.fsm.FSMField(default=b'new', max_length=20)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'abstract': False,
},
),
migrations.AlterField(
model_name='donation',
name='amount',
field=bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12),
),
migrations.AlterField(
model_name='donation',
name='amount_currency',
field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=50),
),
migrations.AlterField(
model_name='funding',
name='target',
field=bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12),
),
migrations.AlterField(
model_name='funding',
name='target_currency',
field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=50),
),
migrations.AddField(
model_name='payment',
name='donation',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='payment', to='funding.Donation'),
),
migrations.AddField(
model_name='payment',
name='polymorphic_ctype',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_funding.payment_set+', to='contenttypes.ContentType'),
),
]
|
{
"content_hash": "b9a93c5ab9a65d9183332fcad0cd13c8",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 188,
"avg_line_length": 40.26229508196721,
"alnum_prop": 0.5977198697068404,
"repo_name": "onepercentclub/bluebottle",
"id": "c46c258498cf2e012c9d2fd320684846ac9d0364",
"size": "2530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/funding/migrations/0002_auto_20190604_1458.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
}
|
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('occurrence', '0015_auto_20190122_1638'),
]
operations = [
migrations.AlterField(
model_name='areaencounter',
name='code',
field=models.SlugField(blank=True, help_text='A URL-safe, short code for the area. Multiple records of the same Area will be recognised by the same area type and code.', max_length=1000, null=True, verbose_name='Area code'),
),
migrations.AlterField(
model_name='areaencounter',
name='encountered_by',
field=models.ForeignKey(blank=True, help_text='The person who experienced the original encounter.', null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='Encountered by'),
),
migrations.AlterField(
model_name='areaencounter',
name='source',
field=models.PositiveIntegerField(choices=[(0, 'Direct entry'), (1, 'Manual entry from paper datasheet'), (2, 'Digital data capture (ODK)'), (3, 'Partial survey'), (10, 'Threatened Fauna'), (11, 'Threatened Flora'), (12, 'Threatened Communities'), (13, 'Threatened Communities Boundaries'), (14, 'Threatened Communities Buffers'), (15, 'Threatened Communities Sites'), (20, 'Turtle Tagging Database WAMTRAM2'), (21, 'Ningaloo Turtle Program'), (22, 'Broome Turtle Program'), (23, 'Pt Hedland Turtle Program'), (24, 'Gnaraloo Turtle Program'), (25, 'Eco Beach Turtle Program'), (30, 'Cetacean Strandings Database'), (31, 'Pinniped Strandings Database')], default=0, help_text='Where was this record captured initially?', verbose_name='Data Source'),
),
migrations.AlterField(
model_name='areaencounter',
name='source_id',
field=models.CharField(default=uuid.UUID('66275b8a-2833-11e9-a86f-ecf4bb19b5fc'), help_text='The ID of the record in the original source, if available.', max_length=1000, verbose_name='Source ID'),
),
]
|
{
"content_hash": "380c2cd7edb7f86fd1618175178223c4",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 760,
"avg_line_length": 61.371428571428574,
"alnum_prop": 0.6680633147113594,
"repo_name": "parksandwildlife/wastd",
"id": "e1859be18d8cb0ba3614a91eec1dc5f1108206d0",
"size": "2197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "occurrence/migrations/0016_auto_20190204_1214.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9150"
},
{
"name": "HTML",
"bytes": "60851"
},
{
"name": "JavaScript",
"bytes": "18966"
},
{
"name": "Python",
"bytes": "853568"
},
{
"name": "Shell",
"bytes": "4200"
},
{
"name": "TeX",
"bytes": "16951"
}
],
"symlink_target": ""
}
|
"""
The logger module itself has the common logging functions of Python's
:class:`logging.Logger`. For example:
.. code-block:: python
from tensorpack.utils import logger
logger.set_logger_dir('train_log/test')
logger.info("Test")
logger.error("Error happened!")
"""
import logging
import os
import os.path
import shutil
import sys
from datetime import datetime
from six.moves import input
from termcolor import colored
__all__ = ['set_logger_dir', 'auto_set_dir', 'get_logger_dir']
class _MyFormatter(logging.Formatter):
def format(self, record):
date = colored('[%(asctime)s @%(filename)s:%(lineno)d]', 'green')
msg = '%(message)s'
if record.levelno == logging.WARNING:
fmt = date + ' ' + colored('WRN', 'red', attrs=['blink']) + ' ' + msg
elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
fmt = date + ' ' + colored('ERR', 'red', attrs=['blink', 'underline']) + ' ' + msg
elif record.levelno == logging.DEBUG:
fmt = date + ' ' + colored('DBG', 'yellow', attrs=['blink']) + ' ' + msg
else:
fmt = date + ' ' + msg
if hasattr(self, '_style'):
# Python3 compatibility
self._style._fmt = fmt
self._fmt = fmt
return super(_MyFormatter, self).format(record)
def _getlogger():
# this file is synced to "dataflow" package as well
package_name = "dataflow" if __name__.startswith("dataflow") else "tensorpack"
logger = logging.getLogger(package_name)
logger.propagate = False
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
logger.addHandler(handler)
return logger
_logger = _getlogger()
_LOGGING_METHOD = ['info', 'warning', 'error', 'critical', 'exception', 'debug', 'setLevel', 'addFilter']
# export logger functions
for func in _LOGGING_METHOD:
locals()[func] = getattr(_logger, func)
__all__.append(func)
# 'warn' is deprecated in logging module
warn = _logger.warning
__all__.append('warn')
def _get_time_str():
return datetime.now().strftime('%m%d-%H%M%S')
# globals: logger file and directory:
LOG_DIR = None
_FILE_HANDLER = None
def _set_file(path):
global _FILE_HANDLER
if os.path.isfile(path):
backup_name = path + '.' + _get_time_str()
shutil.move(path, backup_name)
_logger.info("Existing log file '{}' backuped to '{}'".format(path, backup_name)) # noqa: F821
hdl = logging.FileHandler(
filename=path, encoding='utf-8', mode='w')
hdl.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
_FILE_HANDLER = hdl
_logger.addHandler(hdl)
_logger.info("Argv: " + ' '.join(sys.argv))
def set_logger_dir(dirname, action=None):
"""
Set the directory for global logging.
Args:
dirname(str): log directory
action(str): an action of ["k","d","q"] to be performed
when the directory exists. Will ask user by default.
"d": delete the directory. Note that the deletion may fail when
the directory is used by tensorboard.
"k": keep the directory. This is useful when you resume from a
previous training and want the directory to look as if the
training was not interrupted.
Note that this option does not load old models or any other
old states for you. It simply does nothing.
"""
dirname = os.path.normpath(dirname)
global LOG_DIR, _FILE_HANDLER
if _FILE_HANDLER:
# unload and close the old file handler, so that we may safely delete the logger directory
_logger.removeHandler(_FILE_HANDLER)
del _FILE_HANDLER
def dir_nonempty(dirname):
# If directory exists and nonempty (ignore hidden files), prompt for action
return os.path.isdir(dirname) and len([x for x in os.listdir(dirname) if x[0] != '.'])
if dir_nonempty(dirname):
if not action:
_logger.warning("""\
Log directory {} exists! Use 'd' to delete it. """.format(dirname))
_logger.warning("""\
If you're resuming from a previous run, you can choose to keep it.
Press any other key to exit. """)
while not action:
action = input("Select Action: k (keep) / d (delete) / q (quit):").lower().strip()
act = action
if act == 'b':
backup_name = dirname + _get_time_str()
shutil.move(dirname, backup_name)
info("Directory '{}' backuped to '{}'".format(dirname, backup_name)) # noqa: F821
elif act == 'd':
shutil.rmtree(dirname, ignore_errors=True)
if dir_nonempty(dirname):
shutil.rmtree(dirname, ignore_errors=False)
elif act == 'n':
dirname = dirname + _get_time_str()
info("Use a new log directory {}".format(dirname)) # noqa: F821
elif act == 'k':
pass
else:
raise OSError("Directory {} exits!".format(dirname))
LOG_DIR = dirname
from .fs import mkdir_p
mkdir_p(dirname)
_set_file(os.path.join(dirname, 'log.log'))
def auto_set_dir(action=None, name=None):
"""
Use :func:`logger.set_logger_dir` to set log directory to
"./train_log/{scriptname}:{name}". "scriptname" is the name of the main python file currently running"""
mod = sys.modules['__main__']
basename = os.path.basename(mod.__file__)
auto_dirname = os.path.join('train_log', basename[:basename.rfind('.')])
if name:
auto_dirname += '_%s' % name if os.name == 'nt' else ':%s' % name
set_logger_dir(auto_dirname, action=action)
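# Example (illustrative): running "python train.py" with name='run1' sets the
# log directory to 'train_log/train:run1' ('train_log/train_run1' on Windows).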
def get_logger_dir():
"""
Returns:
The logger directory, or None if not set.
The directory is used for general logging, tensorboard events, checkpoints, etc.
"""
return LOG_DIR
|
{
"content_hash": "0f9a173c07ba47159896dd7722ac2698",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 108,
"avg_line_length": 34.812865497076025,
"alnum_prop": 0.6092726356458928,
"repo_name": "ppwwyyxx/tensorpack",
"id": "31d24d3260264f8c960ce36c39fe56eb286f39fc",
"size": "5996",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorpack/utils/logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "731254"
},
{
"name": "Shell",
"bytes": "1581"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.decorators import login_required
from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import render
from django.utils.datetime_safe import datetime
from django.utils.decorators import method_decorator
from django.http import HttpResponseRedirect
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse, reverse_lazy
from django.core.mail import send_mail
from django.views.generic import ListView, DetailView
import django.contrib.messages as messages
from buzzit_models.models import *
@login_required
def report_user(request, user_id):
    """
    The current user reports another user, giving a reason that must not be empty.
    :param request:
    :param user_id:
    :return:
    """
    if request.method == "POST":
        try:
            reported_user = User.objects.get(pk=user_id)
        except ObjectDoesNotExist:
            messages.error(request, "Der Benutzer existiert nicht.")
            return HttpResponseRedirect(reverse_lazy("home"))
        report_message = UserReport()
        report_text = request.POST.get("text", False)
        if not report_text or len(report_text) < 1:
            messages.error(request, "Text zum Benutzermelden ist zu geben")
            return HttpResponseRedirect(reverse_lazy("home"))
        report_message.text = report_text
        report_message.creator = request.user
        report_message.created = datetime.now()
        report_message.reported_user = reported_user
        report_message.save()
        messages.info(request, "Sie haben den <User:%s> Benutzer gemeldet" % reported_user)
        return HttpResponseRedirect(reverse_lazy('home'))
    else:
        try:
            reported_profile = Profile.objects.get(pk=user_id)
        except ObjectDoesNotExist:
            messages.error(request, "Der Benutzer existiert nicht.")
            return HttpResponseRedirect(reverse_lazy("home"))
        return render(request, "logged_in/report_user.html", {"profile": reported_profile})
class UserReportDetailsView(SuccessMessageMixin, ListView):
"""
display the report text and reported user
"""
model = UserReport
template_name = "logged_in/user_report_details.html"
def get_queryset(self):
report_id = self.kwargs.get("report_id")
try:
return UserReport.objects.filter(pk=report_id).order_by("created")
except ObjectDoesNotExist:
messages.error(self.request, "Benutzer existiert nicht")
return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
def get_context_data(self, **kwargs):
context = super(UserReportDetailsView, self).get_context_data(**kwargs)
report_id = self.kwargs.get("report_id")
try:
report = UserReport.objects.get(pk=report_id)
except ObjectDoesNotExist:
messages.error(self.request, "Benutzer existiert nicht")
return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
reported_user_profile = report.reported_user.profile
context["profile"] = reported_user_profile
context["userreport"] = report
return context
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(UserReportDetailsView, self).dispatch(request, *args, **kwargs)
class AdminFrontpageView():
pass
@login_required
def adminFrontPage(request):
"""
show all userreports and
postreports
:param request:
:return:
"""
if request.user.is_superuser:
userreports = UserReport.objects.filter(closed=False).all()
postreports = CircleMessageReport.objects.filter(closed=False).all()
return render(request, "logged_in/admin_dashboard.html",
{"user_reports": userreports, "post_reports": postreports})
else:
messages.error(request, "Sie haben nicht die noetigen Zugangsrechte!")
return HttpResponseRedirect(reverse("home"))
class MessageReportDetailsView(DetailView):
model = CircleMessageReport
slug_field = "id"
template_name = "logged_in/post_report_details.html"
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(MessageReportDetailsView, self).dispatch(request, *args, **kwargs)
@login_required
def AdminOverviewView(request):
    if request.user.is_superuser:
        adminlist = User.objects.filter(is_superuser=True)
return render(request, "logged_in/admin_list.html", {"userlist": adminlist})
else:
messages.error(request, "Sie haben nicht die noetigen Zugangsrechte!")
return HttpResponseRedirect(reverse("home"))
@login_required
def delete_reported_post(request, report_id):
    """
    Admin deletes a reported message. If the message has answers, the
    reported message is deleted together with all of its answers;
    otherwise only the message itself is deleted.
    TODO: what happens if a message has been rebuzzed?
    :param request:
    :param report_id:
    :return:
    """
    try:
        report = CircleMessageReport.objects.get(pk=report_id)
    except ObjectDoesNotExist:
        messages.error(request, "Der Report existiert nicht")
        return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
    if not request.user.is_superuser:
        messages.error(request, "Sie haben nicht die noetigen Zugangsrechte!")
        return HttpResponseRedirect(reverse("home"))
    # If the reported post has answers, delete them all as well.
    post_to_del = report.reported_message
    answers = Circle_message.objects.filter(answer_to=post_to_del)
    answers.delete()
    post_to_del.delete()
    report.issuer = request.user
    report.valid = True
    report.closed = True
    # Bug fix: persist the report, otherwise it stays listed as open.
    report.save()
    messages.success(request, "Die Nachricht wurde erfolgreich geloescht")
    return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
@login_required
def promote_user_to_admin(request, user_id):
"""
check if user exists, then check if user is active
:param request:
:param user_id:
:return:
"""
try:
admin_user = User.objects.get(pk=user_id)
except ObjectDoesNotExist:
messages.error(request, "Der Benuzer existiert nicht")
return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
if not (request.user.is_superuser):
messages.error(request, "Sie haben nicht die noetigen Zugangsrechte!")
return HttpResponseRedirect(reverse("home"))
if not (admin_user.is_active):
messages.info(request, "Der Benutzer ist deaktiviert")
return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
admin_user.is_superuser = True
admin_user.save()
messages.info(request, "Der Benutzer %s ist als AdminUser hinzugefuegt" % (admin_user.username,))
return HttpResponseRedirect(reverse_lazy("admins_overview"))
@login_required
def demote_admin_to_user(request, user_id):
"""
check if user exists, check if user is adminUser
:param request:
:param user_id:
:return:
"""
try:
demote_user = User.objects.get(pk=user_id)
except ObjectDoesNotExist:
messages.error(request, "Der Benutzer existiert nicht")
return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
if not (request.user.is_superuser):
messages.error(request, "Sie haben nicht die noetigen Zugangsrechte!")
return HttpResponseRedirect(reverse("home"))
if not (demote_user.is_superuser):
messages.error(request, "Der Benutzer ist kein Admin ")
return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
demote_user.is_superuser = False
demote_user.save()
messages.info(request, "Die Adminrechte von dem Benutzer wird entziehen")
return HttpResponseRedirect(reverse_lazy("admins_overview"))
@login_required
def report_message(request, message_id):
"""
Report a circlemessage with given <message_id>, if that exists.
If that does not exist, then an error for the user is returned and he gets redirected to home.
If that message exists,
then the report will be created, if an reason (report.text) was given.
The report is saved then.
if there is no reason, an error will be created and the user is redirected to home.
:param request:
:param message_id:
:return:
"""
try:
reported_message = Circle_message.objects.get(pk=message_id)
except Exception:
messages.error(request, "Die Nachricht existiert nicht")
return HttpResponseRedirect(reverse("home"))
if request.method == "POST":
report = CircleMessageReport()
report.reported_message = reported_message
report.text = request.POST.get("text", False)
if not report.text or len(report.text) < 1:
messages.error(request, "Keine Begruendung angegeben")
return HttpResponseRedirect(reverse("home"))
report.creator = request.user
report.created = datetime.now()
report.save()
messages.success(request, "Nachricht wurde gemeldet")
return HttpResponseRedirect(reverse("home"))
reported_profile = Profile.objects.get(pk=reported_message.creator.pk)
return render(request, "logged_in/report_post.html",
{"profile": reported_profile, "circlemessage": reported_message})
@login_required
def ban_user(request, user_id):
"""
set ban user and send email to him with reason,TODO provides ban user information to contact with admin user
:param request:
:param user_id:
:return:
"""
try:
user_to_be_ban = User.objects.get(pk=user_id)
except ObjectDoesNotExist:
messages.error(request, "Der Benutzer existiert nicht")
return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
if not (request.user.is_superuser):
messages.error(request, "Sie haben nicht die ntigen Zugangsrechte!")
return HttpResponseRedirect(reverse("home"))
if not (user_to_be_ban.is_active):
messages.info(request, "Der Benutzer ist bereits deaktiviert")
return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
message_for_ban = request.GET.get("text", False)
user_to_be_ban.is_active = False
user_to_be_ban.save()
send_mail("Deaktivieren dein Account", message="Grund zum Deaktivieren: '%s'" % message_for_ban,
html_message="<html><h3>um Deinen Account zu wieder aktivieren, kontaktieren Sie bitte :</h3>" +
"<a href='%s'>Klicke hier um den Account wieder zu aktivieren!</a>." +
"</html>", from_email="AccountAktivierung@vps146949.ovh.net",
recipient_list=(user_to_be_ban.email,))
messages.info(request, "Der Benutzer ist deaktiviert")
return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
@login_required
def setIgnoreReport(request, report_id):
if not (request.user.is_superuser):
messages.error(request, "Sie haben nicht die noetigen Zugangsrechte!")
return HttpResponseRedirect(reverse("home"))
    try:
        report = Report.objects.get(pk=report_id)
    except ObjectDoesNotExist:
        messages.error(request, "Report existiert nicht")
        return HttpResponseRedirect(reverse("admin_frontpage"))
report.closed = True
report.valid = False
report.issuer = request.user
report.save()
messages.success(request, "Report wurde ignoriert")
return HttpResponseRedirect(reverse("admin_frontpage"))
|
{
"content_hash": "e073d84b7fb89a32018f804d4a5daf8a",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 112,
"avg_line_length": 38.86129032258064,
"alnum_prop": 0.681248443595916,
"repo_name": "jmennen/group5",
"id": "78ec8d3007d668254019aa68583b773c3fda1c41",
"size": "12047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Code/buzzit/buzzit_admin/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "136971"
},
{
"name": "HTML",
"bytes": "224970"
},
{
"name": "JavaScript",
"bytes": "187926"
},
{
"name": "PHP",
"bytes": "2199"
},
{
"name": "Python",
"bytes": "95098"
}
],
"symlink_target": ""
}
|
import zmq
from .messenger import Messenger
class Publisher(Messenger):
def _init_socket(self):
self.socket = self.context.socket(zmq.PUB)
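# Usage sketch (assumes the Messenger base class sets up self.context as a
# zmq.Context and calls _init_socket; constructor arguments are elided):
#   pub = Publisher(...)
#   pub.socket.bind('tcp://*:5556')
#   pub.socket.send_string('topic payload')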
|
{
"content_hash": "00bda36cfbbbb80281dfe3dce2f24d63",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 50,
"avg_line_length": 19.25,
"alnum_prop": 0.7142857142857143,
"repo_name": "jtimon/zmqmin",
"id": "63fcad8da071798518a86467e4611c22e72c86a0",
"size": "155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zmqmin/publisher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8388"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
import config
import file_utils
def _get_implementation(classname, vpi, card):
content = []
if card == '1':
shallow_visit = 'false'
if vpi in ['vpiParent', 'vpiInstance', 'vpiModule', 'vpiInterface', 'vpiUse', 'vpiProgram', 'vpiClassDefn', 'vpiPackage', 'vpiUdp']:
# Prevent walking upwards and makes the UHDM output cleaner
# Prevent loop in Standard VPI
shallow_visit = 'true'
if 'func_call' in classname and vpi == 'vpiFunction':
# Prevent stepping inside functions while processing calls (func_call, method_func_call) to them
shallow_visit = 'true'
if 'task_call' in classname and vpi == 'vpiTask':
# Prevent stepping inside tasks while processing calls (task_call, method_task_call) to them
shallow_visit = 'true'
if classname in ['ref_obj']:
# Ref_obj are always printed shallow
shallow_visit = 'true'
content.append(f' if (vpiHandle itr = vpi_handle({vpi}, obj_h)) {{')
content.append(f' visit_object(itr, indent + kLevelIndent, "{vpi}", visited, out, {shallow_visit});')
content.append( ' release_handle(itr);')
content.append( ' }')
else:
content.append(f' if (vpiHandle itr = vpi_iterate({vpi}, obj_h)) {{')
content.append( ' while (vpiHandle obj = vpi_scan(itr)) {')
content.append(f' visit_object(obj, indent + kLevelIndent, "{vpi}", visited, out, false);')
content.append( ' release_handle(obj);')
content.append( ' }')
content.append( ' release_handle(itr);')
content.append( ' }')
return content
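# Illustrative output (assuming a 'module' class with vpi='vpiInstance',
# card='1'; vpiInstance is in the shallow-visit list above):
#   if (vpiHandle itr = vpi_handle(vpiInstance, obj_h)) {
#     visit_object(itr, indent + kLevelIndent, "vpiInstance", visited, out, true);
#     release_handle(itr);
#   }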
def _get_vpi_xxx_visitor(type, vpi, card):
content = []
if vpi == 'vpiValue':
content.append(' s_vpi_value value;')
content.append(' vpi_get_value(obj_h, &value);')
content.append(' if (value.format) {')
content.append(' std::string val = visit_value(&value);')
content.append(' if (!val.empty()) {')
content.append(' stream_indent(out, indent) << val;')
content.append(' }')
content.append(' }')
elif vpi == 'vpiDelay':
content.append(' s_vpi_delay delay;')
content.append(' vpi_get_delays(obj_h, &delay);')
content.append(' if (delay.da != nullptr) {')
content.append(' stream_indent(out, indent) << visit_delays(&delay);')
content.append(' }')
elif (card == '1') and (vpi not in ['vpiType', 'vpiFile', 'vpiLineNo', 'vpiColumnNo', 'vpiEndLineNo', 'vpiEndColumnNo']):
if type == 'string':
content.append(f' if (const char* s = vpi_get_str({vpi}, obj_h))')
content.append(f' stream_indent(out, indent) << "|{vpi}:" << s << "\\n";') # no std::endl, avoid flush
else:
content.append(f' if (const int n = vpi_get({vpi}, obj_h))')
content.append(f' stream_indent(out, indent) << "|{vpi}:" << n << "\\n";') # no std::endl, avoid flush
return content
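# Illustrative output (assuming a hypothetical string property 'vpiName'
# with card='1'):
#   if (const char* s = vpi_get_str(vpiName, obj_h))
#     stream_indent(out, indent) << "|vpiName:" << s << "\n";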
def generate(models):
visit_object_body = []
private_visitor_bodies = []
ignored_objects = set(['vpiNet'])
for model in models.values():
modeltype = model['type']
if modeltype == 'group_def':
continue
classname = model['name']
if classname in ['net_drivers', 'net_loads']:
continue
vpi_name = config.make_vpi_name(classname)
if vpi_name not in ignored_objects:
visit_object_body.append(f' case {vpi_name}: visit_{classname}(obj_h, indent, relation, visited, out, shallowVisit); break;')
private_visitor_bodies.append(f'static void visit_{classname}(vpiHandle obj_h, int indent, const char *relation, VisitedContainer* visited, std::ostream& out, bool shallowVisit) {{')
# Make sure vpiParent is called before the base class visit.
if modeltype != 'class_def':
private_visitor_bodies.extend(_get_implementation(classname, 'vpiParent', '1'))
baseclass = model.get('extends', None)
if baseclass:
private_visitor_bodies.append(f' visit_{baseclass}(obj_h, indent, relation, visited, out, shallowVisit);')
if modeltype != 'class_def':
private_visitor_bodies.extend(_get_vpi_xxx_visitor('string', 'vpiFile', '1'))
type_specified = False
for key, value in model.allitems():
if key == 'property':
name = value.get('name')
vpi = value.get('vpi')
type = value.get('type')
card = value.get('card')
type_specified = name == 'type' or type_specified
private_visitor_bodies.extend(_get_vpi_xxx_visitor(type, vpi, card))
elif key in ['class', 'obj_ref', 'class_ref', 'group_ref']:
vpi = value.get('vpi')
card = value.get('card')
private_visitor_bodies.extend(_get_implementation(classname, vpi, card))
if not type_specified and (modeltype == 'obj_def'):
private_visitor_bodies.extend(_get_vpi_xxx_visitor('unsigned int', 'vpiType', '1'))
private_visitor_bodies.append(f'}}')
private_visitor_bodies.append('')
visitors = [ f' switch (objectType) {{' ] + sorted(visit_object_body) + [ f' }}' ]
# vpi_visitor.cpp
with open(config.get_template_filepath('vpi_visitor.cpp'), 'rt') as strm:
file_content = strm.read()
file_content = file_content.replace('<OBJECT_VISITORS>', '\n'.join(visitors))
file_content = file_content.replace('<PRIVATE_OBJECT_VISITORS>', '\n'.join(private_visitor_bodies))
file_utils.set_content_if_changed(config.get_output_source_filepath('vpi_visitor.cpp'), file_content)
return True
def _main():
import loader
config.configure()
models = loader.load_models()
return generate(models)
if __name__ == '__main__':
import sys
sys.exit(0 if _main() else 1)
|
{
"content_hash": "6e359d8dab9483017b68e38aeb633500",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 190,
"avg_line_length": 40.158940397350996,
"alnum_prop": 0.5839379947229552,
"repo_name": "chipsalliance/UHDM",
"id": "4933f64e364d09ee2809c2882c4c0bea972bafe5",
"size": "6064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/vpi_visitor_cpp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "116563"
},
{
"name": "C++",
"bytes": "495591"
},
{
"name": "CMake",
"bytes": "10936"
},
{
"name": "Cap'n Proto",
"bytes": "202"
},
{
"name": "Makefile",
"bytes": "1441"
},
{
"name": "Nix",
"bytes": "665"
},
{
"name": "Python",
"bytes": "107869"
},
{
"name": "Tcl",
"bytes": "96905"
}
],
"symlink_target": ""
}
|
from infotv.policy import BasePolicy
class TraconPolicy(BasePolicy):
def get_event_slug(self, request, slug):
return slug
def can_edit_slides(self, request):
return request.user.is_staff
def can_post_datum(self, request):
return request.user.is_staff
|
{
"content_hash": "95086bda7781e32c130120a739df79c7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 44,
"avg_line_length": 24.25,
"alnum_prop": 0.6872852233676976,
"repo_name": "tracon/infotv-tracon",
"id": "c64bd77b443d5c3c7951a7940f89f5abd603a05b",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infotv_tracon/policy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "897"
},
{
"name": "Python",
"bytes": "13365"
}
],
"symlink_target": ""
}
|
from flask import Blueprint,request,redirect,url_for, render_template
from bs4 import BeautifulSoup
import requests
from urllib.parse import urljoin
from URLOptimize import URLOptimize
links_blueprint = Blueprint('links_blueprint',__name__)
@links_blueprint.route('/links')
def links():
url = request.args.get('url')
    if not url:
        # Bug fix: the redirect must be returned, otherwise execution falls
        # through and requests.get(None) fails below.
        return redirect(url_for('root'))
html_page = requests.get(url).text
soup = BeautifulSoup(html_page,'lxml')
urls = []
for link in soup.findAll('a'):
# resolve relative
link_url = urljoin(url,link.get('href'))
        urls.append(link_url)
urls_dict = URLOptimize.optimize_urls(urls)
return render_template('links.html',main_url = url,urls_dict = urls_dict)
|
{
"content_hash": "d304b3f06078383cf6861f161dc1d8a4",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 29.36,
"alnum_prop": 0.6839237057220708,
"repo_name": "farseenabdulsalam/wap-proxy",
"id": "fb8a53e1651c048185c0c2d8e6babefc66403686",
"size": "734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LinksBlueprint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4277"
},
{
"name": "Python",
"bytes": "17372"
}
],
"symlink_target": ""
}
|
import pyCliConf
import inspect
def main():
    print inspect.getsourcelines(pyCliConf.CliConf)

if __name__ == '__main__':
    main()
|
{
"content_hash": "7b4fe685a90db32617049c0282401040",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 51,
"avg_line_length": 15,
"alnum_prop": 0.7714285714285715,
"repo_name": "JNPRAutomate/pyCliConf",
"id": "f1f869224266cef5f0c993a605b4bb0956513fd1",
"size": "105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyCliConf/command_line.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "384"
},
{
"name": "Python",
"bytes": "28088"
}
],
"symlink_target": ""
}
|
l=[0,1,2]
i=iter(l)
print i
print i.next()
print i.next()
print i.next()
print i.next()
'''
$ python exceptions_StopIteration.py
<listiterator object at 0x10045f650>
0
1
2
Traceback (most recent call last):
File "exceptions_StopIteration.py", line 19, in <module>
print i.next()
StopIteration
'''
|
{
"content_hash": "9e7c5f2b3b233882f6139dd9f3af9ac7",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 58,
"avg_line_length": 14.571428571428571,
"alnum_prop": 0.6993464052287581,
"repo_name": "lmokto/allexceptions",
"id": "934403beaaf4b180e03299ef44678f01f5d9fa9a",
"size": "306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exceptions_StopIteration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13114"
}
],
"symlink_target": ""
}
|
import cjson
import os
import re
import string
import time
import rdflib
from rdflib.Graph import Graph
from datetime import date
from Axon.Ipc import producerFinished
from Axon.Ipc import shutdownMicroprocess
from Axon.ThreadedComponent import threadedcomponent
from Kamaelia.Apps.SocialBookmarks.Print import Print
from Kamaelia.Apps.SocialBookmarks.DBWrapper import DBWrapper
class Requester(DBWrapper,threadedcomponent):
Inboxes = {
"inbox" : "",
"control" : "",
"whatson" : "Receives back what's currently on a channel - [pid,title,timeoffset,duration,expectedstarttime]",
"proginfo" : "Receives back raw RDF data for a PID",
"search" : "Receives back raw Twitter people search JSON",
"datain" : "URL contents returns from getter component",
}
Outboxes = {
"outbox" : "Sends out keywords and pid(s) for streaming API connections - [[keyword,keyword],[pid,pid,pid]]",
"signal" : "",
"whatson" : "Requests current programmes by sending a channel name",
"proginfo" : "Requests RDF format data for a pid - [pid, 'rdf']",
"search" : "Sends people's names for Twitter username identification",
"dataout" : "URL requests to getter component",
}
def __init__(self, channel,dbuser,dbpass):
super(Requester, self).__init__(dbuser=dbuser,dbpass=dbpass)
self.channel = channel
# Keep a record of the current PID for each channel here
self.channels = {
"bbcone" : "",
"bbctwo" : "",
"bbcthree" : "",
"bbcfour" : "",
"cbbc" : "",
"cbeebies" : "",
"bbcnews" : "",
"radio1" : "",
"radio2" : "",
"radio3" : "",
"radio4" : "",
"5live" : "",
"worldservice" : "",
"6music" : "",
"radio7" : "",
"1xtra" : "",
"bbcparliament" : "",
"asiannetwork" : "",
"sportsextra" : ""
}
# Brand PIDs associated with programmes. New progs don't always have brands, but it's a start
# Ideally this would be replaced by the BBC Buzz database, but that's not yet accessible AFAIK and doesn't always store tags for new programmes.
# This doesn't help in the channel case where for example radio 1 uses @bbcr1
self.officialbrandtags = {
"b00vc3rz" : ["#genius","bbcgenius"], # Genius with Dave Gorman
"b006t1q9" : ["#bbcqt","bbcquestiontime"], # Question Time
"b009w2w3" : ["#laterjools", "bbclater"], # Later with Jools Holland
"b00lwxj1" : ["bbcbang"], # Bang goes the theory
"b006m8dq" : ["#scd", "bbcstrictly"], # Strictly come dancing
"b006ml0g" : ["qikipedia", "#qi"], # QI
"b00j4j7g" : ["#f1"], # Formula 1
"b006wkqb" : ["chrisdjmoyles","chrismoylesshow"], # Chris Moyles Breakfast Show
"b0071b63" : ["bbcapprentice"], # The Apprentice
"b006mg74" : ["bbcwatchdog"], # Watchdog
"b006v5tb" : ['bbcbreakfast'], # Breakfast
"b006mkw3" : ["hignfy","bbchignfy"], # Have I Got News For You
"b008dk4b" : ["childreninneed","bbccin","#cin","#pudsey","pudseybear"], # Children in Need
}
# Series PIDs associated with programmes. ONLY used where prog doesn't have a brand
self.officialseriestags = {
"b00v2z3s" : ["#askrhod"], # Ask Rhod Gilbert
"b00vd7qz" : ['film2010'], # Film 2010
"b00vsw36" : ['manlab'] # James May's Man Lab
}
self.firstrun = True
def finished(self):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
return True
return False
def doStuff(self, channel):
# Check what's on for each channel
self.send(channel, "whatson")
while not self.dataReady("whatson"):
pass
data = self.recv("whatson")
if data == None:
pid = None
else:
pid = data[0]
title = data[1]
offset = data[2]
duration = data[3]
expectedstart = data[4]
if pid != self.channels[channel]:
# Perhaps just do a duplicate scan before creating Twitter stream
if pid == None:
self.channels[channel] = None
Print (channel, ": Off Air")
else:
self.channels[channel] = pid
self.send(["http://www.bbc.co.uk/programmes/" + pid + ".rdf"], "dataout")
while not self.dataReady("datain"):
pass
recvdata = self.recv("datain")
if recvdata[0] == "OK":
programmedata = recvdata[1]
else:
# Fake programme data to prevent crash - not ideal
programmedata = '<?xml version="1.0" encoding="utf-8"?> \
<rdf:RDF xmlns:rdf = "http://www.w3.org/1999/02/22-rdf-syntax-ns#" \
xmlns:rdfs = "http://www.w3.org/2000/01/rdf-schema#" \
xmlns:owl = "http://www.w3.org/2002/07/owl#" \
xmlns:foaf = "http://xmlns.com/foaf/0.1/" \
xmlns:po = "http://purl.org/ontology/po/" \
xmlns:mo = "http://purl.org/ontology/mo/" \
xmlns:skos = "http://www.w3.org/2008/05/skos#" \
xmlns:time = "http://www.w3.org/2006/time#" \
xmlns:dc = "http://purl.org/dc/elements/1.1/" \
xmlns:dcterms = "http://purl.org/dc/terms/" \
xmlns:wgs84_pos= "http://www.w3.org/2003/01/geo/wgs84_pos#" \
xmlns:timeline = "http://purl.org/NET/c4dm/timeline.owl#" \
xmlns:event = "http://purl.org/NET/c4dm/event.owl#"> \
</rdf:RDF>'
# RDF reader needs to read from a file so write out first
# Alternative is to read from a URL, but this lacks proper proxy support
filepath = "tempRDF.txt"
file = open(filepath, 'w')
file.write(programmedata)
file.close()
g = Graph()
# This is a temporary proxy fix. A URL could be put here instead
g.parse("tempRDF.txt")
# Identify the brand and whether there are any official hashtags
twittags = list()
for bid in g.subjects(object = rdflib.URIRef('http://purl.org/ontology/po/Brand')):
# bid is Brand ID
bidmod = bid.replace("#programme","")
bidmod = str(bidmod.replace("file:///programmes/",""))
if self.officialbrandtags.has_key(bidmod):
twittags = self.officialbrandtags[bidmod]
break
# Identify the series and whether there are any official hashtags
if len(twittags) == 0:
# Identify the brand and whether there are any official hashtags
for sid in g.subjects(object = rdflib.URIRef('http://purl.org/ontology/po/Series')):
# sid is Series ID
sidmod = sid.replace("#programme","")
sidmod = str(sidmod.replace("file:///programmes/",""))
if self.officialseriestags.has_key(sidmod):
twittags = self.officialseriestags[sidmod]
break
vidmod = ""
so = g.subject_objects(predicate=rdflib.URIRef('http://purl.org/ontology/po/version'))
# Pick a version, any version - for this which one doesn't matter
for x in so:
# vid is version id
vid = x[1]
vidmod = vid.replace("#programme","")
vidmod = vidmod.replace("file:///programmes/","")
break
# Got version, now get people
self.send(["http://www.bbc.co.uk/programmes/" + vidmod + ".rdf"], "dataout")
while not self.dataReady("datain"):
pass
recvdata = self.recv("datain")
if recvdata[0] == "OK":
versiondata = recvdata[1]
else:
versiondata = '<?xml version="1.0" encoding="utf-8"?> \
<rdf:RDF xmlns:rdf = "http://www.w3.org/1999/02/22-rdf-syntax-ns#" \
xmlns:rdfs = "http://www.w3.org/2000/01/rdf-schema#" \
xmlns:owl = "http://www.w3.org/2002/07/owl#" \
xmlns:foaf = "http://xmlns.com/foaf/0.1/" \
xmlns:po = "http://purl.org/ontology/po/" \
xmlns:mo = "http://purl.org/ontology/mo/" \
xmlns:skos = "http://www.w3.org/2008/05/skos#" \
xmlns:time = "http://www.w3.org/2006/time#" \
xmlns:dc = "http://purl.org/dc/elements/1.1/" \
xmlns:dcterms = "http://purl.org/dc/terms/" \
xmlns:wgs84_pos= "http://www.w3.org/2003/01/geo/wgs84_pos#" \
xmlns:timeline = "http://purl.org/NET/c4dm/timeline.owl#" \
xmlns:event = "http://purl.org/NET/c4dm/event.owl#"> \
</rdf:RDF>'
filepath = "tempRDF.txt"
file = open(filepath, 'w')
file.write(versiondata)
file.close()
g = Graph()
g.parse("tempRDF.txt")
# Identify if this is a change of programme, or the first time we've checked what's on for Print clarity
if self.firstrun:
Print (channel , ": " + title)
else:
Print (channel , ": Changed to - " , title)
# Minor alterations
title = title.replace("&","and")
if ":" in title:
titlebits = title.split(":")
title = titlebits[0]
# Saving a copy here so apostrophes etc can be used in the Twitter people search
titlesave = title
# Remove punctuation
                for item in """!"#$%()*+,-./;<=>?@[\\]^_'`{|}~""":
title = title.replace(item,"")
keywords = dict()
# Save keywords next to a descriptor of what they are
keywords[pid] = "PID"
# Add official hashtags to the list
for tag in twittags:
keywords[tag] = "Twitter"
# Duplicates will be removed later
if string.find(title,"The",0,3) != -1:
                    # Bug fix: strip the leading "The " before collapsing
                    # whitespace; collapsing first removed the space, so the
                    # replace never matched.
                    newtitle = re.sub("\s+","",string.replace(title,"The ","",1))
keywords[channel] = "Channel"
keywords["#" + string.lower(re.sub("\s+","",title))] = "Title"
# Check for and remove year too
keywords["#" + string.replace(string.lower(re.sub("\s+","",title))," " + str(date.today().year),"",1)] = "Title"
keywords['#' + string.lower(re.sub("\s+","",newtitle))] = "Title"
# Check for and remove year too
keywords['#' + string.replace(string.lower(re.sub("\s+","",newtitle))," " + str(date.today().year),"",1)] = "Title"
else:
keywords[channel] = "Channel"
keywords["#" + string.lower(re.sub("\s+","",title))] = "Title"
keywords["#" + string.replace(string.lower(re.sub("\s+","",title))," " + str(date.today().year),"",1)] = "Title"
allwordtitle = string.replace(title,"The ","",1)
allwordtitle = allwordtitle.lower()
# Remove current year from events
allwordtitle = allwordtitle.replace(" " + str(date.today().year),"",1)
titlewords = allwordtitle.split()
if len(titlewords) > 1:
keywords[allwordtitle] = "Title"
else:
# Trial fix for issue of one word titles producing huge amounts of data
keywords[allwordtitle + "^" + "bbc"] = "Title"
keywords["#" + re.sub("\s+","",allwordtitle)] = "Title"
numwords = dict({"one" : 1, "two" : 2, "three": 3, "four" : 4, "five": 5, "six" : 6, "seven": 7})
for word in numwords:
                    if word in channel.lower() and channel != "asiannetwork": # Bug fix: "asiannetwork" contains "two" and would become "asianne2rk"
numchannel = string.replace(channel.lower(),word,str(numwords[word]))
keywords[numchannel] = "Channel"
break
if str(numwords[word]) in channel.lower():
numchannel = string.replace(channel.lower(),str(numwords[word]),word)
keywords[numchannel] = "Channel"
break
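                # Example (illustrative): channel 'bbcone' also gets 'bbc1'
                # as a Channel keyword, and 'radio1' gets 'radioone'.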
# Load NameCache (people we've already searched for on Twitter to avoid hammering PeopleSearch)
save = False
try:
homedir = os.path.expanduser("~")
file = open(homedir + "/namecache.conf",'r')
save = True
except IOError, e:
Print ("Failed to load name cache - will attempt to create a new file: " , e)
if save:
raw_config = file.read()
file.close()
try:
config = cjson.decode(raw_config)
except cjson.DecodeError, e:
config = dict()
else:
config = dict()
s = g.subjects(predicate=rdflib.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),object=rdflib.URIRef('http://purl.org/ontology/po/Role'))
for x in s:
rid = g.value(predicate=rdflib.URIRef('http://purl.org/ontology/po/role'),object=rdflib.BNode(x))
pid = g.value(subject=rdflib.BNode(rid),predicate=rdflib.URIRef('http://purl.org/ontology/po/participant'))
firstname = str(g.value(subject=rdflib.BNode(pid),predicate=rdflib.URIRef('http://xmlns.com/foaf/0.1/givenName')))
lastname = str(g.value(subject=rdflib.BNode(pid),predicate=rdflib.URIRef('http://xmlns.com/foaf/0.1/familyName')))
if config.has_key(firstname + " " + lastname):
# Found a cached value
if config[firstname + " " + lastname] != "":
keywords[config[firstname + " " + lastname]] = "Twitter"
else:
# Not cached yet - new request
self.send(firstname + " " + lastname, "search")
while not self.dataReady("search"):
pass
twitdata = self.recv("search")
screenname = ""
try:
for user in twitdata:
# Only use this Twitter screen name if there's a good chance they're the person we're after
if user.has_key('verified'):
if (user['verified'] == True or user['followers_count'] > 10000) and string.lower(user['name']) == string.lower(firstname + " " + lastname):
screenname = user['screen_name']
keywords[screenname] = "Twitter"
break
except AttributeError, e:
pass
config[firstname + " " + lastname] = screenname
keywords[firstname + " " + lastname] = "Participant"
s = g.subjects(predicate=rdflib.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),object=rdflib.URIRef('http://purl.org/ontology/po/Character'))
for x in s:
character = str(g.value(subject=rdflib.BNode(x),predicate=rdflib.URIRef('http://xmlns.com/foaf/0.1/name')))
rid = g.value(predicate=rdflib.URIRef('http://purl.org/ontology/po/role'),object=rdflib.BNode(x))
pid = g.value(subject=rdflib.BNode(rid),predicate=rdflib.URIRef('http://purl.org/ontology/po/participant'))
firstname = str(g.value(subject=rdflib.BNode(pid),predicate=rdflib.URIRef('http://xmlns.com/foaf/0.1/givenName')))
lastname = str(g.value(subject=rdflib.BNode(pid),predicate=rdflib.URIRef('http://xmlns.com/foaf/0.1/familyName')))
# This ^ is a temporary fix until I work out a better DB structure
keywords[character + "^" + channel] = "Character"
keywords[character + "^" + title] = "Character"
if " " in character:
# Looks like we have a firstname + surname situation
charwords = character.split()
if charwords[0] != "Dr" and charwords[0] != "Miss" and charwords[0] != "Mr" and charwords[0] != "Mrs" and charwords[0] != "Ms" and charwords[0] != "The":
# As long as the first word isn't a title, add it as a first name
# This ^ is a temporary fix until I work out a better DB structure
keywords[charwords[0] + "^" + channel] = "Character"
keywords[charwords[0] + "^" + title] = "Character"
elif len(charwords) > 2:
# If the first word was a title, and the second word isn't a surname (checked by > 2) add the first name
# This ^ is a temporary fix until I work out a better DB structure
keywords[charwords[1] + "^" + channel] = "Character"
keywords[charwords[1] + "^" + title] = "Character"
if config.has_key(firstname + " " + lastname):
# Found a cached value
if config[firstname + " " + lastname] != "":
keywords[config[firstname + " " + lastname]] = "Actor"
else:
# Not cached yet - new request
self.send(firstname + " " + lastname, "search")
while not self.dataReady("search"):
pass
twitdata = self.recv("search")
screenname = ""
try:
for user in twitdata:
if user.has_key('verified'):
if (user['verified'] == True or user['followers_count'] > 10000) and string.lower(user['name']) == string.lower(firstname + " " + lastname):
screenname = user['screen_name']
keywords[screenname] = "Twitter"
break
except AttributeError, e:
pass
config[firstname + " " + lastname] = screenname
keywords[firstname + " " + lastname] = "Actor"
# Radio appears to have been forgotten about a bit in RDF / scheduling at the mo
# So, let's do some extra queries and see if the show title is a person's name on Twitter
if "radio" in channel or "6music" in channel or "asiannetwork" in channel or "sportsextra" in channel or "worldservice" in channel:
# However, radio shows are often named after the DJ - the cases where this isn't true will cause problems, as the title will still be cached in the JSON name cache - DOH! TODO
if config.has_key(titlesave):
# Found a cached value
if config[titlesave] != "":
keywords[config[titlesave]] = "Twitter"
elif len(titlesave.split()) < 4: # Prevent some shows getting through at least - restricts people's names to three words
self.send(titlesave, "search")
while not self.dataReady("search"):
pass
twitdata = self.recv("search")
screenname = ""
try:
for user in twitdata:
if user.has_key('verified'):
if (user['verified'] == True or user['followers_count'] > 10000) and string.lower(user['name']) == titlesave.lower():
screenname = user['screen_name']
keywords[screenname] = "Twitter"
break
except AttributeError, e:
pass
config[titlesave] = screenname
try:
file = open(homedir + "/namecache.conf",'w')
raw_config = cjson.encode(config)
file.write(raw_config)
file.close()
except IOError, e:
Print ("Failed to save name cache - could cause rate limit problems")
return [keywords,data]
else:
if pid == None:
Print(channel , ": No change - Off Air")
else:
Print (channel , ": No change - " , title)
def main(self):
self.dbConnect()
oldkeywords = None
while not self.finished():
Print ("### Checking current programmes ###")
if self.channel != "all":
oldpid = self.channels[self.channel]
if oldpid == None:
self.db_update("""UPDATE programmes SET imported = 1 WHERE channel = %s""",(self.channel))
data = self.doStuff(self.channel)
if data != None:
keywords = data[0]
pid = data[1][0]
title = data[1][1]
offset = data[1][2]
duration = data[1][3]
timestamp = data[1][4]
utcoffset = data[1][5]
self.db_update("""UPDATE programmes SET imported = 1 WHERE pid != %s AND channel = %s""",(pid,self.channel))
self.db_select("""SELECT channel FROM programmes WHERE pid = %s AND timestamp = %s""",(pid,timestamp))
progentrytest = self.db_fetchone()
self.db_select("""SELECT duration FROM programmes_unique WHERE pid = %s""",(pid))
progtest2 = self.db_fetchone()
if progentrytest == None:
self.db_insert("""INSERT INTO programmes (pid,timediff,timestamp,utcoffset,channel) VALUES (%s,%s,%s)""", (pid,offset,timestamp,utcoffset,self.channel))
if progtest2 == None:
self.db_insert("""INSERT INTO programmes_unique (pid,title,duration) VALUES (%s,%s,%s)""", (pid,title,duration))
for word in keywords:
self.db_insert("""INSERT INTO keywords (pid,keyword,type) VALUES (%s,%s,%s)""", (pid,word,keywords[word]))
else:
# Fix for programmes where the duration is changed last minute
if progtest2[0] < duration:
#self.db_update("""UPDATE programmes SET duration = %s WHERE pid = %s AND timestamp = %s""",(duration,pid,timestamp))
self.db_update("""UPDATE programmes_unique SET duration = %s WHERE pid = %s""",(duration,pid))
keywords = list()
else:
keywords = None
self.db_select("""SELECT keyword FROM keywords WHERE pid = %s""",(pid))
keywordquery = self.db_fetchall()
for keyword in keywordquery:
# This ^ is a temporary fix until I work out a better DB structure
if "^" in keyword[0]:
keywords.append(string.replace(keyword[0],"^"," "))
else:
keywords.append(keyword[0])
if (keywords != oldkeywords) & (keywords != None):
Print(keywords)
self.send([keywords,[pid]],"outbox")
pass
else:
# Still need to fix the 'changed to - off air' problem, but it isn't causing twitter keyword redos thankfully (purely a Printing error)
# Possible issue will start to occur if programmes change too often - tweet stream will miss too much
keywords = list()
for channel in self.channels:
oldpid = self.channels[channel]
if oldpid == None:
self.db_update("""UPDATE programmes SET imported = 1 WHERE channel = %s""",(channel))
data = self.doStuff(channel)
if data != None:
keywordappender = data[0]
pid = data[1][0]
title = data[1][1]
offset = data[1][2]
duration = data[1][3]
timestamp = data[1][4]
utcoffset = data[1][5]
self.db_update("""UPDATE programmes SET imported = 1 WHERE pid != %s AND channel = %s""",(pid,channel))
self.db_select("""SELECT channel FROM programmes WHERE pid = %s AND timestamp = %s""",(pid,timestamp))
progentrytest = self.db_fetchone()
self.db_select("""SELECT duration FROM programmes_unique WHERE pid = %s""",(pid))
progtest2 = self.db_fetchone()
if progentrytest == None:
self.db_insert("""INSERT INTO programmes (pid,timediff,timestamp,utcoffset,channel) VALUES (%s,%s,%s,%s,%s)""", (pid,offset,timestamp,utcoffset,channel))
if progtest2 == None:
self.db_insert("""INSERT INTO programmes_unique (pid,title,duration) VALUES (%s,%s,%s)""", (pid,title,duration))
for word in keywordappender:
self.db_insert("""INSERT INTO keywords (pid,keyword,type) VALUES (%s,%s,%s)""", (pid,word,keywordappender[word]))
else:
# Fix for programmes where the duration is changed last minute
if progtest2[0] < duration:
#self.db_update("""UPDATE programmes SET duration = %s WHERE pid = %s AND timestamp = %s""",(duration,pid,timestamp))
self.db_update("""UPDATE programmes_unique SET duration = %s WHERE pid = %s""",(duration,pid))
currentpids = list()
for channel in self.channels:
if self.channels[channel] != "" and self.channels[channel] != None:
currentpids.append(self.channels[channel])
for pid in currentpids:
self.db_select("""SELECT keyword FROM keywords WHERE pid = %s""",(pid))
keywordquery = self.db_fetchall()
for keyword in keywordquery:
# This ^ is a temporary fix until I work out a better DB structure
if "^" in keyword[0]:
keywords.append(string.replace(keyword[0],"^"," "))
else:
keywords.append(keyword[0])
# Remove repeated keywords here
if len(keywords) != 0:
keywords = list(set(keywords))
if (keywords != oldkeywords) & (len(keywords) != 0):
Print(keywords)
self.send([keywords,currentpids],"outbox") #epicfail: now need to send all pids, and search through them further down the line
pass
oldkeywords = keywords
# At this point, find the version tags to allow further info finding
# Then, pass keywords to TwitterStream. DataCollector will pick up the data
# Must deal with errors passed back from TwitterStream here
self.firstrun = False
time.sleep(30) # Wait for 30 secs - don't need as much given the wait time between /programmes requests
# Could always get this to wait until the programme is due to change, but this *may* miss last minute schedule changes
|
{
"content_hash": "1c7a3fd0096c319ef996f34b1c7e30e9",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 181,
"avg_line_length": 56.11666666666667,
"alnum_prop": 0.4767184767184767,
"repo_name": "sparkslabs/kamaelia",
"id": "d51872a8615efead7feeb993ddd8898bb237b899",
"size": "30579",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/SocialBookmarks/Requester.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3814"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "M4",
"bytes": "12224"
},
{
"name": "Makefile",
"bytes": "150947"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "OCaml",
"bytes": "643"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "504"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Python",
"bytes": "18900785"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "707588"
}
],
"symlink_target": ""
}
|
"""Tests for GBDT estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from tensorflow.contrib.boosted_trees.estimator_batch import estimator
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column as contrib_feature_column
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.training import checkpoint_utils
def _train_input_fn():
features = {"x": constant_op.constant([[2.], [1.], [1.]])}
label = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
return features, label
def _multiclass_train_input_fn():
features = {
"x": constant_op.constant([[2.], [1.], [1.], [5.], [3.5], [4.6], [3.5]])
}
label = constant_op.constant([[1], [0], [0], [2], [2], [0], [1]],
dtype=dtypes.int32)
return features, label
def _ranking_train_input_fn():
features = {
"a.f1": constant_op.constant([[3.], [0.3], [1.]]),
"a.f2": constant_op.constant([[0.1], [3.], [1.]]),
"b.f1": constant_op.constant([[13.], [0.4], [5.]]),
"b.f2": constant_op.constant([[1.], [3.], [0.01]]),
}
label = constant_op.constant([[0], [0], [1]], dtype=dtypes.int32)
return features, label
def _eval_input_fn():
features = {"x": constant_op.constant([[1.], [2.], [2.]])}
label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
return features, label
def _infer_ranking_train_input_fn():
features = {
"f1": constant_op.constant([[3.], [2], [1.]]),
"f2": constant_op.constant([[0.1], [3.], [1.]])
}
return features, None
_QUANTILE_REGRESSION_SIZE = 1000
def _quantile_regression_input_fns(two_dimension=False):
# The data generation is taken from
# http://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
def g(x):
"""The function to predict."""
return x * np.cos(x)
# Training data.
x = np.atleast_2d(np.random.uniform(0, 10.0,
size=_QUANTILE_REGRESSION_SIZE)).T
x = x.astype(np.float32)
# Labels.
if not two_dimension:
y = f(x).ravel()
else:
y = np.column_stack((f(x).ravel(), g(x).ravel()))
# Add random noise.
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y_original = y.astype(np.float32)
if not two_dimension:
y = y.reshape(_QUANTILE_REGRESSION_SIZE, 1)
train_input_fn = numpy_io.numpy_input_fn(
x=x,
y=y,
batch_size=_QUANTILE_REGRESSION_SIZE,
num_epochs=None,
shuffle=True)
# Test on the training data to make sure the predictions are calibrated.
test_input_fn = numpy_io.numpy_input_fn(
x=x,
y=y,
batch_size=_QUANTILE_REGRESSION_SIZE,
num_epochs=1,
shuffle=False)
return train_input_fn, test_input_fn, y_original
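# The quantile-regression tests below check calibration on the training data:
# for quantile q, roughly a fraction q of the labels should fall below the
# predictions, asserted within a +/- 3% band (e.g. 0.92..0.98 for q=0.95).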
class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
def setUp(self):
self._export_dir_base = tempfile.mkdtemp() + "export/"
gfile.MkDir(self._export_dir_base)
def _assert_checkpoint(self, model_dir, global_step):
reader = checkpoint_utils.load_checkpoint(model_dir)
self.assertEqual(global_step, reader.get_tensor(ops.GraphKeys.GLOBAL_STEP))
def testFitAndEvaluateDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")])
classifier.fit(input_fn=_train_input_fn, steps=15)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
def testThatLeafIndexIsInPredictions(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")],
output_leaf_index=True)
classifier.fit(input_fn=_train_input_fn, steps=15)
result_iter = classifier.predict(input_fn=_eval_input_fn)
for prediction_dict in result_iter:
self.assertTrue("leaf_index" in prediction_dict)
self.assertTrue("logits" in prediction_dict)
def testFitAndEvaluateDontThrowExceptionWithCoreForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
# Use core head
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
model = estimator.GradientBoostedDecisionTreeEstimator(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")],
use_core_libs=True)
model.fit(input_fn=_train_input_fn, steps=15)
model.evaluate(input_fn=_eval_input_fn, steps=1)
model.export(self._export_dir_base)
def testFitAndEvaluateDontThrowExceptionWithCoreForClassifier(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")],
use_core_libs=True)
classifier.fit(input_fn=_train_input_fn, steps=15)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
def testFitAndEvaluateDontThrowExceptionWithCoreForRegressor(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
regressor = estimator.GradientBoostedDecisionTreeRegressor(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")],
use_core_libs=True)
regressor.fit(input_fn=_train_input_fn, steps=15)
regressor.evaluate(input_fn=_eval_input_fn, steps=1)
regressor.export(self._export_dir_base)
def testRankingDontThrowExceptionForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
model = estimator.GradientBoostedDecisionTreeRanker(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
use_core_libs=True,
feature_columns=[
core_feature_column.numeric_column("f1"),
core_feature_column.numeric_column("f2")
],
ranking_model_pair_keys=("a", "b"))
model.fit(input_fn=_ranking_train_input_fn, steps=1000)
model.evaluate(input_fn=_ranking_train_input_fn, steps=1)
model.predict(input_fn=_infer_ranking_train_input_fn)
def testDoesNotOverrideGlobalSteps(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 2
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")],
output_leaf_index=False)
classifier.fit(input_fn=_train_input_fn, steps=15)
# When global steps are not overridden, 5 steps are used.
self._assert_checkpoint(classifier.model_dir, global_step=5)
def testOverridesGlobalSteps(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 2
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")],
output_leaf_index=False,
override_global_step_value=10000000)
classifier.fit(input_fn=_train_input_fn, steps=15)
self._assert_checkpoint(classifier.model_dir, global_step=10000000)
def testFitAndEvaluateMultiClassTreePerClassDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
n_classes=learner_config.num_classes,
num_trees=1,
examples_per_layer=7,
model_dir=model_dir,
config=config,
feature_columns=[contrib_feature_column.real_valued_column("x")])
classifier.fit(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
result_iter = classifier.predict(input_fn=_eval_input_fn)
for prediction_dict in result_iter:
self.assertTrue("classes" in prediction_dict)
def testFitAndEvaluateMultiClassDiagonalDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
n_classes=learner_config.num_classes,
num_trees=1,
examples_per_layer=7,
model_dir=model_dir,
config=config,
center_bias=False,
feature_columns=[contrib_feature_column.real_valued_column("x")])
classifier.fit(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
result_iter = classifier.predict(input_fn=_eval_input_fn)
for prediction_dict in result_iter:
self.assertTrue("classes" in prediction_dict)
def testFitAndEvaluateMultiClassFullDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
n_classes=learner_config.num_classes,
num_trees=1,
examples_per_layer=7,
model_dir=model_dir,
config=config,
center_bias=False,
feature_columns=[contrib_feature_column.real_valued_column("x")])
classifier.fit(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
classifier.export(self._export_dir_base)
result_iter = classifier.predict(input_fn=_eval_input_fn)
for prediction_dict in result_iter:
self.assertTrue("classes" in prediction_dict)
# One dimensional quantile regression.
def testQuantileRegression(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 3
learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE
learner_config.constraints.min_node_weight = 1 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l2 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l1 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.tree_complexity = (
1.0 / _QUANTILE_REGRESSION_SIZE)
train_input_fn, test_input_fn, y = _quantile_regression_input_fns()
# 95th percentile.
model_upper = estimator.GradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.95],
learner_config=learner_config,
num_trees=100,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_upper.fit(input_fn=train_input_fn, steps=1000)
result_iter = model_upper.predict(input_fn=test_input_fn)
upper = []
for prediction_dict in result_iter:
upper.append(prediction_dict["scores"])
frac_below_upper = round(1. * np.count_nonzero(upper > y) / len(y), 3)
# +/- 3%
self.assertTrue(frac_below_upper >= 0.92)
self.assertTrue(frac_below_upper <= 0.98)
train_input_fn, test_input_fn, _ = _quantile_regression_input_fns()
model_lower = estimator.GradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.05],
learner_config=learner_config,
num_trees=100,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_lower.fit(input_fn=train_input_fn, steps=1000)
result_iter = model_lower.predict(input_fn=test_input_fn)
lower = []
for prediction_dict in result_iter:
lower.append(prediction_dict["scores"])
frac_above_lower = round(1. * np.count_nonzero(lower < y) / len(y), 3)
# +/- 3%
self.assertTrue(frac_above_lower >= 0.92)
self.assertTrue(frac_above_lower <= 0.98)
# Multi-dimensional quantile regression.
def testQuantileRegressionMultiDimLabel(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 3
learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE
learner_config.constraints.min_node_weight = 1 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l2 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l1 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.tree_complexity = (
1.0 / _QUANTILE_REGRESSION_SIZE)
train_input_fn, test_input_fn, y = _quantile_regression_input_fns(
two_dimension=True)
# 95th percentile.
model_upper = estimator.GradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.95],
learner_config=learner_config,
label_dimension=2,
num_trees=100,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_upper.fit(input_fn=train_input_fn, steps=1000)
result_iter = model_upper.predict(input_fn=test_input_fn)
upper = []
for prediction_dict in result_iter:
upper.append(prediction_dict["scores"])
count_below_upper = np.count_nonzero(upper > y, axis=0)
count_both_below_upper = np.count_nonzero(np.prod(upper > y, axis=1))
frac_below_upper_0 = round(1. * count_below_upper[0] / len(y), 3)
frac_below_upper_1 = round(1. * count_below_upper[1] / len(y), 3)
frac_both_below_upper = round(1. * count_both_below_upper / len(y), 3)
# +/- 3%
self.assertTrue(frac_below_upper_0 >= 0.92)
self.assertTrue(frac_below_upper_0 <= 0.98)
self.assertTrue(frac_below_upper_1 >= 0.92)
self.assertTrue(frac_below_upper_1 <= 0.98)
self.assertTrue(frac_both_below_upper >= 0.92)
self.assertTrue(frac_both_below_upper <= 0.98)
train_input_fn, test_input_fn, _ = _quantile_regression_input_fns(
two_dimension=True)
model_lower = estimator.GradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.05],
learner_config=learner_config,
label_dimension=2,
num_trees=100,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_lower.fit(input_fn=train_input_fn, steps=1000)
result_iter = model_lower.predict(input_fn=test_input_fn)
lower = []
for prediction_dict in result_iter:
lower.append(prediction_dict["scores"])
count_above_lower = np.count_nonzero(lower < y, axis=0)
count_both_above_lower = np.count_nonzero(np.prod(lower < y, axis=1))
frac_above_lower_0 = round(1. * count_above_lower[0] / len(y), 3)
frac_above_lower_1 = round(1. * count_above_lower[1] / len(y), 3)
frac_both_above_lower = round(1. * count_both_above_lower / len(y), 3)
# +/- 3%
self.assertTrue(frac_above_lower_0 >= 0.92)
self.assertTrue(frac_above_lower_0 <= 0.98)
self.assertTrue(frac_above_lower_1 >= 0.92)
self.assertTrue(frac_above_lower_1 <= 0.98)
self.assertTrue(frac_both_above_lower >= 0.92)
self.assertTrue(frac_both_above_lower <= 0.98)
class CoreGradientBoostedDecisionTreeEstimators(test_util.TensorFlowTestCase):
def testTrainEvaluateInferDoesNotThrowError(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
est = estimator.CoreGradientBoostedDecisionTreeEstimator(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")])
# Train for a few steps.
est.train(input_fn=_train_input_fn, steps=1000)
est.evaluate(input_fn=_eval_input_fn, steps=1)
est.predict(input_fn=_eval_input_fn)
def testRankingDontThrowExceptionForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
est = estimator.CoreGradientBoostedDecisionTreeRanker(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[
core_feature_column.numeric_column("f1"),
core_feature_column.numeric_column("f2")
],
ranking_model_pair_keys=("a", "b"))
# Train for a few steps.
est.train(input_fn=_ranking_train_input_fn, steps=1000)
est.evaluate(input_fn=_ranking_train_input_fn, steps=1)
est.predict(input_fn=_infer_ranking_train_input_fn)
def testFitAndEvaluateMultiClassTreePerClassDontThrowException(self):
n_classes = 3
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = n_classes
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
head_fn = estimator.core_multiclass_head(n_classes=n_classes)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.CoreGradientBoostedDecisionTreeEstimator(
learner_config=learner_config,
head=head_fn,
num_trees=1,
center_bias=False,
examples_per_layer=7,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")])
classifier.train(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_multiclass_train_input_fn, steps=1)
classifier.predict(input_fn=_eval_input_fn)
def testFitAndEvaluateMultiClassDiagonalDontThrowException(self):
n_classes = 3
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = n_classes
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
head_fn = estimator.core_multiclass_head(n_classes=n_classes)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.CoreGradientBoostedDecisionTreeEstimator(
learner_config=learner_config,
head=head_fn,
num_trees=1,
center_bias=False,
examples_per_layer=7,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")])
classifier.train(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_multiclass_train_input_fn, steps=1)
classifier.predict(input_fn=_eval_input_fn)
def testFitAndEvaluateMultiClassFullDontThrowException(self):
n_classes = 3
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = n_classes
learner_config.constraints.max_tree_depth = 1
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
head_fn = estimator.core_multiclass_head(n_classes=n_classes)
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.CoreGradientBoostedDecisionTreeEstimator(
learner_config=learner_config,
head=head_fn,
num_trees=1,
center_bias=False,
examples_per_layer=7,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")])
classifier.train(input_fn=_multiclass_train_input_fn, steps=100)
classifier.evaluate(input_fn=_multiclass_train_input_fn, steps=1)
classifier.predict(input_fn=_eval_input_fn)
def testWeightedCategoricalColumn(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
feature_columns = [
core_feature_column.weighted_categorical_column(
categorical_column=core_feature_column
.categorical_column_with_vocabulary_list(
key="word", vocabulary_list=["the", "cat", "dog"]),
weight_feature_key="weight")
]
labels = np.array([[1], [1], [0], [0.]], dtype=np.float32)
def _make_input_fn():
def _input_fn():
features_dict = {}
# Sparse tensor representing
# example 0: "cat","the"
# examaple 1: "dog"
# example 2: -
# example 3: "the"
# Weights for the words are: cat - 5, dog - 6, the - 1.
features_dict["word"] = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [3, 0]],
values=constant_op.constant(["the", "cat", "dog", "the"],
dtype=dtypes.string),
dense_shape=[4, 3])
features_dict["weight"] = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [3, 0]],
values=[1., 5., 6., 1.],
dense_shape=[4, 3])
return features_dict, labels
return _input_fn
est = estimator.CoreGradientBoostedDecisionTreeEstimator(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=feature_columns)
input_fn = _make_input_fn()
est.train(input_fn=input_fn, steps=100)
est.evaluate(input_fn=input_fn, steps=1)
est.predict(input_fn=input_fn)
# One dimensional quantile regression.
def testQuantileRegression(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 3
learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE
learner_config.constraints.min_node_weight = 1 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l2 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l1 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.tree_complexity = (
1.0 / _QUANTILE_REGRESSION_SIZE)
train_input_fn, test_input_fn, y = _quantile_regression_input_fns()
y = y.reshape(_QUANTILE_REGRESSION_SIZE, 1)
# 95th percentile.
model_upper = estimator.CoreGradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.95],
learner_config=learner_config,
num_trees=100,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_upper.train(input_fn=train_input_fn, steps=1000)
result_iter = model_upper.predict(input_fn=test_input_fn)
upper = []
for prediction_dict in result_iter:
upper.append(prediction_dict["predictions"])
frac_below_upper = round(1. * np.count_nonzero(upper > y) / len(y), 3)
# +/- 3%
self.assertTrue(frac_below_upper >= 0.92)
self.assertTrue(frac_below_upper <= 0.98)
train_input_fn, test_input_fn, _ = _quantile_regression_input_fns()
model_lower = estimator.CoreGradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.05],
learner_config=learner_config,
num_trees=100,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_lower.train(input_fn=train_input_fn, steps=1000)
result_iter = model_lower.predict(input_fn=test_input_fn)
lower = []
for prediction_dict in result_iter:
lower.append(prediction_dict["predictions"])
frac_above_lower = round(1. * np.count_nonzero(lower < y) / len(y), 3)
# +/- 3%
self.assertTrue(frac_above_lower >= 0.92)
self.assertTrue(frac_above_lower <= 0.98)
# Multi-dimensional quantile regression.
def testQuantileRegressionMultiDimLabel(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 3
learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE
learner_config.constraints.min_node_weight = 1 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l2 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.l1 = 1.0 / _QUANTILE_REGRESSION_SIZE
learner_config.regularization.tree_complexity = (
1.0 / _QUANTILE_REGRESSION_SIZE)
train_input_fn, test_input_fn, y = _quantile_regression_input_fns(
two_dimension=True)
y = y.reshape(_QUANTILE_REGRESSION_SIZE, 2)
# 95th percentile.
model_upper = estimator.CoreGradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.95],
learner_config=learner_config,
num_trees=100,
label_dimension=2,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_upper.train(input_fn=train_input_fn, steps=1000)
result_iter = model_upper.predict(input_fn=test_input_fn)
upper = []
for prediction_dict in result_iter:
upper.append(prediction_dict["predictions"])
count_below_upper = np.count_nonzero(upper > y, axis=0)
count_both_below_upper = np.count_nonzero(np.prod(upper > y, axis=1))
frac_below_upper_0 = round(1. * count_below_upper[0] / len(y), 3)
frac_below_upper_1 = round(1. * count_below_upper[1] / len(y), 3)
frac_both_below_upper = round(1. * count_both_below_upper / len(y), 3)
# +/- 3%
self.assertTrue(frac_below_upper_0 >= 0.92)
self.assertTrue(frac_below_upper_0 <= 0.98)
self.assertTrue(frac_below_upper_1 >= 0.92)
self.assertTrue(frac_below_upper_1 <= 0.98)
self.assertTrue(frac_both_below_upper >= 0.92)
self.assertTrue(frac_both_below_upper <= 0.98)
train_input_fn, test_input_fn, _ = _quantile_regression_input_fns(
two_dimension=True)
model_lower = estimator.CoreGradientBoostedDecisionTreeQuantileRegressor(
quantiles=[0.05],
learner_config=learner_config,
num_trees=100,
label_dimension=2,
examples_per_layer=_QUANTILE_REGRESSION_SIZE,
center_bias=False)
model_lower.train(input_fn=train_input_fn, steps=1000)
result_iter = model_lower.predict(input_fn=test_input_fn)
lower = []
for prediction_dict in result_iter:
lower.append(prediction_dict["predictions"])
count_above_lower = np.count_nonzero(lower < y, axis=0)
count_both_above_lower = np.count_nonzero(np.prod(lower < y, axis=1))
frac_above_lower_0 = round(1. * count_above_lower[0] / len(y), 3)
frac_above_lower_1 = round(1. * count_above_lower[1] / len(y), 3)
frac_both_above_lower = round(1. * count_both_above_lower / len(y), 3)
# +/- 3%
self.assertTrue(frac_above_lower_0 >= 0.92)
self.assertTrue(frac_above_lower_0 <= 0.98)
self.assertTrue(frac_above_lower_1 >= 0.92)
self.assertTrue(frac_above_lower_1 <= 0.98)
self.assertTrue(frac_both_above_lower >= 0.92)
self.assertTrue(frac_both_above_lower <= 0.98)
if __name__ == "__main__":
googletest.main()
|
{
"content_hash": "7312bfc368b21fc5702f76b421664ef4",
"timestamp": "",
"source": "github",
"line_count": 829,
"max_line_length": 94,
"avg_line_length": 37.46562123039807,
"alnum_prop": 0.6773559998712129,
"repo_name": "Bismarrck/tensorflow",
"id": "ee052ac60387d8f993e4942dd7dff39e191dd3a4",
"size": "31748",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "493885"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "53117668"
},
{
"name": "CMake",
"bytes": "207176"
},
{
"name": "Dockerfile",
"bytes": "39024"
},
{
"name": "Go",
"bytes": "1303624"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "893928"
},
{
"name": "Jupyter Notebook",
"bytes": "2657814"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "68402"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102511"
},
{
"name": "PHP",
"bytes": "5172"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "43480067"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "497472"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
from numpy import sum
from numpy import zeros
from gwlfe.Input.LandUse.NLU import NLU
from gwlfe.Input.WaterBudget.Water import Water
from gwlfe.Memoization import memoize
from gwlfe.MultiUse_Fxns.Erosion.RurEros import RurEros
from gwlfe.MultiUse_Fxns.Erosion.RurEros import RurEros_f
@memoize
def ErosWashoff(NYrs, DaysMonth, InitSnow_0, Temp, Prec, NRur, NUrb, Acoef, KF, LS, C, P, Area):
result = zeros((NYrs, 16, 12))
nlu = NLU(NRur, NUrb)
water = Water(NYrs, DaysMonth, InitSnow_0, Temp, Prec)
rureros = RurEros(NYrs, DaysMonth, Temp, InitSnow_0, Prec, Acoef, NRur, KF, LS, C, P, Area)
for Y in range(NYrs):
for i in range(12):
for l in range(nlu):
result[Y, l, i] = 0
for Y in range(NYrs):
for i in range(12):
for j in range(DaysMonth[Y][i]):
if Temp[Y][i][j] > 0 and water[Y][i][j] > 0.01:
for l in range(NRur):
result[Y][l][i] = result[Y][l][i] + rureros[Y][i][j][l]
else:
pass
return result
@memoize
def ErosWashoff_f(NYrs, DaysMonth, InitSnow_0, Temp, Prec, NRur, Acoef, KF, LS, C, P, Area):
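# Vectorised variant: RurEros_f is assumed to return per-day erosion indexed
# (year, month, day, land use), so summing over the day axis (axis=2) yields
# monthly totals; note the axis order differs from the loop version above,
# which fills (year, land use, month).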
return sum(RurEros_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, Acoef, NRur, KF, LS, C, P, Area), axis=2)
|
{
"content_hash": "8fe7d702cabe315a4ca57876f23bbace",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 107,
"avg_line_length": 38.11764705882353,
"alnum_prop": 0.6049382716049383,
"repo_name": "WikiWatershed/gwlf-e",
"id": "f323a5529763d2c4ed093ef3f582f71eea7b9df9",
"size": "1296",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "gwlfe/MultiUse_Fxns/Erosion/ErosWashoff.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAMS",
"bytes": "5930291"
},
{
"name": "Python",
"bytes": "775719"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(name='bulkplot',
version='0.1',
description='A bulk plot generator for pandas DataFrames',
url='http://github.com/bpalmer4/bulkplot',
author='Bryan Palmer',
author_email='palmer.bryan@gmail.com',
license='MIT',
packages=['bulkplot'],
zip_safe=False)
|
{
"content_hash": "b1bee9b7a73d2fb01cf3a4efef8c0894",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 64,
"avg_line_length": 31.363636363636363,
"alnum_prop": 0.6260869565217392,
"repo_name": "bpalmer4/bulkplot",
"id": "c5b0f1ad219dfb56531d54029225ea311d9183d0",
"size": "345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8216"
},
{
"name": "Shell",
"bytes": "82"
}
],
"symlink_target": ""
}
|
import json
from imaginet.commands import train, evaluate
#from imaginet.defn.visual import Visual
from imaginet.defn.lm import LM
from imaginet.simple_data import phonemes, characters
from funktional.util import linear, clipped_rectify, CosineDistance
import numpy
dataset = 'coco'
datapath = "/home/gchrupala/repos/reimaginet"
epochs = 10
tokenize=phonemes
if True:
train(dataset=dataset,
datapath=datapath,
model_path='.',
task=LM,
epochs=epochs,
min_df=10,
max_norm=5.0,
scale=True,
batch_size=64,
shuffle=True,
size_embed=256,
size_hidden=1024,
depth=3,
tokenize=tokenize,
validate_period=100,
seed = 41)
|
{
"content_hash": "fcbda8e8c812ab4030e5f98f6097139b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 67,
"avg_line_length": 23.533333333333335,
"alnum_prop": 0.6813031161473088,
"repo_name": "gchrupala/reimaginet",
"id": "7e320f880117decc2ec08537ce75a32ae0329836",
"size": "706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/lm/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "304781"
},
{
"name": "Python",
"bytes": "220454"
},
{
"name": "Shell",
"bytes": "224"
}
],
"symlink_target": ""
}
|
import numpy as np
import datetime
# Implementation of Linear UCB with a stepwise reset of the model (on a per-article basis). If an article
# has been available for more than 24 hours its model is reset. This currently gives the best score on the evaluation system.
class LinUCB:
all_articles = []
all_M = {}
all_M_inv = {}
all_b = {}
all_w = {}
alpha = 0.2
current_article = None # current recommendation
current_user = None # user for which the article was recommended
article_first_appearance = {}
def set_articles(self, articles):
self.all_articles = articles
self.counter = 0
# initialize M and b for each article:
for article in self.all_articles:
M = np.identity(6)
b = np.zeros((6, 1))
M_inv = np.identity(6) #the inverse of identity is identity
self.all_M[article] = M
self.all_b[article] = b
self.all_M_inv[article] = M_inv
self.all_w[article] = np.dot(M_inv, b)
def resetArticle(self, article):
M = np.identity(6)
b = np.zeros((6, 1))
M_inv = np.identity(6) #the inverse of identity is identity
self.all_M[article] = M
self.all_b[article] = b
self.all_M_inv[article] = M_inv
self.all_w[article] = np.dot(M_inv, b)
self.article_first_appearance.pop(article)
def ucb(self, article, user, timestamp):
M_inv = self.all_M_inv[article]
w = self.all_w[article]
ucb = np.dot(w.T, user) + self.alpha * np.sqrt(np.dot(user.T, np.dot(M_inv, user)))
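# w = M^-1 b is the ridge-regression weight estimate for this article, so the
# first term is the predicted payoff; the second is the exploration bonus
# alpha * sqrt(x^T M^-1 x), the width of the confidence interval around w^T x.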
if not self.article_first_appearance.has_key(article):
self.article_first_appearance[article] = timestamp
seconds_visible = timestamp - self.article_first_appearance[article]
num_hours_to_show = 24
if seconds_visible > 3600 * num_hours_to_show: # timestamp is in seconds
# reset article
self.resetArticle(article)
#hours_visible = (seconds_visible - 3600*num_hours_to_show) / float(3600)
#weight = 1 / (hours_visible + 1)
#ucb *= weight
return ucb
def recommend(self, timestamp, user_features, articles):
user_features = np.reshape(user_features, (6, 1))
best_ucb = -np.inf
for article in articles:
current_ucb = self.ucb(article, user_features, timestamp)
if current_ucb > best_ucb:
best_ucb = current_ucb
self.current_article = article
self.current_user = user_features
return self.current_article
def update(self, reward):
if reward == 0 or reward == 1:
self.counter += 1
article = self.current_article
user = self.current_user
M = self.all_M[article]
b = self.all_b[article]
self.all_M[article] = M + np.dot(user, user.T)
self.all_b[article] = b + reward * user
# precompute M^-1 and w for UCB
self.all_M_inv[article] = np.linalg.inv(self.all_M[article])
self.all_w[article] = np.dot(self.all_M_inv[article], self.all_b[article])
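# M accumulates the outer products x x^T and b the reward-weighted contexts -
# the sufficient statistics of ridge regression. M^-1 is recomputed from
# scratch on every update; a Sherman-Morrison rank-one update would be cheaper.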
linucb = LinUCB()
# Evaluator will call this function and pass the article features.
# Check evaluator.py description for details.
def set_articles(art):
linucb.set_articles(art)
# This function will be called by the evaluator.
# Check task description for details.
def update(reward):
linucb.update(reward)
# This function will be called by the evaluator.
# Check task description for details.
def reccomend(timestamp, user_features, articles):
return linucb.recommend(timestamp, user_features, articles)
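# A minimal, hypothetical driver (the real evaluator supplies the article ids,
# timestamps and 6-dimensional user features; the values below are made up,
# and 'reccomend' (sic) matches the entry point defined above):
# set_articles([109498, 109509])
# chosen = reccomend(1317513291, [1.0, 0.0, 0.1, 0.2, 0.3, 0.4], [109498, 109509])
# update(1 if chosen == 109498 else 0)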
|
{
"content_hash": "932b02b72845dfe4f1347ec3876b1583",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 122,
"avg_line_length": 32.1551724137931,
"alnum_prop": 0.6075067024128686,
"repo_name": "lukaselmer/ethz-data-mining",
"id": "d2a6dc5eba6c30db78cf84b522fff4f1321b3d05",
"size": "3756",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "4-bandit/code/policyLinUCBStepwiseReset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11753064"
},
{
"name": "Ruby",
"bytes": "1433"
},
{
"name": "Shell",
"bytes": "159449"
},
{
"name": "TeX",
"bytes": "7461"
}
],
"symlink_target": ""
}
|
"""Binary sensor to read Proxmox VE data."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.const import ATTR_ATTRIBUTION, CONF_HOST, CONF_PORT
from . import CONF_CONTAINERS, CONF_NODES, CONF_VMS, PROXMOX_CLIENTS, ProxmoxItemType
ATTRIBUTION = "Data provided by Proxmox VE"
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor platform."""
sensors = []
for entry in discovery_info["entries"]:
port = entry[CONF_PORT]
for node in entry[CONF_NODES]:
for virtual_machine in node[CONF_VMS]:
sensors.append(
ProxmoxBinarySensor(
hass.data[PROXMOX_CLIENTS][f"{entry[CONF_HOST]}:{port}"],
node["node"],
ProxmoxItemType.qemu,
virtual_machine,
)
)
for container in node[CONF_CONTAINERS]:
sensors.append(
ProxmoxBinarySensor(
hass.data[PROXMOX_CLIENTS][f"{entry[CONF_HOST]}:{port}"],
node["node"],
ProxmoxItemType.lxc,
container,
)
)
add_entities(sensors, True)
class ProxmoxBinarySensor(BinarySensorDevice):
"""A binary sensor for reading Proxmox VE data."""
def __init__(self, proxmox_client, item_node, item_type, item_id):
"""Initialize the binary sensor."""
self._proxmox_client = proxmox_client
self._item_node = item_node
self._item_type = item_type
self._item_id = item_id
self._vmname = None
self._name = None
self._state = None
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def is_on(self):
"""Return true if VM/container is running."""
return self._state
@property
def device_state_attributes(self):
"""Return device attributes of the entity."""
return {
"node": self._item_node,
"vmid": self._item_id,
"vmname": self._vmname,
"type": self._item_type.name,
ATTR_ATTRIBUTION: ATTRIBUTION,
}
def update(self):
"""Check if the VM/Container is running."""
item = self.poll_item()
if item is None:
_LOGGER.warning("Failed to poll VM/container %s", self._item_id)
return
self._state = item["status"] == "running"
def poll_item(self):
"""Find the VM/Container with the set item_id."""
items = (
self._proxmox_client.get_api_client()
.nodes(self._item_node)
.get(self._item_type.name)
)
item = next(
(item for item in items if item["vmid"] == str(self._item_id)), None
)
if item is None:
_LOGGER.warning("Couldn't find VM/Container with the ID %s", self._item_id)
return None
if self._vmname is None:
self._vmname = item["name"]
if self._name is None:
self._name = f"{self._item_node} {self._vmname} running"
return item
|
{
"content_hash": "3f75f53da907677d4bafea518437e1b4",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 87,
"avg_line_length": 29.9375,
"alnum_prop": 0.5413062928720549,
"repo_name": "joopert/home-assistant",
"id": "15b1f1483e1f47add667367610c1f08453653d2a",
"size": "3353",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/proxmoxve/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18670593"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from rest_framework import serializers
def get_countries_code_from_queryset(queryset):
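# Serialize the queryset and return the distinct country_code values.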
from common.serializers import CountryPointSerializer
return list(set(map(lambda x: x.get("country_code"), CountryPointSerializer(queryset, many=True).data)))
def confirm(prompt='Confirm', default=False):
"""
https://code.activestate.com/recipes/541096-prompt-the-user-for-confirmation/
prompts for yes or no response from the user. Returns True for yes and False for no.
'resp' should be set to the default value assumed by the caller when user simply types ENTER.
>>> confirm(prompt='Create Directory?', default=True)
Create Directory? [y]|n:
True
>>> confirm(prompt='Create Directory?', default=False)
Create Directory? [n]|y:
False
>>> confirm(prompt='Create Directory?', default=False)
Create Directory? [n]|y: y
True
"""
if default:
prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')
else:
prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')
while True:
ans = input(prompt)
if not ans:
return default
if ans not in ['y', 'Y', 'n', 'N']:
print('please enter y or n.')
continue
if ans.lower() == 'y':
return True
if ans.lower() == 'n':
return False
def get_absolute_frontend_url(relative_url):
if not relative_url.startswith('/'):
relative_url = '/' + relative_url
host = settings.FRONTEND_HOST
if host.endswith('/'):
host = host[:-1]
return 'http://' + host + relative_url
def update_m2m_relation(obj, related_name, related_data_list, serializer_class, context=None, save_kwargs=None):
if related_data_list is None:
return
from common.serializers import CommonFileSerializer
valid_ids = []
serializer_instance = serializer_class()
for related_object_data in related_data_list:
# This is a workaround for a poorly thought out concept behind CommonFileSerializer,
# where the serialized value is invalid on updates
for attr_name, attr_value in list(related_object_data.items()):
if isinstance(serializer_instance.fields.get(attr_name), CommonFileSerializer) and \
not isinstance(attr_value, int):
related_object_data.pop(attr_name)
related_object = serializer_class.Meta.model.objects.filter(id=related_object_data.get('id')).first()
is_update = bool(related_object)
related_serializer = serializer_class(
instance=related_object, data=related_object_data, partial=is_update, context=context or {}
)
if related_serializer.is_valid():
related_serializer.save(**(save_kwargs or {}))
elif not is_update:
raise serializers.ValidationError({
related_name: related_serializer.errors
})
valid_ids.append(related_serializer.instance.id)
related_manager = getattr(obj, related_name)
related_manager.exclude(id__in=valid_ids).delete()
related_manager.add(*serializer_class.Meta.model.objects.filter(id__in=valid_ids))
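# A minimal, hypothetical call (object and serializer names invented for
# illustration):
# update_m2m_relation(
#     obj=partner,
#     related_name='experiences',
#     related_data_list=validated_data.pop('experiences', None),
#     serializer_class=PartnerExperienceSerializer,
#     context={'request': request},
# )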
|
{
"content_hash": "b9ae71bfcc438c9c237bfe3ef16f26d8",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 112,
"avg_line_length": 35.05494505494506,
"alnum_prop": 0.6382445141065831,
"repo_name": "unicef/un-partner-portal",
"id": "b75459388bec1684e8dba5668ad183ef8260c436",
"size": "3190",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "backend/unpp_api/apps/common/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "468629"
},
{
"name": "Dockerfile",
"bytes": "2303"
},
{
"name": "HTML",
"bytes": "49027"
},
{
"name": "JavaScript",
"bytes": "2199879"
},
{
"name": "Python",
"bytes": "1322681"
},
{
"name": "Shell",
"bytes": "4734"
},
{
"name": "Smarty",
"bytes": "751"
}
],
"symlink_target": ""
}
|
import os
import shutil
import sys
import time
import numpy as np
def prepare():
if 'STORING' not in os.environ:
os.environ['STORING'] = '/data'
storing = os.getenv('STORING')
if 'TRAINING' not in os.environ:
os.environ['TRAINING'] = os.path.join(
storing, 'numerai_training_data.csv')
if 'TESTING' not in os.environ:
os.environ['TESTING'] = os.path.join(
storing, 'numerai_tournament_data.csv')
if 'PREDICTING' not in os.environ:
os.environ['PREDICTING'] = os.path.join(
storing, 'predictions.csv')
if 'PREPARED_TRAINING' not in os.environ:
os.environ['PREPARED_TRAINING'] = os.path.join(
storing, 'train_data.csv')
if 'PREPARED_VALIDATING' not in os.environ:
os.environ['PREPARED_VALIDATING'] = os.path.join(
storing, 'valid_data.csv')
if 'PREPARED_TESTING' not in os.environ:
os.environ['PREPARED_TESTING'] = os.path.join(
storing, 'test_data.csv')
def merge_tsne(selection):
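# 'selection' indexes into the list of embedding files assembled below, e.g.
# merge_tsne([1]) (as called from main) merges only tsne_2d_10p.npz.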
prefix = os.getenv('STORING')
waiting = bool(int(os.getenv('WAITING', '0')))
each = []
each.append(os.path.join(prefix, 'tsne_2d_5p.npz'))
each.append(os.path.join(prefix, 'tsne_2d_10p.npz'))
each.append(os.path.join(prefix, 'tsne_2d_15p.npz'))
each.append(os.path.join(prefix, 'tsne_2d_30p.npz'))
each.append(os.path.join(prefix, 'tsne_2d_50p.npz'))
each.append(os.path.join(prefix, 'tsne_2d_5p_poly.npz'))
each.append(os.path.join(prefix, 'tsne_2d_10p_poly.npz'))
each.append(os.path.join(prefix, 'tsne_2d_15p_poly.npz'))
each.append(os.path.join(prefix, 'tsne_2d_30p_poly.npz'))
each.append(os.path.join(prefix, 'tsne_2d_50p_poly.npz'))
if not bool(int(os.getenv('TSNE_2D_ONLY', '0'))):
each.append(os.path.join(prefix, 'tsne_3d_30p.npz'))
while waiting:
waiting = False
for item in each:
if not os.path.isfile(item):
waiting = True
if waiting:
time.sleep(1) # poll for the t-SNE outputs instead of busy-spinning
selected = [np.load(each[i]) for i in selection]
X_train = np.concatenate([item['train'] for item in selected], axis=1)
X_valid = np.concatenate([item['valid'] for item in selected], axis=1)
X_test = np.concatenate([item['test'] for item in selected], axis=1)
np.savez(
os.path.join(prefix, 'tsne.npz'),
X_train=X_train, X_valid=X_valid, X_test=X_test)
def announce(text):
sys.stdout.write('{}\n'.format('-' * 80))
sys.stdout.write('{}\n'.format(text))
sys.stdout.write('{}\n'.format('-' * 80))
sys.stdout.flush()
def remember(suffix):
predicting = os.getenv('PREDICTING')
shutil.copyfile(predicting, predicting + suffix)
def main():
operation = os.getenv('OPERATION', 'All')
prepare()
if operation in ['PrepareData', 'All']:
announce('Data Preparation')
os.system('python3 /code/prep_data.py')
if operation in ['LogisticRegression', 'All']:
announce('Simple Logistic Regression')
os.system('python3 /code/models/pipeline/simple.py')
remember('.simple')
if operation in ['tSNE2D', 'All']:
announce('t-SNE 2D')
os.system('python3 /code/fit_tsne.py')
if operation in ['tSNE3D', 'All'] and not bool(int(os.getenv('TSNE_2D_ONLY', '0'))):
announce('t-SNE 3D')
os.system('python3 /code/fit_tsne_3d.py')
if operation in ['tSNESummary', 'All']:
announce('t-SNE Summary')
merge_tsne([1])
if operation in ['TFNN', 'All']:
announce('TF NN')
os.system('python3 /code/models/classifier/main.py')
remember('.tf_classifier')
if operation in ['BasicVisualization', 'All']:
announce('Basic data visualization notebook')
os.system('python3 /code/notebooks/numerai.py')
if operation in ['AdditionalVisualization', 'All']:
announce('Additional data visualization notebook')
os.system('python3 /code/notebooks/visualization.py')
if operation in ['TFAutoencoder', 'All']:
announce('TF Autoencoder')
os.system('python3 /code/models/autoencoder/main.py')
if operation in ['TFAdversarial', 'All']:
announce('TF Adversarial')
os.system('python3 /code/models/adversarial/main.py')
if operation in ['TFPairwise', 'All']:
announce('TF Pairwise')
os.system('python3 /code/models/pairwise/main.py')
remember('.tf_pairwise')
if operation in ['Pairwise', 'All']:
announce('Pairwise Interactions')
os.system('python3 /code/models/pipeline/pairwise.py')
remember('.pairwise')
if operation in ['ParameterSearch', 'All']:
announce('Searching parameters')
os.system('python3 /code/search_params.py')
if operation in ['AdditionalLogisticRegression', 'All']:
announce('Logistic Regression')
os.system('python3 /code/models/pipeline/lr.py')
remember('.lr')
if operation in ['FactorizationMachines', 'All']:
announce('Factorization Machines')
os.system('python3 /code/models/pipeline/fm.py')
remember('.fm')
if operation in ['GradientBoostingTrees', 'All']:
announce('GBT')
os.system('python3 /code/models/pipeline/gbt.py')
remember('.gbt')
if operation in ['Ensemble', 'All']:
announce('Ensemble')
os.system('python3 /code/ensemble.py')
if operation in ['TPOT', 'All']:
announce('TPOT')
os.system('python3 /code/tpot_test.py')
remember('.tpot')
if __name__ == '__main__':
main()
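# Hedged usage sketch (not part of the original pipeline; the path below
# is a hypothetical example): OPERATION selects a single stage, e.g.
#   STORING=/data OPERATION=LogisticRegression python3 run.py
# runs only the simple logistic-regression stage and, via remember(),
# keeps a copy of predictions.csv as predictions.csv.simple.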
|
{
"content_hash": "5d50e040972317377e332998188afb66",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 88,
"avg_line_length": 39.156028368794324,
"alnum_prop": 0.6185473646078609,
"repo_name": "altermarkive/Resurrecting-JimFleming-Numerai",
"id": "99e3284f2123f0dc94003dd37d68982ad4cef78d",
"size": "5569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ml-jimfleming--numerai/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "58046"
},
{
"name": "Matlab",
"bytes": "4820"
},
{
"name": "Python",
"bytes": "97293"
}
],
"symlink_target": ""
}
|
from subprocess import Popen, PIPE
class configBerry:
    def readConf(self,value):
        # Read the value assigned to `value` in /etc/torberry.conf,
        # stripping quotes and line endings.
        var = Popen("grep --only-matching --perl-regex \"(?<=" + value + "\=).*\" /etc/torberry.conf", stdout=PIPE, shell=True).stdout.read()
        var = var.replace('"','')
        var = var.replace("'","")
        var = var.replace("\n","")
        var = var.replace("\r","")
        return var
    def writeConf(self,option,ovalue):
        # Append the option if it is absent, then rewrite its value in place.
        ovalue = ovalue.replace("\n","")
        ovalue = ovalue.replace("\r","")
        var = Popen("grep \"" + option + "=\" /etc/torberry.conf || echo " + option + "=\"" + ovalue + "\" >> /etc/torberry.conf", stdout=PIPE, shell=True).stdout.read()
        var = Popen("sed -i 's/" + option + "=.*/" + option + "=\"" + ovalue + "\"/' /etc/torberry.conf || echo " + option + "=\"" + ovalue + "\" >> /etc/torberry.conf", stdout=PIPE, shell=True).stdout.read()
    def genWlPass(self,ap,newpass):
        # Derive the hashed WPA PSK for the given access point and passphrase.
        var = Popen("wpa_passphrase \"" + ap + "\" \"" + newpass + "\" | grep \"psk=\" | grep -v \"#\" | cut -d\"=\" -f 2", stdout=PIPE, shell=True).stdout.read()
        return var
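# Hedged usage sketch ('ssid' is a hypothetical option name; requires a
# readable and writable /etc/torberry.conf):
#   cfg = configBerry()
#   cfg.writeConf('ssid', 'myaccesspoint')
#   current = cfg.readConf('ssid')
#   psk = cfg.genWlPass('myaccesspoint', 'secretpassphrase')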
|
{
"content_hash": "dba6259fdea2586df60e043653e24e86",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 216,
"avg_line_length": 53.095238095238095,
"alnum_prop": 0.5192825112107623,
"repo_name": "akipta/torberry",
"id": "3fea538d764cc4945b2b440fe4d4a7f9aacfc319",
"size": "1138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extras/web/HttpServer/configBerry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "849"
},
{
"name": "Python",
"bytes": "21594"
},
{
"name": "Shell",
"bytes": "57649"
},
{
"name": "Smarty",
"bytes": "14051"
}
],
"symlink_target": ""
}
|
import numpy as np
#import theano
#import theano.tensor as T
import mlbase.networkhelper as N
import unittest
class TestGenerative(unittest.TestCase):
def test_upconv(self):
x = np.random.randn(256, 32, 28, 28)
gp = N.GlobalPooling()
# TODO
self.assertEqual(1,1)
def test_upconv1(self):
x = np.random.randn(256, 32, 28, 28)
gp = N.GlobalPooling()
# TODO
self.assertEqual(1,1)
|
{
"content_hash": "1c7614129e3f4fff7508dd38267242f0",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 44,
"avg_line_length": 21.714285714285715,
"alnum_prop": 0.6140350877192983,
"repo_name": "pipehappy1/super-engine",
"id": "10b2649e25642f454de6d387e2e8f9134a283c68",
"size": "456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_generative.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "98291"
}
],
"symlink_target": ""
}
|
from indico.core.db import db
from indico.util.string import format_repr
class MapArea(db.Model):
__tablename__ = 'map_areas'
__table_args__ = (db.Index(None, 'is_default', unique=True, postgresql_where=db.text('is_default')),
{'schema': 'roombooking'})
id = db.Column(
db.Integer,
primary_key=True
)
name = db.Column(
db.String,
nullable=False
)
is_default = db.Column(
db.Boolean,
nullable=False,
default=False
)
top_left_latitude = db.Column(
db.Float,
nullable=False
)
top_left_longitude = db.Column(
db.Float,
nullable=False
)
bottom_right_latitude = db.Column(
db.Float,
nullable=False
)
bottom_right_longitude = db.Column(
db.Float,
nullable=False
)
def __repr__(self):
return format_repr(self, 'id', 'name', is_default=False)
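# Hedged usage sketch (assumes a Flask-SQLAlchemy-style query property and
# an active application context, as elsewhere in Indico):
#   default_area = MapArea.query.filter_by(is_default=True).first()
# The partial unique index above guarantees at most one such default row.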
|
{
"content_hash": "c16b8358a54ec818bc8d026b7eab18b0",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 104,
"avg_line_length": 23.317073170731707,
"alnum_prop": 0.5564853556485355,
"repo_name": "pferreir/indico",
"id": "ac0e2f12786fc5f80522e1cc9bbfcb5078a852fd",
"size": "1170",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/modules/rb/models/map_areas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1394116"
},
{
"name": "JavaScript",
"bytes": "2078347"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "4993798"
},
{
"name": "SCSS",
"bytes": "475126"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
from functools import cmp_to_key
class Player:
def __init__(self, name, score):
self.name = name
self.score = score
@staticmethod
def comparator(a, b):
if a.score > b.score:
return -1
elif b.score > a.score:
return 1
elif a.name > b.name:
return 1
elif b.name > a.name:
return -1
else:
return 0
n = int(input())
data = []
for _ in range(n):
name, score = input().split()
score = int(score)
player = Player(name, score)
data.append(player)
data = sorted(data, key=cmp_to_key(Player.comparator))
for i in data:
print(i.name, i.score)
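# Hedged I/O sketch: given the stdin
#   3
#   amy 100
#   david 100
#   aleksa 150
# the comparator sorts by score descending, then name ascending, printing:
#   aleksa 150
#   amy 100
#   david 100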
|
{
"content_hash": "3ffe44399f3b75fb89abe1b90e663606",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 54,
"avg_line_length": 21.40625,
"alnum_prop": 0.5372262773722628,
"repo_name": "Adriel-M/HackerRank",
"id": "29a1661b72b83fb57715dd41f011cae78c37b9af",
"size": "685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Learn/CTCI/Sorting: Comparator/comparator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "20568"
},
{
"name": "C++",
"bytes": "25485"
},
{
"name": "Python",
"bytes": "26138"
}
],
"symlink_target": ""
}
|
from django.views.generic import TemplateView, FormView, ListView
from django.views.generic.edit import ProcessFormView, FormMixin
from django import forms
#from django.http import HttpResponseRedirect, HttpResponse
#from django.core.context_processors import csrf
#from django.template.context import RequestContext# Context
from django.utils.translation import ugettext_lazy as _
#from django.db.models import Q
from Kraggne.models import MenuItem
from Kraggne.contrib.flatblocks.utils import GetTemplatesPath
from django.http import Http404
from django.core.paginator import InvalidPage
def addSelfToContext(slug,context):
try:
page = MenuItem.objects.get(slug=slug)
context['page'] = page
for u in page.pagevar_set.all():
u.addToContext(context)
except:
pass
class GenericViewContextMixinSlug(object):
slug = ''
def get_context_data(self, **kwargs):
context = super(GenericViewContextMixinSlug, self).get_context_data(**kwargs)
addSelfToContext(self.slug,context)
return context
class GenericViewContextMixin(GenericViewContextMixinSlug):
def get_context_data(self, **kwargs):
context = super(GenericViewContextMixin, self).get_context_data(**kwargs)
page = self.kwargs.get('page',False)
if page:
context['page'] = page
else:
return context
for u in page.pagevar_set.all():
u.addToContext(context)
        try:
            context.pop('params')
        except KeyError:
            pass
return context
class GenericView(GenericViewContextMixin,TemplateView):
template_name = "Kraggne/genericPage.html"
#def __init__(self,page):
# self.test = page.url
class GenericDetailView(GenericView):
template_name = "Kraggne/genericDetailPage.html"
model = None
def get_for_object(self,**kwargs):
obj = None
if hasattr(self.model,'get_object_from_url'):
obj = self.model.get_object_from_url(**kwargs)
#by pk
pk = kwargs.get('pk')
if not obj and pk:
r =self.model.objects.filter(pk=pk)
obj = r and r[0] or None
#by slug
if not obj:
slug = kwargs.get('slug')
if slug:
r =self.model.objects.filter(slug=slug)
obj = r and r[0] or None
return obj
def get_template_names(self):
names = []
if hasattr(self.model, '_meta'):
names.append("%s/%s/detail.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
))
names.append(self.template_name)
return names
def get_context_data(self, **kwargs):
context = super(GenericDetailView, self).get_context_data(**kwargs)
context['object'] = self.get_for_object(**kwargs)
        try:
            context["object_model_name"] = "%s.%s" % (self.model._meta.app_label.lower(),self.model._meta.object_name.lower())
        except AttributeError:
            pass
return context
class GenericFormView(GenericViewContextMixin,FormView):
template_name = "Kraggne/genericFormPage.html"
#success_url = None
#def __init__(self,*args,**kwargs):
# print args
# print kwargs
# super(GenericFormView,self).__init__(*args,**kwargs)
def is_model_form(self):
return issubclass(self.get_form_class(),forms.ModelForm)
def get_form(self, form_class):
form = form_class(**self.get_form_kwargs())
if hasattr(form,"request"):
form.request = self.request
return form
    def post(self,request,*args,**kwarg):
        try:
            self.page = kwarg.pop('page')
        except KeyError:
            pass
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form,**kwarg)
else:
return self.form_invalid(form,**kwarg)
def get_success_url(self):
if self.success_url:
return self.success_url
if hasattr(self,'object') and self.object is not None and hasattr(self.object,'get_absolute_url'):
return self.object.get_absolute_url()
if self.slug:
page = MenuItem.objects.filter(slug=self.slug)[:1]
if page:
try:
return page[0].formblock.url
except:
return page[0].url
        if getattr(self, 'page', None):
try:
return self.page.formblock.url
except:
return self.page.url
return ""
def get_context_data(self, **kwargs):
context = super(GenericFormView, self).get_context_data(**kwargs)
page = self.kwargs.get('page',False)
if page:
context['page'] = page
else:
page = MenuItem.objects.get(slug=self.slug)
#self.success_url = page.formblock.url or page.url
if page.url[-1] != "/":
context['action_url'] = page.url + "/"
else:
context['action_url'] = page.url
return context
def get_form_kwargs(self):
kwargs = FormMixin.get_form_kwargs(self)
if hasattr(self,'object'):
kwargs.update({'instance': self.object})
return kwargs
def form_valid(self,form):
if self.is_model_form():
try:
self.object = form.save(commit=True,request=self.request)
except TypeError:
self.object = form.save(commit=True)
return FormMixin.form_valid(self,form)
class GenericListView(GenericViewContextMixin,ListView):
template_name = "Kraggne/genericListPage.html"
paginate_by = 10
def get_context_data(self, **kwargs):
context = super(GenericListView, self).get_context_data(**kwargs)
page = self.kwargs.get('page',False)
if page:
context['page'] = page
        try:
            context["object_model_name"] = "%s.%s" % (self.model._meta.app_label.lower(),self.model._meta.object_name.lower())
        except AttributeError:
            pass
return context
def paginate_queryset(self, queryset, page_size):
paginator = self.get_paginator(queryset, page_size, allow_empty_first_page=self.get_allow_empty())
page = self.request.GET.get('page') or 1
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404(_(u"Page is not 'last', nor can it be converted to an int."))
try:
page = paginator.page(page_number)
return (paginator, page, page.object_list, page.has_other_pages())
except InvalidPage:
raise Http404(_(u'Invalid page (%(page_number)s)') % {
'page_number': page_number
})
def get_paginate_by(self, queryset):
if hasattr(self.model, 'paginate_by'):
return self.model.paginate_by
return self.paginate_by
def get_template_names(self):
names = []
if hasattr(self.model, '_meta'):
names.append("%s/%s/list.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
))
names.append(self.template_name)
return names
class GenericListFormView(GenericListView,FormMixin,ProcessFormView):
template_name = "Kraggne/genericListFormPage.html"
def get_context_data(self,form=None,**kwargs):
context = GenericListView.get_context_data(self,**kwargs)
if not form:
form_class = self.get_form_class()
form = self.get_form(form_class)
context["form"] = form
page = context["page"]
if page.url[-1] != "/":
context['action_url'] = page.url + "/"
else:
context['action_url'] = page.url
return context
def post(self,request,*args,**kwarg):
self.page = kwarg.pop('page')
return ProcessFormView.post(self,request,*args,**kwarg)
def is_model_form(self):
return issubclass(self.get_form_class(),forms.ModelForm)
def get_template_names(self):
names = []
if hasattr(self.model, '_meta'):
names.append("%s/%s/formlist.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
))
names.append("%s/%s/list.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
))
names.append(self.template_name)
return names
def get_success_url(self):
if hasattr(self,'object') and self.object is not None and hasattr(self.object,'get_absolute_url'):
return self.object.get_absolute_url()
if self.slug:
page = MenuItem.objects.filter(slug=self.slug)[:1]
if page:
try:
return page[0].formblock.url
except:
return page[0].url
        if getattr(self, 'page', None):
try:
return self.page.formblock.url
except:
return self.page.url
return None
def get_form_kwargs(self):
"""
Returns the keyword arguments for instanciating the form.
"""
kwargs = FormMixin.get_form_kwargs(self)
if hasattr(self,'object'):
kwargs.update({'instance': self.object})
return kwargs
def form_valid(self,form):
if self.is_model_form():
try:
self.object = form.save(commit=True,request=self.request)
except TypeError:
self.object = form.save(commit=True)
#if hasattr(self.object,'save_model'):
# self.object.save_model(self.request,form,False):
return FormMixin.form_valid(self,form)
def form_invalid(self,form):
self.object_list = self.get_queryset()
return self.render_to_response(self.get_context_data(form=form,object_list=self.object_list))
class GenericDetailFormView(GenericDetailView,FormMixin,ProcessFormView):
template_name = "Kraggne/genericDetailFormPage.html"
def get_context_data(self,form=None,**kwargs):
context = GenericDetailView.get_context_data(self,**kwargs)
if not form:
form_class = self.get_form_class()
form = self.get_form(form_class)
context["form"] = form
context['action_url'] = ""
return context
def post(self,request,*args,**kwarg):
self.page = kwarg.pop('page')
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form,**kwarg)
else:
return self.form_invalid(form,**kwarg)
def is_model_form(self):
return issubclass(self.get_form_class(),forms.ModelForm)
def get_template_names(self):
names = []
if hasattr(self.model, '_meta'):
names.append("%s/%s/formdetail.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
))
names.append("%s/%s/detail.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
))
names.append(self.template_name)
return names
def get_success_url(self):
if hasattr(self,'object') and self.object is not None and hasattr(self.object,'get_absolute_url'):
return self.object.get_absolute_url()
if self.slug:
page = MenuItem.objects.filter(slug=self.slug)[:1]
if page:
try:
return page[0].formblock.url
except:
return page[0].url
        if getattr(self, 'page', None):
try:
return self.page.formblock.url
except:
return self.page.url
return None
def form_valid(self,form,**kwargs):
cur_obj = self.get_for_object(**kwargs)
form.current_object = cur_obj
if self.is_model_form():
try:
self.object = form.save(commit=True,request=self.request)
except TypeError:
self.object = form.save(commit=True)
#if hasattr(self.object,'save_model'):
# self.object.save_model(self.request,form,False):
return FormMixin.form_valid(self,form)
def form_invalid(self,form,**kwargs):
return self.render_to_response(self.get_context_data(**kwargs))
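# Hedged wiring sketch (hypothetical News model; URLconf style matches the
# Django 1.x era this module targets):
#   from django.conf.urls import url
#   from news.models import News
#   urlpatterns = [
#       url(r'^news/(?P<slug>[-\w]+)/$',
#           GenericDetailView.as_view(model=News, slug='news-page')),
#   ]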
|
{
"content_hash": "172cb77aec9b2b89be5d591d608f8910",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 126,
"avg_line_length": 31.62529832935561,
"alnum_prop": 0.5745981435363369,
"repo_name": "Krozark/Harpe-v1.0",
"id": "2c088954c85462ced2863097014ed5a1b1c57aff",
"size": "13275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Harpe-server/Harpe-website/Kraggne/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "3822"
},
{
"name": "C++",
"bytes": "352558"
},
{
"name": "CSS",
"bytes": "69265"
},
{
"name": "HTML",
"bytes": "35726"
},
{
"name": "JavaScript",
"bytes": "143784"
},
{
"name": "Makefile",
"bytes": "6901"
},
{
"name": "Python",
"bytes": "258309"
},
{
"name": "Shell",
"bytes": "90"
},
{
"name": "Smarty",
"bytes": "2745"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
setup(
name='lcom',
packages=find_packages(exclude=['tests']),
version='0.1.0',
description='Lack of cohesion of methods metric',
long_description=readme,
author='Michal Wachowski',
author_email='wachowski.michal@gmail.com',
url='https://github.com/potfur/lcom',
download_url='https://github.com/potfur/lcom/archive/0.1.0.tar.gz',
keywords=[
'cohesion',
'code metrics',
'code quality',
'lcom',
'lcom4'
],
install_requires=[
'click',
'terminaltables'
],
test_suite='tests',
tests_require=[
'pytest',
'mock',
'flake8',
],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
],
entry_points={
'console_scripts': ['lcom=src.command:cmd'],
}
)
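# Hedged usage note: with the setuptools-based setup above, `pip install .`
# exposes the console script `lcom` declared in entry_points (dispatching
# to src.command:cmd), and `python setup.py test` runs the declared suite.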
|
{
"content_hash": "715ebea78f66604dc4b516b3ebdd6613",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 71,
"avg_line_length": 25.891304347826086,
"alnum_prop": 0.5818639798488665,
"repo_name": "potfur/lcom",
"id": "27092e076a114dae5246733ae5bc4a922df16eb1",
"size": "1191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21623"
}
],
"symlink_target": ""
}
|
"""
Example Session Command
Make sure you can authenticate before running this command. This command
is currently hard coded to use the Identity service.
For example:
python -m examples.session /tenants
"""
import sys
from examples import common
from examples import connection
from openstack.identity import identity_service
def make_session(opts):
return connection.make_connection(opts).session
def run_session(opts):
argument = opts.argument
if argument is None:
raise Exception("A path argument must be specified")
sess = make_session(opts)
filtration = identity_service.IdentityService()
print("Session: %s" % sess)
print(sess.get(argument, service=filtration).text)
return
if __name__ == "__main__":
opts = common.setup()
sys.exit(common.main(opts, run_session))
|
{
"content_hash": "8922303344358de01ee7398701edcff7",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 73,
"avg_line_length": 23.8,
"alnum_prop": 0.7202881152460985,
"repo_name": "mtougeron/python-openstacksdk",
"id": "fe55a7cf4d531a857dc494accb54bfc4050e3676",
"size": "1379",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1096143"
},
{
"name": "Shell",
"bytes": "3436"
}
],
"symlink_target": ""
}
|
import RPi.GPIO as GPIO
from ConfigParser import ConfigParser
from ast import literal_eval as safe_eval
import uinput
from raspberry_cereal.constants import CONFIG_PATH
def main():
"""Validates config file"""
config = ConfigParser()
config.read(CONFIG_PATH)
# Type validate each entry not in the keymap
for section in filter(
lambda s: s != 'BIT2KEY_MAP',
config.sections()
):
for option in filter(
lambda o: not o.startswith('type_'),
config.options(section)
):
if "GPIO_attr" in config.get(section, 'type_' + option):
assert hasattr(GPIO, config.get(section, option)), (
"Config validation failed. GPIO does not have "
"attr {}.".format(GPIO, config.get(section, option))
)
assert str(type(safe_eval(config.get(section, option)))) == \
"<type '{}'>".format(config.get(section, 'type_' + option)),\
("Config validation failed. {} expected type was <type '{}'>"
", got {} instead.".format(
option,
config.get(section, 'type_' + option),
str(type(safe_eval(
config.get(section, option))))))
# Validate keymap length
actual = sorted([int(option) for option in config.options('BIT2KEY_MAP')])
expected = range(int(config.get(
'RASPBERRY_CEREAL', 'bus_width')) * int(config.get(
'RASPBERRY_CEREAL', 'shift_registers')))
assert actual == expected, \
("KEY2BIT_MAP does not have items matching specification from"
" bus_width and shift_registers.\nactual: {}\nexpected: {}".format(
actual,
expected))
# Ensure keys in keymap actually exist
for key in [
config.get('BIT2KEY_MAP', key) for key in
config.options('BIT2KEY_MAP')]:
assert hasattr(uinput, key), ("The key, {}, is not valid."
.format(key))
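# Hedged sketch of a config fragment this validator accepts (hypothetical
# values; every non-keymap option needs a matching type_<option> entry, and
# bus_width * shift_registers must equal the number of BIT2KEY_MAP keys):
#   [RASPBERRY_CEREAL]
#   bus_width = 8
#   type_bus_width = int
#   shift_registers = 1
#   type_shift_registers = int
#   [BIT2KEY_MAP]
#   0 = KEY_A
#   ...
#   7 = KEY_H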
|
{
"content_hash": "66208cea89971df4482a1d2d12f06aff",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 88,
"avg_line_length": 39.943396226415096,
"alnum_prop": 0.5403873405762872,
"repo_name": "hfaran/raspberry-cereal",
"id": "041327d4f19e29869fa65022e79c6ddc5bc88a79",
"size": "2117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raspberry_cereal/validate_cfg.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9521"
},
{
"name": "Shell",
"bytes": "375"
}
],
"symlink_target": ""
}
|
from MVC.Controller import Controller
import config
if __name__ == '__main__':
controller = Controller(title=config.__CoinWatcher__)
controller.Run()
|
{
"content_hash": "937172a1b1cafafea988a45633c463b7",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 57,
"avg_line_length": 22.857142857142858,
"alnum_prop": 0.6875,
"repo_name": "mgao6767/CoinWatcher",
"id": "6dce59107506fc73014941201a61d8b93742fe33",
"size": "160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37926"
}
],
"symlink_target": ""
}
|