commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
da1a0f5e7ffcbe37cdee484b452b5376049dd1e5 | Work with Django 1.10 as well. | datawire/mdk,datawire/mdk,datawire/mdk,datawire/mdk | python/django.py | python/django.py | """
Django middleware that enables the MDK.
"""
import atexit
from traceback import format_exception_only
from mdk import start
# Django 1.10 new-style middleware compatibility:
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin = object
class MDKSessionMiddleware(MiddlewareMixin):
"""
Add an MDK session to the Django request, as well as circuit breaker
support.
The request object will get a ``mdk_session`` attribute added to it.
"""
def __init__(self, *args, **kwargs):
MiddlewareMixin.__init__(self, *args, **kwargs)
self.mdk = start()
atexit.register(self.mdk.stop)
def process_request(self, request):
request.mdk_session = self.mdk.join(
request.META.get("HTTP_X_MDK_CONTEXT"))
request.mdk_session.start_interaction()
def process_response(self, request, response):
request.mdk_session.finish_interaction()
del request.mdk_session
return response
def process_exception(self, request, exception):
request.mdk_session.fail_interaction(
"".join(format_exception_only(exception.__class__, exception)))
| """
Django middleware that enables the MDK.
This is old-style (Django <1.10) middleware. Please see
https://docs.djangoproject.com/en/1.10/topics/http/middleware/#upgrading-middleware
if you're using Django 1.10.
"""
import atexit
from traceback import format_exception_only
from mdk import start
class MDKSessionMiddleware(object):
"""
Add an MDK session to the Django request, as well as circuit breaker
support.
The request object will get a ``mdk_session`` attribute added to it.
"""
def __init__(self):
self.mdk = start()
atexit.register(self.mdk.stop)
def process_request(self, request):
request.mdk_session = self.mdk.join(
request.META.get("HTTP_X_MDK_CONTEXT"))
request.mdk_session.start_interaction()
def process_response(self, request, response):
request.mdk_session.finish_interaction()
del request.mdk_session
return response
def process_exception(self, request, exception):
request.mdk_session.fail_interaction(
"".join(format_exception_only(exception.__class__, exception)))
| apache-2.0 | Python |
742a800d784a7f24cee1a1cdd522e2259a183f6d | make weeds, flowers and bushes instad of cars | tartley/chronotank | pyweek12/main.py | pyweek12/main.py |
import sys
from os.path import join
from random import uniform
import pyglet
from rabbyt.sprites import Sprite
from .camera import Camera
from .color import Color
from .eventloop import Eventloop
from .options import Options
from .path import DATA
from .render import Render
from .world import World
def main():
options = Options(sys.argv)
world = World()
world.background_color = Color(0.1, 0.3, 0)
def rotate(item, _, dt):
item.rot += item.angular_velocity * dt
for _ in xrange(100):
for image in ['bush.png', 'flower.png', 'weed.png']:
item = Sprite(
join(DATA, 'images', image),
x=uniform(-2000, 2000),
y=uniform(-2000, 2000),
scale=uniform(0.5, 1),
rot=uniform(0, 360),
green=uniform(0.6, 1),
)
world.add(item)
item = Sprite(
join(DATA, 'images', 'tank.png'),
x=0, y=0,
scale=1,
)
item.update = rotate
item.angular_velocity = 20
world.add(item)
window = pyglet.window.Window(
fullscreen=options.fullscreen,
vsync=options.vsync,
visible=False,
resizable=True,
)
camera = Camera((0, 0), 800)
render = Render(world, camera, options)
eventloop = Eventloop(window, world, render, options)
eventloop.run(world.update)
|
import sys
from os.path import join
from random import uniform
import pyglet
from rabbyt.sprites import Sprite
from .camera import Camera
from .eventloop import Eventloop
from .options import Options
from .path import DATA
from .render import Render
from .world import World
def main():
options = Options(sys.argv)
world = World()
def rotate(item, *_):
item.rot += item.angular_velocity
for _ in xrange(100):
item = Sprite(
join(DATA, 'images', 'car.png'),
x=uniform(-100, 100),
y=uniform(-100, 100),
scale=uniform(0.1, 0.5),
rot=uniform(0, 360),
)
item.update = rotate
item.angular_velocity = uniform(-0.1, +0.1)
world.add(item)
item = Sprite(
join(DATA, 'images', 'tank.png'),
x=uniform(-100, 100),
y=uniform(-100, 100),
scale=uniform(0.1, 0.5),
rot=uniform(0, 360),
)
item.update = rotate
item.angular_velocity = uniform(-0.1, +0.1)
world.add(item)
window = pyglet.window.Window(
fullscreen=options.fullscreen,
vsync=options.vsync,
visible=False,
resizable=True,
)
camera = Camera((0, 0), 100)
render = Render(world, camera, options)
eventloop = Eventloop(window, world, render, options)
eventloop.run(world.update)
| bsd-3-clause | Python |
b413f018dca708588170fe93532ab8cc19dc55b7 | Add filename creation output to templater | gdestuynder/MozDef,gdestuynder/MozDef,mpurzynski/MozDef,jeffbryner/MozDef,gdestuynder/MozDef,jeffbryner/MozDef,Phrozyn/MozDef,mozilla/MozDef,mpurzynski/MozDef,gdestuynder/MozDef,mozilla/MozDef,Phrozyn/MozDef,mozilla/MozDef,mozilla/MozDef,mpurzynski/MozDef,Phrozyn/MozDef,mpurzynski/MozDef,Phrozyn/MozDef,jeffbryner/MozDef,jeffbryner/MozDef | tests/alert_templater.py | tests/alert_templater.py | import os
import sys
# Handle python2
try:
input = raw_input
except NameError:
pass
alert_name = input('Enter your alert name (Example: proxy drop executable): ')
classname = ""
for token in alert_name.split(" "):
classname += token.title()
alert_classname = "Alert{0}".format(classname)
test_alert_classname = "Test{0}".format(classname)
filename = alert_name.replace(" ", "_")
alert_filepath = 'alerts/{0}.py'.format(filename)
test_filepath = 'tests/alerts/test_{0}.py'.format(filename)
if os.path.isfile(alert_filepath) or os.path.isfile(test_filepath):
print("ERROR: {0} already exists...exiting".format(alert_filepath))
sys.exit(1)
with open(alert_filepath, "w") as python_alert_file:
with open('alerts/alert_template.template', 'r') as alert_template_file:
alert_template_content = alert_template_file.read()
alert_template_content = alert_template_content.replace('TEMPLATE_ALERT_CLASSNAME', alert_classname)
print("Creating {0}".format(alert_filepath))
python_alert_file.write(alert_template_content)
with open(test_filepath, "w") as test_alert_file:
with open('tests/alerts/test_alert_template.template', 'r') as test_template_content:
test_template_content = test_template_content.read()
test_template_content = test_template_content.replace('TEMPLATE_TEST_CLASSNAME', test_alert_classname)
test_template_content = test_template_content.replace('TEMPLATE_ALERT_FILENAME', filename)
test_template_content = test_template_content.replace('TEMPLATE_ALERT_CLASSNAME', alert_classname)
print("Creating {0}".format(test_filepath))
test_alert_file.write(test_template_content)
| import os
import sys
# Handle python2
try:
input = raw_input
except NameError:
pass
alert_name = input('Enter your alert name (Example: proxy drop executable): ')
classname = ""
for token in alert_name.split(" "):
classname += token.title()
alert_classname = "Alert{0}".format(classname)
test_alert_classname = "Test{0}".format(classname)
filename = alert_name.replace(" ", "_")
alert_filepath = 'alerts/{0}.py'.format(filename)
test_filepath = 'tests/alerts/test_{0}.py'.format(filename)
if os.path.isfile(alert_filepath) or os.path.isfile(test_filepath):
print("ERROR: {0} already exists...exiting".format(alert_filepath))
sys.exit(1)
with open(alert_filepath, "w") as python_alert_file:
with open('alerts/alert_template.template', 'r') as alert_template_file:
alert_template_content = alert_template_file.read()
alert_template_content = alert_template_content.replace('TEMPLATE_ALERT_CLASSNAME', alert_classname)
python_alert_file.write(alert_template_content)
with open(test_filepath, "w") as test_alert_file:
with open('tests/alerts/test_alert_template.template', 'r') as test_template_content:
test_template_content = test_template_content.read()
test_template_content = test_template_content.replace('TEMPLATE_TEST_CLASSNAME', test_alert_classname)
test_template_content = test_template_content.replace('TEMPLATE_ALERT_FILENAME', filename)
test_template_content = test_template_content.replace('TEMPLATE_ALERT_CLASSNAME', alert_classname)
test_alert_file.write(test_template_content)
| mpl-2.0 | Python |
087786e67f7c57ad43e39b97838569e60f774954 | fix FrameDebugger parent calls for Python 2 | nodepy/nodepy | nodepy/utils/__init__.py | nodepy/utils/__init__.py |
import pdb
import six
import sys
from .path import pathlib
from . import context, iter, machinery, path
def as_text(x, encoding=None):
"""
Accepts a binary or unicode string and returns a unicode string. If *x* is
not a string type, a #TypeError is raised.
"""
if not isinstance(x, six.string_types):
raise TypeError('expected string, got {} instead'.format(type(x).__name__))
if not isinstance(x, six.text_type):
x = x.decode(encoding or sys.getdefaultencoding())
return x
class FrameDebugger(pdb.Pdb):
"""
This debugger allows to interact with a frame after it has completed
executing, much like #pdb.post_mortem() but without requiring a traceback.
"""
def interaction(self, frame, traceback=None):
"""
Allows you to interact with the specified *frame*. If a *traceback* is
specified, the function behaves just like #pdb.Pdb.interaction(). Use
this function for dead frames only. If you want step-by-step debugging,
use the #set_trace() method instead.
"""
# This is just a proxy function for documentation purposes.
self.reset()
return pdb.Pdb.interaction(self, frame, traceback)
def setup(self, f, tb):
if tb is not None:
return pdb.Pdb.setup(self, f, tb)
else:
# Imitate what the parent function is doing as much as possible,
# but without a traceback
self.forget()
self.stack, self.curindex = self.get_stack(f, tb)
# XXX We may still need to reproduce the following lines:
"""
while tb:
# when setting up post-mortem debugging with a traceback, save all
# the original line numbers to be displayed along the current line
# numbers (which can be different, e.g. due to finally clauses)
lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti)
self.tb_lineno[tb.tb_frame] = lineno
tb = tb.tb_next
"""
self.curframe = self.stack[self.curindex][0]
# The f_locals dictionary is updated from the actual frame
# locals whenever the .f_locals accessor is called, so we
# cache it here to ensure that modifications are not overwritten.
self.curframe_locals = self.curframe.f_locals
return self.execRcLines()
|
import pdb
import six
import sys
from .path import pathlib
from . import context, iter, machinery, path
def as_text(x, encoding=None):
"""
Accepts a binary or unicode string and returns a unicode string. If *x* is
not a string type, a #TypeError is raised.
"""
if not isinstance(x, six.string_types):
raise TypeError('expected string, got {} instead'.format(type(x).__name__))
if not isinstance(x, six.text_type):
x = x.decode(encoding or sys.getdefaultencoding())
return x
class FrameDebugger(pdb.Pdb):
"""
This debugger allows to interact with a frame after it has completed
executing, much like #pdb.post_mortem() but without requiring a traceback.
"""
def interaction(self, frame, traceback=None):
"""
Allows you to interact with the specified *frame*. If a *traceback* is
specified, the function behaves just like #pdb.Pdb.interaction(). Use
this function for dead frames only. If you want step-by-step debugging,
use the #set_trace() method instead.
"""
# This is just a proxy function for documentation purposes.
self.reset()
return super(FrameDebugger, self).interaction(frame, traceback)
def setup(self, f, tb):
if tb is not None:
return super(FrameDebugger, self).setup(f, tb)
else:
# Imitate what the parent function is doing as much as possible,
# but without a traceback
self.forget()
self.stack, self.curindex = self.get_stack(f, tb)
# XXX We may still need to reproduce the following lines:
"""
while tb:
# when setting up post-mortem debugging with a traceback, save all
# the original line numbers to be displayed along the current line
# numbers (which can be different, e.g. due to finally clauses)
lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti)
self.tb_lineno[tb.tb_frame] = lineno
tb = tb.tb_next
"""
self.curframe = self.stack[self.curindex][0]
# The f_locals dictionary is updated from the actual frame
# locals whenever the .f_locals accessor is called, so we
# cache it here to ensure that modifications are not overwritten.
self.curframe_locals = self.curframe.f_locals
return self.execRcLines()
| mit | Python |
f6cf1827eb05d85453f80fef98a14b1a0730e6cb | Remove useless assert. | isislovecruft/scramblesuit,isislovecruft/scramblesuit | packetmorpher.py | packetmorpher.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Provides code to morph a chunk of data to a given probability distribution.
The class provides an interface to morph network packet lengths to a previously
generated probability distribution. The packet lengths of the morphed network
data should then match the probability distribution.
"""
import random
import probdist
import const
import obfsproxy.common.log as logging
log = logging.get_obfslogger()
class PacketMorpher( object ):
"""Provides an interface to morph large chunks of bytes to a given target
probability distribution. This is implemented by naively sampling the
target probability distribution."""
def __init__( self, dist=None ):
"""Initialise the PacketMorpher with a discrete probability
distribution. If none is given, a distribution is randomly
generated."""
if dist:
self.dist = dist
else:
self.dist = probdist.new(lambda: random.randint(const.HDR_LENGTH,
const.MTU))
def calcPadding( self, dataLen ):
# The source and target length of the burst's last packet.
dataLen = dataLen % const.MTU
sampleLen = self.dist.randomSample()
if sampleLen >= dataLen:
padLen = sampleLen - dataLen
else:
padLen = (const.MTU - dataLen) + sampleLen
log.debug("Morphing the last %d-byte packet to %d bytes by adding %d "
"bytes of padding." %
(dataLen % const.MTU, sampleLen, padLen))
return padLen
def morph( self, dataLen ):
"""Based on `dataLen', the length of the data to morph, this function
returns a chopper function which is used to chop the data as well as
the length of padding which is appended to the last protocol
message."""
breakPoints = []
lastBreakPoint = 0
progress = 0
while progress < dataLen:
newBreakPoint = progress + self.dist.randomSample()
breakPoints.append((lastBreakPoint, newBreakPoint))
lastBreakPoint = newBreakPoint
progress = newBreakPoint
paddingLen = progress - dataLen
breakPoints.append((lastBreakPoint, progress))
chopper = lambda data: [data[x:y] for (x, y) in breakPoints]
return (chopper, paddingLen)
def randomSample( self ):
"""Return a random sample of the stored probability distribution."""
return self.dist.randomSample()
# Alias class name in order to provide a more intuitive API.
new = PacketMorpher
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Provides code to morph a chunk of data to a given probability distribution.
The class provides an interface to morph network packet lengths to a previously
generated probability distribution. The packet lengths of the morphed network
data should then match the probability distribution.
"""
import random
import probdist
import const
import obfsproxy.common.log as logging
log = logging.get_obfslogger()
class PacketMorpher( object ):
"""Provides an interface to morph large chunks of bytes to a given target
probability distribution. This is implemented by naively sampling the
target probability distribution."""
def __init__( self, dist=None ):
"""Initialise the PacketMorpher with a discrete probability
distribution. If none is given, a distribution is randomly
generated."""
assert len(secret) == const.SHARED_SECRET_LENGTH
if dist:
self.dist = dist
else:
self.dist = probdist.new(lambda: random.randint(const.HDR_LENGTH,
const.MTU))
def calcPadding( self, dataLen ):
# The source and target length of the burst's last packet.
dataLen = dataLen % const.MTU
sampleLen = self.dist.randomSample()
if sampleLen >= dataLen:
padLen = sampleLen - dataLen
else:
padLen = (const.MTU - dataLen) + sampleLen
log.debug("Morphing the last %d-byte packet to %d bytes by adding %d "
"bytes of padding." %
(dataLen % const.MTU, sampleLen, padLen))
return padLen
def morph( self, dataLen ):
"""Based on `dataLen', the length of the data to morph, this function
returns a chopper function which is used to chop the data as well as
the length of padding which is appended to the last protocol
message."""
breakPoints = []
lastBreakPoint = 0
progress = 0
while progress < dataLen:
newBreakPoint = progress + self.dist.randomSample()
breakPoints.append((lastBreakPoint, newBreakPoint))
lastBreakPoint = newBreakPoint
progress = newBreakPoint
paddingLen = progress - dataLen
breakPoints.append((lastBreakPoint, progress))
chopper = lambda data: [data[x:y] for (x, y) in breakPoints]
return (chopper, paddingLen)
def randomSample( self ):
"""Return a random sample of the stored probability distribution."""
return self.dist.randomSample()
# Alias class name in order to provide a more intuitive API.
new = PacketMorpher
| bsd-3-clause | Python |
084ff6d007017ab13fc3d8be056995fc3d2f9d29 | Bump version. | ryankask/django-discoverage,ryankask/django-discoverage | discoverage/__init__.py | discoverage/__init__.py | from discoverage.runner import DiscoverageRunner
__version__ = '0.3.0'
| from discoverage.runner import DiscoverageRunner
__version__ = '0.2.1'
| bsd-2-clause | Python |
1cef1da1e1da1b6592e1a656f8266325d10b0161 | Add missing whitespace to error message | projectatomic/atomic-reactor,DBuildService/atomic-reactor,projectatomic/atomic-reactor,fr34k8/atomic-reactor,DBuildService/atomic-reactor,fr34k8/atomic-reactor | atomic_reactor/plugins/pre_check_user_settings.py | atomic_reactor/plugins/pre_check_user_settings.py | """
Copyright (c) 2020 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals, absolute_import
from atomic_reactor.constants import PLUGIN_CHECK_USER_SETTINGS
from atomic_reactor.plugin import PreBuildPlugin
from atomic_reactor.util import (
has_operator_appregistry_manifest,
has_operator_bundle_manifest,
)
class CheckUserSettingsPlugin(PreBuildPlugin):
"""
Pre plugin will check user settings on early phase to fail early and save resources.
Aim of this plugin to checks:
* Dockerfile
* container.yaml
* git repo
for incorrect options or mutually exclusive options
"""
key = PLUGIN_CHECK_USER_SETTINGS
is_allowed_to_fail = False
def __init__(self, tasker, workflow, flatpak=False):
"""
:param tasker: ContainerTasker instance
:param workflow: DockerBuildWorkflow instance
:param flatpak: bool, if build is for flatpak
"""
super(CheckUserSettingsPlugin, self).__init__(tasker, workflow)
self.flatpak = flatpak
def dockerfile_checks(self):
"""Checks for Dockerfile"""
if self.flatpak:
self.log.info(
"Skipping Dockerfile checks because this is flatpak build "
"without user Dockerfile")
return
self.appregistry_bundle_label_mutually_exclusive()
def appregistry_bundle_label_mutually_exclusive(self):
"""Labels com.redhat.com.delivery.appregistry and
com.redhat.delivery.operator.bundle
are mutually exclusive. Fail when both are specified.
"""
msg = (
"only one of labels com.redhat.com.delivery.appregistry "
"and com.redhat.delivery.operator.bundle is allowed"
)
self.log.debug("Running check: %s", msg)
if (
has_operator_appregistry_manifest(self.workflow) and
has_operator_bundle_manifest(self.workflow)
):
raise ValueError(msg)
def run(self):
"""
run the plugin
"""
self.dockerfile_checks()
| """
Copyright (c) 2020 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals, absolute_import
from atomic_reactor.constants import PLUGIN_CHECK_USER_SETTINGS
from atomic_reactor.plugin import PreBuildPlugin
from atomic_reactor.util import (
has_operator_appregistry_manifest,
has_operator_bundle_manifest,
)
class CheckUserSettingsPlugin(PreBuildPlugin):
"""
Pre plugin will check user settings on early phase to fail early and save resources.
Aim of this plugin to checks:
* Dockerfile
* container.yaml
* git repo
for incorrect options or mutually exclusive options
"""
key = PLUGIN_CHECK_USER_SETTINGS
is_allowed_to_fail = False
def __init__(self, tasker, workflow, flatpak=False):
"""
:param tasker: ContainerTasker instance
:param workflow: DockerBuildWorkflow instance
:param flatpak: bool, if build is for flatpak
"""
super(CheckUserSettingsPlugin, self).__init__(tasker, workflow)
self.flatpak = flatpak
def dockerfile_checks(self):
"""Checks for Dockerfile"""
if self.flatpak:
self.log.info(
"Skipping Dockerfile checks because this is flatpak build "
"without user Dockerfile")
return
self.appregistry_bundle_label_mutually_exclusive()
def appregistry_bundle_label_mutually_exclusive(self):
"""Labels com.redhat.com.delivery.appregistry and
com.redhat.delivery.operator.bundle
are mutually exclusive. Fail when both are specified.
"""
msg = (
"only one of labels com.redhat.com.delivery.appregistry"
"and com.redhat.delivery.operator.bundle is allowed"
)
self.log.debug("Running check: %s", msg)
if (
has_operator_appregistry_manifest(self.workflow) and
has_operator_bundle_manifest(self.workflow)
):
raise ValueError(msg)
def run(self):
"""
run the plugin
"""
self.dockerfile_checks()
| bsd-3-clause | Python |
fa5b8686afc1949c1a0e391a38d425c745357d5d | remove superfluous logging | XertroV/nodeup-xk-io,XertroV/nodeup-xk-io,XertroV/nodeup-xk-io,XertroV/nodeup-xk-io | nodeup-server/tweeter.py | nodeup-server/tweeter.py | #!/usr/bin/env python3
import time
import logging
import tweepy
from models import twitter_access_token, twitter_access_secret, twitter_consumer_secret, twitter_consumer_key, tweet_queue
logging.basicConfig(level=logging.INFO)
def get_api():
auth = tweepy.OAuthHandler(twitter_consumer_key.get(), twitter_consumer_secret.get())
auth.set_access_token(twitter_access_token.get(), twitter_access_secret.get())
return tweepy.API(auth)
if __name__ == "__main__":
api = get_api()
while True:
if len(tweet_queue) > 0:
tweet = tweet_queue.popleft().decode()
try:
logging.info('Tweeting %s' % repr(tweet))
api.update_status(status=tweet)
except Exception as e:
tweet_queue.prepend(tweet)
logging.error('Tweet failed: %s gave error %s' % (tweet, repr(e)))
raise e
time.sleep(5)
| #!/usr/bin/env python3
import time
import logging
import tweepy
from models import twitter_access_token, twitter_access_secret, twitter_consumer_secret, twitter_consumer_key, tweet_queue
logging.basicConfig(level=logging.INFO)
def get_api():
auth = tweepy.OAuthHandler(twitter_consumer_key.get(), twitter_consumer_secret.get())
auth.set_access_token(twitter_access_token.get(), twitter_access_secret.get())
return tweepy.API(auth)
if __name__ == "__main__":
api = get_api()
while True:
if len(tweet_queue) > 0:
tweet = tweet_queue.popleft().decode()
try:
logging.info('Tweeting %s' % repr(tweet))
logging.info(str(type(tweet)))
api.update_status(status=tweet)
except Exception as e:
tweet_queue.prepend(tweet)
logging.error('Tweet failed: %s gave error %s' % (tweet, repr(e)))
raise e
time.sleep(5)
| mit | Python |
f3c88a8dfe1b70796594f34ce1dcb81d60b424bc | Fix bitrot in ipmi deployment tests. | henn/hil,meng-sun/hil,SahilTikale/haas,henn/hil_sahil,henn/hil,kylehogan/hil,henn/haas,kylehogan/haas,meng-sun/hil,CCI-MOC/haas,kylehogan/hil,henn/hil_sahil | tests/deployment/ipmi.py | tests/deployment/ipmi.py | # Copyright 2013-2015 Massachusetts Open Cloud Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Unit tests for headnodes.
These require an actual libvirt daemon (and full HaaS setup), and are
somewhat particular to the MOC's development environment. They may be
difficult to run in other contexts.
"""
from haas.test_common import *
from haas import config, server, rest
import pytest
@pytest.fixture
def configure():
config_testsuite()
config.load_extensions()
@pytest.fixture
def db(request):
return fresh_database(request)
@pytest.fixture
def server_init():
server.register_drivers()
server.validate_state()
with_request_context = pytest.yield_fixture(with_request_context)
headnode_cleanup = pytest.fixture(headnode_cleanup)
pytestmark = pytest.mark.usefixtures('configure',
'server_init',
'db',
'with_request_context',
'headnode_cleanup')
class TestIpmi():
""" Test IPMI driver calls using functions included in the IPMI driver. """
def collect_nodes(self, db):
""" Collects nodes in the free list.
Raises error if free-nodes are less than 2.
"""
free_nodes = db.query(Node).filter_by(project_id=None).all()
return free_nodes
def test_node_power_cycle(self, db):
nodes = self.collect_nodes(db)
for node in nodes:
api.node_power_cycle(node.label)
def test_node_power_off(self, db):
nodes = self.collect_nodes(db)
for node in nodes:
api.node_power_off(node.label)
| # Copyright 2013-2015 Massachusetts Open Cloud Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Unit tests for headnodes.
These require an actual libvirt daemon (and full HaaS setup), and are
somewhat particular to the MOC's development environment. They may be
difficult to run in other contexts.
"""
from haas.test_common import *
from haas import config, server, rest
import pytest
@pytest.fixture
def configure():
config_testsuite()
config.load_extensions()
@pytest.fixture
def db(request):
return fresh_database(request)
@pytest.fixture
def server_init():
server.register_drivers()
server.validate_state()
@pytest.yield_fixture
def with_request_context():
with rest.RequestContext():
yield
headnode_cleanup = pytest.fixture(headnode_cleanup)
pytestmark = pytest.mark.usefixtures('configure',
'server_init',
'db',
'with_request_context',
'headnode_cleanup')
class TestIpmi():
""" Test IPMI driver calls using functions included in the IPMI driver. """
def collect_nodes(self, db):
""" Collects nodes in the free list.
Raises error if free-nodes are less than 2.
"""
free_nodes = db.query(Node).filter_by(project_id=None).all()
return free_nodes
def test_node_power_cycle(self, db):
nodes = self.collect_nodes(db)
for node in nodes:
api.node_power_cycle(node.label)
def test_node_power_off(self, db):
nodes = self.collect_nodes(db)
for node in nodes:
api.node_power_off(node.label)
| apache-2.0 | Python |
de99a7e665bcba140865ae8f7cfbb3264541f748 | Return a PartialOrderTuple from dimension_sort() | opesci/devito,opesci/devito | devito/ir/equations/algorithms.py | devito/ir/equations/algorithms.py | from operator import attrgetter
from devito.dimension import Dimension
from devito.symbolics import retrieve_indexed, split_affine
from devito.tools import PartialOrderTuple, filter_sorted, flatten
__all__ = ['dimension_sort']
def dimension_sort(expr):
"""
Topologically sort the :class:`Dimension`s in ``expr``, based on the order
in which they appear within :class:`Indexed`s.
"""
def handle_indexed(indexed):
relation = []
for i in indexed.indices:
try:
maybe_dim = split_affine(i).var
if isinstance(maybe_dim, Dimension):
relation.append(maybe_dim)
except ValueError:
# Maybe there are some nested Indexeds (e.g., the situation is A[B[i]])
nested = flatten(handle_indexed(n) for n in retrieve_indexed(i))
if nested:
relation.extend(nested)
else:
# Fallback: Just insert all the Dimensions we find, regardless of
# what the user is attempting to do
relation.extend([d for d in filter_sorted(i.free_symbols)
if isinstance(d, Dimension)])
return tuple(relation)
relations = [handle_indexed(i) for i in retrieve_indexed(expr, mode='all')]
# Add in leftover free dimensions (not an Indexed' index)
extra = set([i for i in expr.free_symbols if isinstance(i, Dimension)])
# Add in pure data dimensions (e.g., those accessed only via explicit values,
# such as A[3])
indexeds = retrieve_indexed(expr, deep=True)
extra.update(set().union(*[set(i.function.indices) for i in indexeds]))
# Enforce determinism
extra = filter_sorted(extra, key=attrgetter('name'))
# Add in implicit relations for parent dimensions
# Note that (d.parent, d) is what we want, while (d, d.parent) would be wrong;
# for example, in `((t, time), (t, x, y), (x, y))`, `x` could now preceed
# `time`, while `t`, and therefore `time`, *must* appear before `x`, as
# indicated by the second relation
relations.extend([(d.parent, d) for d in extra if d.is_Derived])
ordering = PartialOrderTuple(extra, relations=relations)
return ordering
| from operator import attrgetter
from devito.dimension import Dimension
from devito.symbolics import retrieve_indexed, split_affine
from devito.tools import filter_sorted, flatten, toposort
__all__ = ['dimension_sort']
def dimension_sort(expr, key=None):
"""
Topologically sort the :class:`Dimension`s in ``expr``, based on the order
in which they appear within :class:`Indexed`s.
:param expr: The :class:`devito.Eq` from which the :class:`Dimension`s are
extracted.
:param key: A callable used as key to enforce a final ordering.
"""
def handle_indexed(indexed):
constraint = []
for i in indexed.indices:
try:
maybe_dim = split_affine(i).var
if isinstance(maybe_dim, Dimension):
constraint.append(maybe_dim)
except ValueError:
# Maybe there are some nested Indexeds (e.g., the situation is A[B[i]])
nested = flatten(handle_indexed(n) for n in retrieve_indexed(i))
if nested:
constraint.extend(nested)
else:
# Fallback: Just insert all the Dimensions we find, regardless of
# what the user is attempting to do
constraint.extend([d for d in filter_sorted(i.free_symbols)
if isinstance(d, Dimension)])
return constraint
constraints = [handle_indexed(i) for i in retrieve_indexed(expr, mode='all')]
ordering = toposort(constraints)
# Add in leftover free dimensions (not an Indexed' index)
extra = set([i for i in expr.free_symbols if isinstance(i, Dimension)])
# Add in pure data dimensions (e.g., those accessed only via explicit values,
# such as A[3])
indexeds = retrieve_indexed(expr, deep=True)
if indexeds:
extra.update(set.union(*[set(i.function.indices) for i in indexeds]))
# Enforce determinism
extra = filter_sorted(extra, key=attrgetter('name'))
ordering.extend([i for i in extra if i not in ordering])
# Add in parent dimensions
for i in list(ordering):
if i.is_Derived and i.parent not in ordering:
ordering.insert(ordering.index(i), i.parent)
return sorted(ordering, key=key)
| mit | Python |
5292a379a7ebe316f92cae55d5af82b5323b9c1b | raise an exception if site_user_id is not passed to the interface server | hiidef/hiispider,hiidef/hiispider | hiispider/metacomponents/interface.py | hiispider/metacomponents/interface.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Executes new jobs on behalf of the Django application."""
from uuid import uuid4
from twisted.internet.defer import inlineCallbacks, returnValue
from hiispider.components import *
from hiispider.metacomponents import *
import logging
LOGGER = logging.getLogger(__name__)
class Interface(Component):
allow_clients = False
requires = [MySQL, PageGetter, Cassandra, Logger]
def __init__(self, server, config, server_mode, **kwargs):
super(Interface, self).__init__(server, server_mode)
self.mysql = self.server.mysql # For legacy plugins.
def initialize(self):
exposed = [x for x in self.server.functions.items() if x[1]["interval"] > 0]
for f in self.server.functions.values():
f["pass_kwargs_to_callback"] = True
for function_name, func in exposed:
self.server.add_callback(function_name, self._execute_callback)
self.server.add_errback(function_name, self._execute_errback)
LOGGER.debug("Added %s callback and errback." % function_name)
# disable fast cache on the interface server
self.server.config['enable_fast_cache'] = False
@inlineCallbacks
def _execute_callback(self, data, kwargs):
uuid = uuid4().hex
# FIXME: what should we do if there's no site_user_id?
user_id = kwargs.get('site_user_id', '')
if not user_id:
raise Exception("site_user_id is a required keyword argument.")
yield self.server.cassandra.setData(user_id, data, uuid)
returnValue({uuid:data})
def _execute_errback(self, error):
return error
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Executes new jobs on behalf of the Django application."""
from uuid import uuid4
from twisted.internet.defer import inlineCallbacks, returnValue
from hiispider.components import *
from hiispider.metacomponents import *
import logging
LOGGER = logging.getLogger(__name__)
class Interface(Component):
allow_clients = False
requires = [MySQL, PageGetter, Cassandra, Logger]
def __init__(self, server, config, server_mode, **kwargs):
super(Interface, self).__init__(server, server_mode)
self.mysql = self.server.mysql # For legacy plugins.
def initialize(self):
exposed = [x for x in self.server.functions.items() if x[1]["interval"] > 0]
for f in self.server.functions.values():
f["pass_kwargs_to_callback"] = True
for function_name, func in exposed:
self.server.add_callback(function_name, self._execute_callback)
self.server.add_errback(function_name, self._execute_errback)
LOGGER.debug("Added %s callback and errback." % function_name)
# disable fast cache on the interface server
self.server.config['enable_fast_cache'] = False
@inlineCallbacks
def _execute_callback(self, data, kwargs):
uuid = uuid4().hex
# FIXME: what should we do if there's no site_user_id?
user_id = kwargs.get('site_user_id', '')
yield self.server.cassandra.setData(user_id, data, uuid)
returnValue({uuid:data})
def _execute_errback(self, error):
return error
| mit | Python |
87d15e732c0465ae2e243bff5d4f58cd620444b5 | Use NVE method in test instead of Brownian | joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue | hoomd/md/pytest/test_zero_momentum.py | hoomd/md/pytest/test_zero_momentum.py | import hoomd
import numpy as np
import pytest
def test_before_attaching():
trigger = hoomd.trigger.Periodic(100)
zm = hoomd.md.update.ZeroMomentum(trigger)
assert zm.trigger is trigger
trigger = hoomd.trigger.Periodic(10, 30)
zm.trigger = trigger
assert zm.trigger is trigger
def test_after_attaching(simulation_factory,
two_particle_snapshot_factory):
sim = simulation_factory(two_particle_snapshot_factory())
nve = hoomd.md.methods.NVE(filter=hoomd.filter.All())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[nve])
trigger = hoomd.trigger.Periodic(100)
zm = hoomd.md.update.ZeroMomentum(trigger)
sim.operations.add(zm)
sim.run(0)
assert zm.trigger is trigger
trigger = hoomd.trigger.Periodic(10, 30)
zm.trigger = trigger
assert zm.trigger is trigger
sim.run(100)
def test_momentum_is_zero(simulation_factory,
two_particle_snapshot_factory):
snap = two_particle_snapshot_factory()
if snap.exists:
snap.particles.velocity[0] = [0, 0, 0]
snap.particles.velocity[1] = [2, 0, 0]
snap.particles.mass[0] = 1
snap.particles.mass[1] = 1
sim = simulation_factory(snap)
nve = hoomd.md.methods.NVE(filter=hoomd.filter.All())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[nve])
zm = hoomd.md.update.ZeroMomentum(hoomd.trigger.Periodic(1))
sim.operations.add(zm)
sim.run(0)
masses = sim.state.snapshot.particles.mass
velocities = sim.state.snapshot.particles.velocity
p = [sum([m * v[i] for m, v in zip(masses, velocities)]) for i in range(3)]
assert p == [2, 0, 0]
sim.run(1)
masses = sim.state.snapshot.particles.mass
velocities = sim.state.snapshot.particles.velocity
for i in range(3):
pi = sum([m * v[i] for m, v in zip(masses, velocities)])
assert pi == 0
| import hoomd
import numpy as np
import pytest
def test_before_attaching():
trigger = hoomd.trigger.Periodic(100)
zm = hoomd.md.update.ZeroMomentum(trigger)
assert zm.trigger is trigger
trigger = hoomd.trigger.Periodic(10, 30)
zm.trigger = trigger
assert zm.trigger is trigger
def test_after_attaching(simulation_factory,
two_particle_snapshot_factory):
brownian = hoomd.md.methods.Brownian(filter=hoomd.filter.All(),
kT=hoomd.variant.Constant(2.0),
seed=2)
sim = simulation_factory(two_particle_snapshot_factory())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[brownian])
trigger = hoomd.trigger.Periodic(100)
zm = hoomd.md.update.ZeroMomentum(trigger)
sim.operations.add(zm)
sim.run(0)
assert zm.trigger is trigger
trigger = hoomd.trigger.Periodic(10, 30)
zm.trigger = trigger
assert zm.trigger is trigger
sim.run(100)
def test_momentum_is_zero(simulation_factory,
two_particle_snapshot_factory):
snap = two_particle_snapshot_factory()
if snap.exists:
snap.particles.velocity[0] = [0, 0, 0]
snap.particles.velocity[1] = [2, 0, 0]
snap.particles.mass[0] = 1
snap.particles.mass[1] = 1
sim = simulation_factory(snap)
nve = hoomd.md.methods.NVE(filter=hoomd.filter.All())
sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[nve])
zm = hoomd.md.update.ZeroMomentum(hoomd.trigger.Periodic(1))
sim.operations.add(zm)
sim.run(0)
masses = sim.state.snapshot.particles.mass
velocities = sim.state.snapshot.particles.velocity
p = [sum([m * v[i] for m, v in zip(masses, velocities)]) for i in range(3)]
assert p == [2, 0, 0]
sim.run(1)
masses = sim.state.snapshot.particles.mass
velocities = sim.state.snapshot.particles.velocity
for i in range(3):
pi = sum([m * v[i] for m, v in zip(masses, velocities)])
assert pi == 0
| bsd-3-clause | Python |
2382c2df2da5a2642c67933ecc89995ea6e35bb1 | Fix Jasmine unit tests failing | Dark-Hacker/horizon,Metaswitch/horizon,endorphinl/horizon,davidcusatis/horizon,idjaw/horizon,doug-fish/horizon,django-leonardo/horizon,bigswitch/horizon,wangxiangyu/horizon,mdavid/horizon,NCI-Cloud/horizon,wolverineav/horizon,xinwu/horizon,philoniare/horizon,tellesnobrega/horizon,j4/horizon,kfox1111/horizon,j4/horizon,saydulk/horizon,bigswitch/horizon,doug-fish/horizon,xinwu/horizon,BiznetGIO/horizon,Hodorable/0602,Mirantis/mos-horizon,Dark-Hacker/horizon,liyitest/rr,maestro-hybrid-cloud/horizon,eayunstack/horizon,Daniex/horizon,tqtran7/horizon,Hodorable/0602,mandeepdhami/horizon,doug-fish/horizon,bac/horizon,bac/horizon,watonyweng/horizon,endorphinl/horizon-fork,openstack/horizon,anthonydillon/horizon,sandvine/horizon,icloudrnd/automation_tools,FNST-OpenStack/horizon,dan1/horizon-proto,yjxtogo/horizon,luhanhan/horizon,karthik-suresh/horizon,ChameleonCloud/horizon,noironetworks/horizon,luhanhan/horizon,Solinea/horizon,davidcusatis/horizon,NCI-Cloud/horizon,davidcusatis/horizon,sandvine/horizon,promptworks/horizon,redhat-cip/horizon,promptworks/horizon,dan1/horizon-proto,CiscoSystems/horizon,henaras/horizon,liyitest/rr,wangxiangyu/horizon,NCI-Cloud/horizon,xinwu/horizon,ChameleonCloud/horizon,newrocknj/horizon,redhat-cip/horizon,bigswitch/horizon,BiznetGIO/horizon,FNST-OpenStack/horizon,kfox1111/horizon,icloudrnd/automation_tools,yeming233/horizon,mdavid/horizon,j4/horizon,django-leonardo/horizon,wolverineav/horizon,newrocknj/horizon,saydulk/horizon,luhanhan/horizon,NeCTAR-RC/horizon,VaneCloud/horizon,noironetworks/horizon,yjxtogo/horizon,takeshineshiro/horizon,agileblaze/OpenStackTwoFactorAuthentication,saydulk/horizon,doug-fish/horizon,redhat-openstack/horizon,bigswitch/horizon,agileblaze/OpenStackTwoFactorAuthentication,NCI-Cloud/horizon,Tesora/tesora-horizon,vladryk/horizon,maestro-hybrid-cloud/horizon,CiscoSystems/horizon,redhat-openstack/horizon,sandvine/horizon,dan1/horizon-x509,mandeepdhami/horizon,ChameleonCloud/horizon,yeming233/horizon,Metaswitch/horizon,tellesnobrega/horizon,damien-dg/horizon,NeCTAR-RC/horizon,blueboxgroup/horizon,karthik-suresh/horizon,blueboxgroup/horizon,Solinea/horizon,philoniare/horizon,BiznetGIO/horizon,pranavtendolkr/horizon,damien-dg/horizon,yeming233/horizon,liyitest/rr,bac/horizon,openstack/horizon,eayunstack/horizon,mandeepdhami/horizon,redhat-cip/horizon,endorphinl/horizon-fork,mdavid/horizon,django-leonardo/horizon,j4/horizon,pranavtendolkr/horizon,redhat-openstack/horizon,icloudrnd/automation_tools,Mirantis/mos-horizon,endorphinl/horizon,VaneCloud/horizon,wangxiangyu/horizon,RudoCris/horizon,xinwu/horizon,dan1/horizon-proto,CiscoSystems/horizon,Solinea/horizon,wangxiangyu/horizon,newrocknj/horizon,vladryk/horizon,damien-dg/horizon,CiscoSystems/horizon,icloudrnd/automation_tools,watonyweng/horizon,idjaw/horizon,endorphinl/horizon-fork,tqtran7/horizon,Daniex/horizon,liyitest/rr,maestro-hybrid-cloud/horizon,Solinea/horizon,sandvine/horizon,Hodorable/0602,takeshineshiro/horizon,takeshineshiro/horizon,RudoCris/horizon,yjxtogo/horizon,pranavtendolkr/horizon,Daniex/horizon,noironetworks/horizon,kfox1111/horizon,noironetworks/horizon,anthonydillon/horizon,wolverineav/horizon,dan1/horizon-x509,promptworks/horizon,Tesora/tesora-horizon,VaneCloud/horizon,agileblaze/OpenStackTwoFactorAuthentication,blueboxgroup/horizon,pranavtendolkr/horizon,philoniare/horizon,Mirantis/mos-horizon,henaras/horizon,BiznetGIO/horizon,Metaswitch/horizon,coreycb/horizon,blueboxgroup/horizon,tqtran7/horizon,takeshineshiro/horizon,coreycb/horizon,NeCTAR-RC/horizon,davidcusatis/horizon,yjxtogo/horizon,idjaw/horizon,mandeepdhami/horizon,Hodorable/0602,eayunstack/horizon,RudoCris/horizon,django-leonardo/horizon,watonyweng/horizon,karthik-suresh/horizon,vladryk/horizon,Daniex/horizon,gerrive/horizon,idjaw/horizon,henaras/horizon,watonyweng/horizon,Dark-Hacker/horizon,openstack/horizon,dan1/horizon-x509,philoniare/horizon,tqtran7/horizon,damien-dg/horizon,endorphinl/horizon-fork,NeCTAR-RC/horizon,saydulk/horizon,redhat-cip/horizon,anthonydillon/horizon,dan1/horizon-x509,endorphinl/horizon,VaneCloud/horizon,FNST-OpenStack/horizon,luhanhan/horizon,izadorozhna/dashboard_integration_tests,anthonydillon/horizon,yeming233/horizon,newrocknj/horizon,bac/horizon,dan1/horizon-proto,Tesora/tesora-horizon,gerrive/horizon,Tesora/tesora-horizon,henaras/horizon,Metaswitch/horizon,izadorozhna/dashboard_integration_tests,maestro-hybrid-cloud/horizon,tellesnobrega/horizon,wolverineav/horizon,gerrive/horizon,mdavid/horizon,kfox1111/horizon,endorphinl/horizon,FNST-OpenStack/horizon,coreycb/horizon,tellesnobrega/horizon,RudoCris/horizon,agileblaze/OpenStackTwoFactorAuthentication,redhat-openstack/horizon,karthik-suresh/horizon,vladryk/horizon,ChameleonCloud/horizon,Dark-Hacker/horizon,openstack/horizon,Mirantis/mos-horizon,coreycb/horizon,gerrive/horizon,promptworks/horizon | horizon/test/jasmine/jasmine_tests.py | horizon/test/jasmine/jasmine_tests.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from horizon.test import helpers as test
class ServicesTests(test.JasmineTests):
sources = [
'horizon/js/horizon.js',
'horizon/js/angular/horizon.conf.js',
'horizon/js/angular/horizon.js',
'horizon/js/angular/services/horizon.utils.js',
'horizon/js/angular/hz.api.module.js',
'horizon/js/angular/services/hz.api.service.js',
'angular/widget.module.js',
'angular/bind-scope/bind-scope.js',
'angular/charts/charts.js',
'angular/charts/chart-tooltip.js',
'angular/charts/pie-chart.js',
'angular/help-panel/help-panel.js',
'angular/metadata-tree/metadata-tree-service.js',
'angular/modal/modal.js',
'angular/table/table.js',
'angular/transfer-table/transfer-table.js',
'angular/wizard/wizard.js',
]
specs = [
'horizon/js/angular/services/hz.api.service.spec.js',
'horizon/tests/jasmine/metadataWidgetControllerSpec.js',
'horizon/tests/jasmine/utilsSpec.js',
'angular/bind-scope/bind-scope.spec.js',
'angular/charts/pie-chart.spec.js',
'angular/help-panel/help-panel.spec.js',
'angular/modal/simple-modal.spec.js',
'angular/table/table.spec.js',
'angular/transfer-table/transfer-table.spec.js',
'angular/wizard/wizard.spec.js',
]
externalTemplates = [
'angular/charts/chart-tooltip.html',
'angular/charts/pie-chart.html',
'angular/help-panel/help-panel.html',
'angular/transfer-table/transfer-table.html',
'angular/wizard/wizard.html',
]
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from horizon.test import helpers as test
class ServicesTests(test.JasmineTests):
sources = [
'horizon/js/horizon.js',
'horizon/js/angular/horizon.conf.js',
'horizon/js/angular/horizon.js',
'horizon/js/angular/services/horizon.utils.js',
'horizon/js/angular/controllers/metadata-widget-controller.js',
'horizon/js/angular/hz.api.module.js',
'horizon/js/angular/services/hz.api.service.js',
'angular/widget.module.js',
'angular/help-panel/help-panel.js',
'angular/wizard/wizard.js',
'angular/table/table.js',
'angular/modal/modal.js',
'angular/bind-scope/bind-scope.js',
'angular/transfer-table/transfer-table.js',
'angular/charts/charts.js',
'angular/charts/chart-tooltip.js',
'angular/charts/pie-chart.js',
]
specs = [
'horizon/tests/jasmine/utilsSpec.js',
'horizon/tests/jasmine/metadataWidgetControllerSpec.js',
'horizon/js/angular/services/hz.api.service.spec.js',
'angular/help-panel/help-panel.spec.js',
'angular/wizard/wizard.spec.js',
'angular/table/table.spec.js',
'angular/modal/simple-modal.spec.js',
'angular/bind-scope/bind-scope.spec.js',
'angular/transfer-table/transfer-table.spec.js',
'angular/charts/pie-chart.spec.js',
]
externalTemplates = [
'angular/help-panel/help-panel.html',
'angular/wizard/wizard.html',
'angular/transfer-table/transfer-table.html',
'angular/charts/chart-tooltip.html',
'angular/charts/pie-chart.html',
]
| apache-2.0 | Python |
972bf4cbabf517e17a717ca02faa279f8bb67fea | set method instead of a set literal for py2 compatibility. | researchstudio-sat/wonpreprocessing,researchstudio-sat/wonpreprocessing,researchstudio-sat/wonpreprocessing,researchstudio-sat/wonpreprocessing | python-processing/src/main/python/add_keyword_slice.py | python-processing/src/main/python/add_keyword_slice.py | import codecs
import sys
from os import listdir
from os.path import isfile, join
import numpy as np
from scipy.sparse.coo import coo_matrix
from scipy.io import mmwrite
import six
from feature_extraction import vectorize_and_transform, apply_threshold, \
lemma_tokenizer, PosTagLemmaTokenizer
from evaluate_link_prediction import read_input_tensor
if len(sys.argv) < 3:
raise Exception("ARGS: <documents dir> <rescal dir> [<skip pos tagging>]")
doc_path = sys.argv[1]
rescal_path = sys.argv[2]
if len(sys.argv) == 4 and sys.argv[3].lower() in set(('y', 'yes', 't', 'true')):
print("Will not use POS tagging")
tokenizer = lemma_tokenizer
else:
print("Will use POS tagging.")
tokenizer = PosTagLemmaTokenizer()
documents = []
for f in listdir(doc_path):
if isfile(join(doc_path, f)) and f.endswith('.eml'):
try:
documents.append(six.text_type(f.rstrip('.eml')))
except UnicodeDecodeError:
print("Skipping file: ", f)
print('Loaded ', len(documents), ' files from path: ', doc_path)
print('Extracting features')
file_paths = [join(doc_path, f + '.eml') for f in documents]
data, features = vectorize_and_transform(file_paths)
data = apply_threshold(data, 0.3)
# TODO: filter features and filenames based on whether their entries survived
print('Reading tensor')
tensor, headers = read_input_tensor(rescal_path)
NEED_STR = 'Need: '
ATTR_STR = 'Attr: '
LEN = len(NEED_STR)
document_index, feature_index = {}, {}
headers_cursor = 1
for header in headers:
if header.startswith(NEED_STR):
document_index[header[LEN:]] = headers_cursor
elif header.startswith(ATTR_STR):
feature_index[header[LEN:]] = headers_cursor
headers_cursor += 1
for filename in documents:
if filename not in document_index:
# print('Adding NEED at index (', headers_cursor, '): ', filename)
headers.append(NEED_STR + filename)
document_index[filename] = headers_cursor
headers_cursor += 1
for feature in features:
# print('Adding ATTR at index (', headers_cursor, '): ', feature)
headers.append(ATTR_STR + feature)
feature_index[feature] = headers_cursor
headers_cursor += 1
offset_row = np.array([document_index[documents[i]] for i in data.row])
offset_col = np.array([feature_index[features[i]] for i in data.col])
offset_matrix = coo_matrix((data.data, (offset_row, offset_col)))
with codecs.open(rescal_path + '/headers.txt', 'w', encoding='utf8') as f:
f.write('\n'.join(headers))
mmwrite(rescal_path + '/keywords_slice.mtx', offset_matrix)
| import codecs
import sys
from os import listdir
from os.path import isfile, join
import numpy as np
from scipy.sparse.coo import coo_matrix
from scipy.io import mmwrite
import six
from feature_extraction import vectorize_and_transform, apply_threshold, \
lemma_tokenizer, PosTagLemmaTokenizer
from evaluate_link_prediction import read_input_tensor
if len(sys.argv) < 3:
raise Exception("ARGS: <documents dir> <rescal dir> [<skip pos tagging>]")
doc_path = sys.argv[1]
rescal_path = sys.argv[2]
if len(sys.argv) == 4 and sys.argv[3].lower() in {'y', 'yes', 't', 'true'}:
print("Will not use POS tagging")
tokenizer = lemma_tokenizer
else:
print("Will use POS tagging.")
tokenizer = PosTagLemmaTokenizer()
documents = []
for f in listdir(doc_path):
if isfile(join(doc_path, f)) and f.endswith('.eml'):
try:
documents.append(six.text_type(f.rstrip('.eml')))
except UnicodeDecodeError:
print("Skipping file: ", f)
print('Loaded ', len(documents), ' files from path: ', doc_path)
print('Extracting features')
file_paths = [join(doc_path, f + '.eml') for f in documents]
data, features = vectorize_and_transform(file_paths)
data = apply_threshold(data, 0.3)
# TODO: filter features and filenames based on whether their entries survived
print('Reading tensor')
tensor, headers = read_input_tensor(rescal_path)
NEED_STR = 'Need: '
ATTR_STR = 'Attr: '
LEN = len(NEED_STR)
document_index, feature_index = {}, {}
headers_cursor = 1
for header in headers:
if header.startswith(NEED_STR):
document_index[header[LEN:]] = headers_cursor
elif header.startswith(ATTR_STR):
feature_index[header[LEN:]] = headers_cursor
headers_cursor += 1
for filename in documents:
if filename not in document_index:
# print('Adding NEED at index (', headers_cursor, '): ', filename)
headers.append(NEED_STR + filename)
document_index[filename] = headers_cursor
headers_cursor += 1
for feature in features:
# print('Adding ATTR at index (', headers_cursor, '): ', feature)
headers.append(ATTR_STR + feature)
feature_index[feature] = headers_cursor
headers_cursor += 1
offset_row = np.array([document_index[documents[i]] for i in data.row])
offset_col = np.array([feature_index[features[i]] for i in data.col])
offset_matrix = coo_matrix((data.data, (offset_row, offset_col)))
with codecs.open(rescal_path + '/headers.txt', 'w', encoding='utf8') as f:
f.write('\n'.join(headers))
mmwrite(rescal_path + '/keywords_slice.mtx', offset_matrix)
| apache-2.0 | Python |
f5ede7288175d4751edbbec4c5e04c9cef29d910 | fix log.py | alaudet/raspi-sump,alaudet/raspi-sump,jreuter/raspi-sump,jreuter/raspi-sump | raspisump/log.py | raspisump/log.py | import time
def log_reading(water_depth):
"""Log time and water depth reading."""
time_of_reading = time.strftime("%H:%M:%S,")
filename = "/home/pi/raspi-sump/csv/waterlevel-{}.csv".format(
time.strftime("%Y%m%d")
)
csv_file = open(filename, 'a')
csv_file.write(time_of_reading),
csv_file.write(str(water_depth)),
csv_file.write("\n")
csv_file.close()
def log_restarts(reason):
'''Log all process restarts'''
logfile = open("/home/pi/raspi-sump/logs/process_log", 'a')
logfile.write(time.strftime("%Y-%m-%d %H:%M:%S,")),
logfile.write(reason),
logfile.write("\n")
logfile.close
| import time
def log_reading(water_depth):
"""Log time and water depth reading."""
time_of_reading = time.strftime("%H:%M:%S,")
filename = "/home/pi/raspi-sump/csv/waterlevel-{}.csv".format(
time.strftime("%Y%m%d")
)
csv_file = open(filename, 'a')
csv_file.write(time_of_reading),
csv_file.write(str(water_depth)),
csv_file.write("\n")
csv_file.close()
| mit | Python |
a00bd97721c4385885fd42f49506972781fb3b8b | Decrease sleep time | dashford/sentinel | src/Notification/Subscriber/LED/RGB.py | src/Notification/Subscriber/LED/RGB.py | import time
import RPi.GPIO as GPIO
class RGB:
def __init__(self, configuration):
self._id = configuration['id']
self._R = configuration['channels']['r']
self._G = configuration['channels']['g']
self._B = configuration['channels']['b']
pins = [
self._R,
self._G,
self._B
]
GPIO.setmode(GPIO.BCM)
GPIO.setup(pins, GPIO.OUT, initial=GPIO.LOW)
def blink(self, mosq, obj, msg):
print(msg.payload)
led = GPIO.PWM(self._G, 100)
led.start(0)
for i in range(0, 100):
led.ChangeDutyCycle(i)
time.sleep(0.01)
for i in range(100, 0, -1):
led.ChangeDutyCycle(i)
time.sleep(0.01)
led.stop()
# GPIO.output(self._G, GPIO.HIGH)
# time.sleep(2)
# GPIO.output(self._G, GPIO.LOW)
# def red(self):
# return self._R
#
# def green(self):
# return self._G
#
# def blue(self):
# return self._B
#
# def on(self, channel):
# GPIO.output(channel, GPIO.HIGH)
#
# def off(self, channel):
# GPIO.output(channel, GPIO.LOW)
| import time
import RPi.GPIO as GPIO
class RGB:
def __init__(self, configuration):
self._id = configuration['id']
self._R = configuration['channels']['r']
self._G = configuration['channels']['g']
self._B = configuration['channels']['b']
pins = [
self._R,
self._G,
self._B
]
GPIO.setmode(GPIO.BCM)
GPIO.setup(pins, GPIO.OUT, initial=GPIO.LOW)
def blink(self, mosq, obj, msg):
print(msg.payload)
led = GPIO.PWM(self._G, 100)
led.start(0)
for i in range(0, 100):
led.ChangeDutyCycle(i)
time.sleep(0.02)
for i in range(100, 0, -1):
led.ChangeDutyCycle(i)
time.sleep(0.02)
led.stop()
# GPIO.output(self._G, GPIO.HIGH)
# time.sleep(2)
# GPIO.output(self._G, GPIO.LOW)
# def red(self):
# return self._R
#
# def green(self):
# return self._G
#
# def blue(self):
# return self._B
#
# def on(self, channel):
# GPIO.output(channel, GPIO.HIGH)
#
# def off(self, channel):
# GPIO.output(channel, GPIO.LOW)
| mit | Python |
a3357cd4bb0859f480fa91f50604a2f129431096 | Exclude upper case letters from generated passwords. | SUNET/eduid-signup,SUNET/eduid-signup,SUNET/eduid-signup | eduid_signup/vccs.py | eduid_signup/vccs.py | from pwgen import pwgen
from re import findall
import vccs_client
def generate_password(settings, credential_id, email):
"""
Generate a new password credential and add it to the VCCS authentication backend.
The salt returned needs to be saved for use in subsequent authentications using
this password. The password is returned so that it can be conveyed to the user.
:param settings: settings dict
:param credential_id: VCCS credential_id as string
:param email: user e-mail address as string
:return: (password, salt) both strings
"""
password = pwgen(settings.get('password_length'), no_capitalize = True, no_symbols = True)
factor = vccs_client.VCCSPasswordFactor(password,
credential_id = credential_id)
vccs = vccs_client.VCCSClient(base_url = settings.get('vccs_url'))
vccs.add_credentials(email, [factor])
return (_human_readable(password), factor.salt)
def _human_readable(password):
"""
Format a random password more readable to humans (groups of four characters).
:param password: string
:return: readable password as string
:rtype: string
"""
regexp = '.{,4}'
parts = findall(regexp, password)
return ' '.join(parts)
| from pwgen import pwgen
from re import findall
import vccs_client
def generate_password(settings, credential_id, email):
"""
Generate a new password credential and add it to the VCCS authentication backend.
The salt returned needs to be saved for use in subsequent authentications using
this password. The password is returned so that it can be conveyed to the user.
:param settings: settings dict
:param credential_id: VCCS credential_id as string
:param email: user e-mail address as string
:return: (password, salt) both strings
"""
password = pwgen(settings.get('password_length'), no_symbols = True)
factor = vccs_client.VCCSPasswordFactor(password,
credential_id = credential_id)
vccs = vccs_client.VCCSClient(base_url = settings.get('vccs_url'))
vccs.add_credentials(email, [factor])
return (_human_readable(password), factor.salt)
def _human_readable(password):
"""
Format a random password more readable to humans (groups of four characters).
:param password: string
:return: readable password as string
:rtype: string
"""
regexp = '.{,4}'
parts = findall(regexp, password)
return ' '.join(parts)
| bsd-3-clause | Python |
cd198b51d2c9271255b5e79a2216964c6d1ccd46 | Bump version to 0.0.12 | base4sistemas/pyescpos | escpos/__init__.py | escpos/__init__.py | # -*- coding: utf-8 -*-
#
# escpos/__init__.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = '0.0.12'
| # -*- coding: utf-8 -*-
#
# escpos/__init__.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = '0.0.11'
| apache-2.0 | Python |
45f890b81d500f0d35c848fa1c5f926f145d9c1c | fix version so setuptools quits complaining | trozamon/hadmin | hadmin/__init__.py | hadmin/__init__.py | __version__ = '0.3.dev0'
| __version__ = '0.3.dev'
| mit | Python |
5af2ddef6c02d2650028e3b059a2f350599cb8e9 | Set module installable | open-synergy/opnsynid-hr | hr_attendance_analysis/__openerp__.py | hr_attendance_analysis/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2011-15 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "HR - Attendance Analysis",
'version': '8.0.1.0.0',
'category': 'Generic Modules/Human Resources',
'summary': "Dynamic reports based on employee's attendances and "
"contract's calendar",
'author': "Agile Business Group,Odoo Community Association (OCA)",
'website': 'http://www.agilebg.com',
'license': 'AGPL-3',
"depends": [
'hr_attendance',
'hr_contract',
'hr_holidays',
'report_webkit'
],
"data": [
'company_view.xml',
'hr_attendance_view.xml',
'reports.xml',
'wizard/print_calendar_report.xml',
'resource_view.xml',
'security/ir.model.access.csv',
],
"demo": [
'hr_attendance_demo.xml',
],
"test": [
'test/attendances.yml',
],
"installable": True
}
| # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2011-15 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "HR - Attendance Analysis",
'version': '8.0.1.0.0',
'category': 'Generic Modules/Human Resources',
'summary': "Dynamic reports based on employee's attendances and "
"contract's calendar",
'author': "Agile Business Group,Odoo Community Association (OCA)",
'website': 'http://www.agilebg.com',
'license': 'AGPL-3',
"depends": [
'hr_attendance',
'hr_contract',
'hr_holidays',
'report_webkit'
],
"data": [
'company_view.xml',
'hr_attendance_view.xml',
'reports.xml',
'wizard/print_calendar_report.xml',
'resource_view.xml',
'security/ir.model.access.csv',
],
"demo": [
'hr_attendance_demo.xml',
],
"test": [
'test/attendances.yml',
],
"installable": False
}
| agpl-3.0 | Python |
0796791ad94f31a1e97dac42c29210af6fb0ea69 | Make backends/template a valid Python file | severin-lemaignan/minimalkb,chili-epfl/minimalkb | src/minimalkb/backends/template.py | src/minimalkb/backends/template.py | import logging; logger = logging.getLogger("minimalKB."+__name__);
DEBUG_LEVEL=logging.DEBUG
class TemplateBackend:
def __init__(self):
pass
def clear(self):
""" Empties all knowledge models.
"""
raise NotImplementedError()
def add(self, stmts, model = "default"):
""" Add the given statements to the given model.
"""
raise NotImplementedError()
def delete(self, stmts, model = "default"):
""" remove the given statements from the given model.
"""
raise NotImplementedError()
def update(self, stmts, model = "default"):
""" Add the given statements to the given model, updating the statements
with a functional predicate.
"""
raise NotImplementedError()
def about(self, resource, models):
""" Returns all statements involving the resource.
"""
raise NotImplementedError()
def has(self, stmts, models):
""" Returns true if the statements or partial statements
are present in the knowledge models.
"""
raise NotImplementedError()
def query(self, vars, patterns, models):
raise NotImplementedError()
def classesof(self, concept, direct, models):
""" Returns the RDF classes of the concept.
"""
raise NotImplementedError()
| import logging; logger = logging.getLogger("minimalKB."+__name__);
DEBUG_LEVEL=logging.DEBUG
class TemplateBackend:
def __init__(self):
def clear(self):
""" Empties all knowledge models.
"""
raise NotImplementedError()
def add(self, stmts, model = "default"):
""" Add the given statements to the given model.
"""
raise NotImplementedError()
def delete(self, stmts, model = "default"):
""" remove the given statements from the given model.
"""
raise NotImplementedError()
def update(self, stmts, model = "default"):
""" Add the given statements to the given model, updating the statements
with a functional predicate.
"""
raise NotImplementedError()
def about(self, resource, models):
""" Returns all statements involving the resource.
"""
raise NotImplementedError()
def has(self, stmts, models):
""" Returns true if the statements or partial statements
are present in the knowledge models.
"""
raise NotImplementedError()
def query(self, vars, patterns, models):
raise NotImplementedError()
def classesof(self, concept, direct, models):
""" Returns the RDF classes of the concept.
"""
raise NotImplementedError()
| bsd-3-clause | Python |
2ed4867602bf02640ff86160a994c567c974e70d | Add 'set active account' logic to CLI | vegarwe/sqrl,vegarwe/sqrl,bushxnyc/sqrl,vegarwe/sqrl,vegarwe/sqrl | sqrl/sqrl.py | sqrl/sqrl.py | #!/usr/bin/env python
# TODO Catch connection errors
# TODO Catch sqrlurl format errors
# TODO Add logging option
"""
Usage: sqrl [-d] [-n] [-l] [--id <AccountID>] [--create="<Name>"] [--path=<Dir>] [<SQRLURL>]
Options:
-d Debugging output
-id Set an account as Default
-l List Accounts
-c <Your Name> Create Account
-n Notify via libnotify (Gnome)
-p --path=<Dir> Path for config and key storage
Example:
sqrl "sqrl://example.com/login/sqrl?d=6&nut=a95fa8e88dc499758"
"""
import os
import sys
from .mkm import MKM
from client import Client
from docopt import docopt
VERSION = "0.0.2"
HOME = os.environ['HOME']
CONFIG_DIR = '.config/sqrl/'
WORKING_DIR = HOME + '/' + CONFIG_DIR
def main():
arguments = docopt(__doc__, version=VERSION)
# Collecting arguments
url = arguments.get('<SQRLURL>')
create_acct = arguments.get('--create')
bool_notify = arguments.get('-n')
path = arguments.get('--path')
debug = arguments.get('-d')
list = arguments.get('-l')
if not path:
path = WORKING_DIR
manager = MKM(path)
if account_id:
select_account(manager, account_id)
if list:
list_accounts(manager)
if create_acct:
create_account(manager, create_acct)
if not debug:
debug = False
run(url, manager, debug, bool_notify)
def list_accounts(manager):
"""
List out ID and Name for each account
"""
accounts = manager.list_accounts()
output = []
for k in accounts.keys():
if accounts[k]['active']:
output.insert(0, ("* " + accounts[k]['id'] +
" [Name: " + accounts[k]['name'] + "]"))
else:
output.append(" " + accounts[k]['id'] + " [Name: "
+ accounts[k]['name'] + "]")
print "\n".join(output)
sys.exit()
def create_account(manager, name):
password = raw_input("Please Enter Master Password: ")
password_confirm = raw_input("Please Confirm Master Password: ")
def select_account(manager, id):
manager.set_account(id)
list_accounts(manager)
sys.exit()
if manager.create_account({'name': name},
password, password_confirm):
print "Account Created"
else:
print "Account NOT Created"
sys.exit()
# Create sqrl client and submit request
sqrlclient = Client(masterkey, url, bool_notify, debug)
sqrlclient.submit()
if __name__ == "__main__":
main()
| #!/usr/bin/env python
# TODO Catch connection errors
# TODO Catch sqrlurl format errors
# TODO Add logging option
"""
Usage: sqrl [-d] [-n] [-l] [--create="<Users Name>"] [--path=<Dir>] [<SQRLURL>]
Options:
-d Debugging output
-l List Accounts
-c <Your Name> Create Account
-n Notify via libnotify (Gnome)
-p --path=<Dir> Path for config and key storage
Example:
sqrl "sqrl://example.com/login/sqrl?d=6&nut=a95fa8e88dc499758"
"""
import os
import sys
from .mkm import MKM
from client import Client
from docopt import docopt
VERSION = "0.0.2"
HOME = os.environ['HOME']
CONFIG_DIR = '.config/sqrl/'
WORKING_DIR = HOME + '/' + CONFIG_DIR
def main():
arguments = docopt(__doc__, version=VERSION)
# Collecting arguments
url = arguments.get('<SQRLURL>')
create_acct = arguments.get('--create')
bool_notify = arguments.get('-n')
path = arguments.get('--path')
debug = arguments.get('-d')
list = arguments.get('-l')
if not path:
path = WORKING_DIR
manager = MKM(path)
if list:
list_accounts(manager)
if create_acct:
create_account(manager, create_acct)
if not debug:
debug = False
run(url, manager, debug, bool_notify)
def list_accounts(manager):
"""
List out ID and Name for each account
"""
accounts = manager.list_accounts()
for k in accounts.keys():
print accounts[k]['id'] + " [Name: " + accounts[k]['name'] + "]"
sys.exit()
def create_account(manager, name):
password = raw_input("Please Enter Master Password: ")
password_confirm = raw_input("Please Confirm Master Password: ")
if manager.create_account({'name': name},
password, password_confirm):
print "Account Created"
else:
print "Account NOT Created"
sys.exit()
# Create sqrl client and submit request
sqrlclient = Client(masterkey, url, bool_notify, debug)
sqrlclient.submit()
if __name__ == "__main__":
main()
| mit | Python |
3a7ac1e8be7fa4b8ff34108d2aea235873056815 | fix unit test bug intro'ed by change dccc5a57797e | simonsdave/cloudfeaster,simonsdave/cloudfeaster,simonsdave/cloudfeaster | bin/tests/spiders_dot_py_tests.py | bin/tests/spiders_dot_py_tests.py | """This module contains "unit" tests for ```spiders.py```."""
import json
import subprocess
import unittest
from nose.plugins.attrib import attr
@attr('integration')
class TestSpidersDotPy(unittest.TestCase):
def test_all_good(self):
p = subprocess.Popen(
['spiders.py'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(stdout_and_stderr, _) = p.communicate()
self.assertEqual(p.returncode, 0)
self.assertEqual(stdout_and_stderr, '{}\n')
def test_invalid_command_line_args(self):
p = subprocess.Popen(
['spiders.py', 'dave-creates-an-error'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(stdout_and_stderr, _) = p.communicate()
self.assertEqual(p.returncode, 2)
self.assertEqual(
stdout_and_stderr,
'Usage: spiders.py [options] <package>\n\nspiders.py: error: try again ...\n')
def test_load_sample_spiders(self):
p = subprocess.Popen(
['spiders.py', '--samples'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(stdout_and_stderr, _) = p.communicate()
self.assertEqual(p.returncode, 0)
expected_stdout_and_stderr = {
"cloudfeaster.samples.pypi_spider.PyPISpider": {
"url": "http://pypi-ranking.info/alltime",
"factor_display_names": {},
"ttl": 60,
"factor_display_order": []
},
"cloudfeaster.samples.bank_of_canada_daily_exchange_rates.BankOfCanadaDailyExchangeRatesSpider": {
"url": "http://www.bankofcanada.ca/rates/exchange/daily-exchange-rates/",
"factor_display_names": {},
"ttl": 60,
"factor_display_order": []
}
}
self.assertEqual(json.loads(stdout_and_stderr), expected_stdout_and_stderr)
| """This module contains "unit" tests for ```spiders.py```."""
import subprocess
import unittest
from nose.plugins.attrib import attr
@attr('integration')
class TestSpidersDotPy(unittest.TestCase):
def test_all_good(self):
p = subprocess.Popen(
['spiders.py'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(stdout_and_stderr, _) = p.communicate()
self.assertEqual(p.returncode, 0)
self.assertEqual(stdout_and_stderr, '{}\n')
def test_invalid_command_line_args(self):
p = subprocess.Popen(
['spiders.py', 'dave-creates-an-error'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(stdout_and_stderr, _) = p.communicate()
self.assertEqual(p.returncode, 2)
self.assertEqual(
stdout_and_stderr,
'Usage: spiders.py [options] <package>\n\nspiders.py: error: try again ...\n')
def test_load_sample_spiders(self):
p = subprocess.Popen(
['spiders.py', '--samples'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(stdout_and_stderr, _) = p.communicate()
self.assertEqual(p.returncode, 0)
self.assertEqual(
stdout_and_stderr,
(
'{"cloudfeaster.samples.pypi_spider.PyPISpider": '
'{"url": "http://pypi-ranking.info/alltime", "factor_display_names": {}, "ttl": 60, '
'"factor_display_order": []}}\n'
))
| mit | Python |
b03ed6307bd1354b931cdd993361d0a40a1d6850 | Reorder imports in alphabetical order | alexisrolland/data-quality,alexisrolland/data-quality,alexisrolland/data-quality,alexisrolland/data-quality | api/init/graphqlapi/proxy.py | api/init/graphqlapi/proxy.py | import graphqlapi.utils as utils
from graphqlapi.exceptions import RequestException
from graphqlapi.interceptor import ExecuteBatch, TestDataSource
from graphql.parser import GraphQLParser
interceptors = [
ExecuteBatch(),
TestDataSource()
]
def proxy_request(payload: dict):
graphql_ast = parse_query(payload['query'])
# Execute request on GraphQL API
status, data = utils.execute_graphql_request(payload['query'])
for interceptor in interceptors:
if interceptor.can_handle(graphql_ast):
data = interceptor.after_request(graphql_ast, status, data)
return 200 if status == 200 else 500, data
def parse_query(payload_query: str):
try:
return GraphQLParser().parse(payload_query)
except Exception:
raise RequestException(400, 'Invalid GraphQL query')
| import graphqlapi.utils as utils
from graphql.parser import GraphQLParser
from graphqlapi.interceptor import ExecuteBatch, TestDataSource
from graphqlapi.exceptions import RequestException
interceptors = [
ExecuteBatch(),
TestDataSource()
]
def proxy_request(payload: dict):
graphql_ast = parse_query(payload['query'])
# Execute request on GraphQL API
status, data = utils.execute_graphql_request(payload['query'])
for interceptor in interceptors:
if interceptor.can_handle(graphql_ast):
data = interceptor.after_request(graphql_ast, status, data)
return 200 if status == 200 else 500, data
def parse_query(payload_query: str):
try:
return GraphQLParser().parse(payload_query)
except Exception:
raise RequestException(400, 'Invalid GraphQL query')
| apache-2.0 | Python |
a5d9a1806225a5bbc50f57d3f190de3dee34ee59 | Support no_match_error in selects.with_or(). | bazelbuild/bazel-skylib,bazelbuild/bazel-skylib,bazelbuild/bazel-skylib | lib/selects.bzl | lib/selects.bzl | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Skylib module containing convenience interfaces for select()."""
def _with_or(input_dict, no_match_error=''):
"""Drop-in replacement for `select()` that supports ORed keys.
Args:
input_dict: The same dictionary `select()` takes, except keys may take
either the usual form `"//foo:config1"` or
`("//foo:config1", "//foo:config2", ...)` to signify
`//foo:config1` OR `//foo:config2` OR `...`.
no_match_error: Optional custom error to report if no condition matches.
Example:
```build
deps = selects.with_or({
"//configs:one": [":dep1"],
("//configs:two", "//configs:three"): [":dep2or3"],
"//configs:four": [":dep4"],
"//conditions:default": [":default"]
})
```
Key labels may appear at most once anywhere in the input.
Returns:
A native `select()` that expands
`("//configs:two", "//configs:three"): [":dep2or3"]`
to
```build
"//configs:two": [":dep2or3"],
"//configs:three": [":dep2or3"],
```
"""
return select(_with_or_dict(input_dict), no_match_error=no_match_error)
def _with_or_dict(input_dict):
"""Variation of `with_or` that returns the dict of the `select()`.
Unlike `select()`, the contents of the dict can be inspected by Skylark
macros.
Args:
input_dict: Same as `with_or`.
Returns:
A dictionary usable by a native `select()`.
"""
output_dict = {}
for (key, value) in input_dict.items():
if type(key) == type(()):
for config_setting in key:
if config_setting in output_dict.keys():
fail("key %s appears multiple times" % config_setting)
output_dict[config_setting] = value
else:
if key in output_dict.keys():
fail("key %s appears multiple times" % config_setting)
output_dict[key] = value
return output_dict
selects = struct(
with_or=_with_or,
with_or_dict=_with_or_dict
)
| # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Skylib module containing convenience interfaces for select()."""
def _with_or(input_dict):
"""Drop-in replacement for `select()` that supports ORed keys.
Args:
input_dict: The same dictionary `select()` takes, except keys may take
either the usual form `"//foo:config1"` or
`("//foo:config1", "//foo:config2", ...)` to signify
`//foo:config1` OR `//foo:config2` OR `...`.
Example:
```build
deps = selects.with_or({
"//configs:one": [":dep1"],
("//configs:two", "//configs:three"): [":dep2or3"],
"//configs:four": [":dep4"],
"//conditions:default": [":default"]
})
```
Key labels may appear at most once anywhere in the input.
Returns:
A native `select()` that expands
`("//configs:two", "//configs:three"): [":dep2or3"]`
to
```build
"//configs:two": [":dep2or3"],
"//configs:three": [":dep2or3"],
```
"""
return select(_with_or_dict(input_dict))
def _with_or_dict(input_dict):
"""Variation of `with_or` that returns the dict of the `select()`.
Unlike `select()`, the contents of the dict can be inspected by Skylark
macros.
Args:
input_dict: Same as `with_or`.
Returns:
A dictionary usable by a native `select()`.
"""
output_dict = {}
for (key, value) in input_dict.items():
if type(key) == type(()):
for config_setting in key:
if config_setting in output_dict.keys():
fail("key %s appears multiple times" % config_setting)
output_dict[config_setting] = value
else:
if key in output_dict.keys():
fail("key %s appears multiple times" % config_setting)
output_dict[key] = value
return output_dict
selects = struct(
with_or=_with_or,
with_or_dict=_with_or_dict
)
| apache-2.0 | Python |
05fc13ef7b0c48a8411252a9c05d518eac842071 | load calibration file properly; typo on variable name | sbma44/stephmeter,sbma44/yellowlight,sbma44/yellowlight | stephmeter.py | stephmeter.py | #!/home/pi/.virtualenvs/stephmeter/bin/python
import nextbus
import pwm_calibrate
import led
import time
from settings import *
def main():
l = led.LED(SERIAL_DEVICE, SERIAL_SPEED)
p = pwm_calibrate.PWMCalibrator()
p.load()
p_range = p.get_range()
nb = nextbus.NextbusPredictor(NEXTBUS_ROUTES)
while True:
nb.refresh_if_necessary()
(route, minutes) = nb.get_closest_arrival()
minutes = min(max(minutes, p_range[0]), p_range[1])
if int(route)==43:
l.set(0, 255, 0)
elif int(route)==42:
l.set(0, 0, 255)
p.setPWM(minutes)
time.sleep(0.1)
if __name__ == '__main__':
main()
| #!/home/pi/.virtualenvs/stephmeter/bin/python
import nextbus
import pwm_calibrate
import led
import time
from settings import *
def main():
l = led.LED(SERIAL_DEVICE, SERIAL_SPEED)
p = pwm_calibrate.PWMCalibrator()
p_range = p.get_range()
nb = nextbus.NextbusPredictor(NEXTBUS_ROUTES)
while True:
nb.refresh_if_necessary()
(route, minutes) = nb.get_closest_arrival()
minutes = min(max(minutes, p_range[0]), p_range[1])
if int(route)==43:
l.set(0, 255, 0)
elif int(route)==42:
l.set(0, 0, 255)
P.setPWM(minutes)
time.sleep(0.1)
if __name__ == '__main__':
main()
| mit | Python |
9e5583ab08bb40d0cedd4931a2fde59b2040d554 | fix error when no ssid is found in db | amfoss/fosswebsite,akshaya9/fosswebsite,akshaya9/fosswebsite,akshaya9/fosswebsite,amfoss/fosswebsite,amfoss/fosswebsite | attendance/management/commands/update_ssid.py | attendance/management/commands/update_ssid.py | from django.core.management import BaseCommand
from attendance.models import SSIDName
class Command(BaseCommand):
help = 'Generates a new ssid(need to run as a cron job everyday)'
def handle(self, *args, **options):
ssid_name, created = SSIDName.objects.get_or_create(id=1)
ssid_name.generate_random_name()
ssid_name.save()
print(ssid_name.name)
| from django.core.management import BaseCommand
from attendance.models import SSIDName
class Command(BaseCommand):
help = 'Generates a new ssid(need to run as a cron job everyday)'
def handle(self, *args, **options):
ssid_name = SSIDName.objects.first()
ssid_name.generate_random_name()
ssid_name.save()
print(ssid_name.name)
| mit | Python |
230802ee222a6850618517674b991e93a9ed9e6b | Add multiple columns simultaneously | firstblade/zulip,jessedhillon/zulip,joshisa/zulip,tiansiyuan/zulip,dotcool/zulip,noroot/zulip,xuxiao/zulip,amanharitsh123/zulip,cosmicAsymmetry/zulip,mansilladev/zulip,Gabriel0402/zulip,brainwane/zulip,jainayush975/zulip,peguin40/zulip,wangdeshui/zulip,shaunstanislaus/zulip,stamhe/zulip,jackrzhang/zulip,ericzhou2008/zulip,hackerkid/zulip,calvinleenyc/zulip,zwily/zulip,gkotian/zulip,vabs22/zulip,zacps/zulip,blaze225/zulip,jonesgithub/zulip,schatt/zulip,qq1012803704/zulip,MariaFaBella85/zulip,mdavid/zulip,guiquanz/zulip,jimmy54/zulip,zwily/zulip,alliejones/zulip,schatt/zulip,umkay/zulip,hj3938/zulip,jonesgithub/zulip,willingc/zulip,Vallher/zulip,EasonYi/zulip,voidException/zulip,joshisa/zulip,akuseru/zulip,ApsOps/zulip,thomasboyt/zulip,shaunstanislaus/zulip,so0k/zulip,aliceriot/zulip,tiansiyuan/zulip,eastlhu/zulip,Suninus/zulip,mansilladev/zulip,amyliu345/zulip,amanharitsh123/zulip,mohsenSy/zulip,grave-w-grave/zulip,levixie/zulip,amanharitsh123/zulip,schatt/zulip,adnanh/zulip,glovebx/zulip,proliming/zulip,Diptanshu8/zulip,hackerkid/zulip,praveenaki/zulip,punchagan/zulip,xuxiao/zulip,SmartPeople/zulip,jessedhillon/zulip,hengqujushi/zulip,tiansiyuan/zulip,sonali0901/zulip,udxxabp/zulip,Jianchun1/zulip,armooo/zulip,bowlofstew/zulip,blaze225/zulip,Frouk/zulip,proliming/zulip,alliejones/zulip,TigorC/zulip,xuanhan863/zulip,wdaher/zulip,reyha/zulip,SmartPeople/zulip,udxxabp/zulip,pradiptad/zulip,natanovia/zulip,vakila/zulip,EasonYi/zulip,willingc/zulip,aps-sids/zulip,suxinde2009/zulip,sup95/zulip,souravbadami/zulip,PaulPetring/zulip,KJin99/zulip,esander91/zulip,luyifan/zulip,TigorC/zulip,peiwei/zulip,vakila/zulip,wweiradio/zulip,ryansnowboarder/zulip,codeKonami/zulip,ryansnowboarder/zulip,Drooids/zulip,dotcool/zulip,fw1121/zulip,m1ssou/zulip,praveenaki/zulip,avastu/zulip,jeffcao/zulip,thomasboyt/zulip,sup95/zulip,developerfm/zulip,niftynei/zulip,arpith/zulip,mahim97/zulip,jeffcao/zulip,noroot/zulip,wavelets/zulip,willingc/zulip,Frouk/zulip,joshisa/zulip,adnanh/zulip,lfranchi/zulip,ufosky-server/zulip,shubhamdhama/zulip,reyha/zulip,Qgap/zulip,jainayush975/zulip,tommyip/zulip,schatt/zulip,Batterfii/zulip,zulip/zulip,technicalpickles/zulip,dxq-git/zulip,jerryge/zulip,ericzhou2008/zulip,Batterfii/zulip,krtkmj/zulip,rishig/zulip,jimmy54/zulip,Diptanshu8/zulip,vabs22/zulip,zachallaun/zulip,jerryge/zulip,esander91/zulip,hafeez3000/zulip,ryanbackman/zulip,pradiptad/zulip,rishig/zulip,zwily/zulip,jackrzhang/zulip,fw1121/zulip,huangkebo/zulip,dotcool/zulip,calvinleenyc/zulip,JanzTam/zulip,PaulPetring/zulip,peguin40/zulip,xuxiao/zulip,shubhamdhama/zulip,seapasulli/zulip,dwrpayne/zulip,guiquanz/zulip,hustlzp/zulip,amallia/zulip,dnmfarrell/zulip,brockwhittaker/zulip,easyfmxu/zulip,Cheppers/zulip,brainwane/zulip,babbage/zulip,swinghu/zulip,firstblade/zulip,souravbadami/zulip,niftynei/zulip,brockwhittaker/zulip,kaiyuanheshang/zulip,andersk/zulip,glovebx/zulip,gkotian/zulip,developerfm/zulip,levixie/zulip,shrikrishnaholla/zulip,bssrdf/zulip,zachallaun/zulip,jonesgithub/zulip,kokoar/zulip,ipernet/zulip,atomic-labs/zulip,ufosky-server/zulip,jackrzhang/zulip,TigorC/zulip,shaunstanislaus/zulip,showell/zulip,alliejones/zulip,PhilSk/zulip,MayB/zulip,amyliu345/zulip,peiwei/zulip,MayB/zulip,calvinleenyc/zulip,bowlofstew/zulip,vikas-parashar/zulip,seapasulli/zulip,sonali0901/zulip,aakash-cr7/zulip,mdavid/zulip,blaze225/zulip,aps-sids/zulip,Juanvulcano/zulip,suxinde2009/zulip,JPJPJPOPOP/zulip,KJin99/zulip,AZtheAsian/zulip,bssrdf/zulip,peguin40/zulip,DazWorrall/zulip,cosmicAsymmetry/zulip,Galexrt/zulip,ikasumiwt/zulip,xuxiao/zulip,andersk/zulip,LAndreas/zulip,lfranchi/zulip,synicalsyntax/zulip,hengqujushi/zulip,shrikrishnaholla/zulip,karamcnair/zulip,dhcrzf/zulip,souravbadami/zulip,dawran6/zulip,avastu/zulip,bowlofstew/zulip,kaiyuanheshang/zulip,voidException/zulip,Galexrt/zulip,christi3k/zulip,jimmy54/zulip,Vallher/zulip,aakash-cr7/zulip,dhcrzf/zulip,suxinde2009/zulip,zacps/zulip,susansls/zulip,jrowan/zulip,tbutter/zulip,Galexrt/zulip,kokoar/zulip,he15his/zulip,dnmfarrell/zulip,dotcool/zulip,vikas-parashar/zulip,swinghu/zulip,ahmadassaf/zulip,vaidap/zulip,Juanvulcano/zulip,ikasumiwt/zulip,bluesea/zulip,mahim97/zulip,bowlofstew/zulip,voidException/zulip,Juanvulcano/zulip,jerryge/zulip,littledogboy/zulip,amyliu345/zulip,huangkebo/zulip,technicalpickles/zulip,arpith/zulip,verma-varsha/zulip,babbage/zulip,saitodisse/zulip,Diptanshu8/zulip,samatdav/zulip,samatdav/zulip,LAndreas/zulip,gigawhitlocks/zulip,wavelets/zulip,luyifan/zulip,so0k/zulip,ufosky-server/zulip,EasonYi/zulip,peiwei/zulip,Gabriel0402/zulip,Jianchun1/zulip,wweiradio/zulip,j831/zulip,arpith/zulip,mdavid/zulip,jessedhillon/zulip,paxapy/zulip,kou/zulip,DazWorrall/zulip,adnanh/zulip,pradiptad/zulip,SmartPeople/zulip,lfranchi/zulip,ryansnowboarder/zulip,jeffcao/zulip,jeffcao/zulip,levixie/zulip,peguin40/zulip,vaidap/zulip,gkotian/zulip,akuseru/zulip,swinghu/zulip,MayB/zulip,armooo/zulip,Galexrt/zulip,verma-varsha/zulip,AZtheAsian/zulip,grave-w-grave/zulip,suxinde2009/zulip,dxq-git/zulip,RobotCaleb/zulip,Diptanshu8/zulip,Diptanshu8/zulip,jrowan/zulip,shrikrishnaholla/zulip,jphilipsen05/zulip,zhaoweigg/zulip,Gabriel0402/zulip,tiansiyuan/zulip,christi3k/zulip,akuseru/zulip,Vallher/zulip,natanovia/zulip,sharmaeklavya2/zulip,MayB/zulip,bastianh/zulip,adnanh/zulip,m1ssou/zulip,wangdeshui/zulip,stamhe/zulip,xuxiao/zulip,brainwane/zulip,dwrpayne/zulip,bastianh/zulip,pradiptad/zulip,brockwhittaker/zulip,mansilladev/zulip,karamcnair/zulip,akuseru/zulip,paxapy/zulip,willingc/zulip,kou/zulip,themass/zulip,hafeez3000/zulip,JanzTam/zulip,mansilladev/zulip,hafeez3000/zulip,PaulPetring/zulip,amanharitsh123/zulip,PaulPetring/zulip,jackrzhang/zulip,tdr130/zulip,Drooids/zulip,Qgap/zulip,synicalsyntax/zulip,wavelets/zulip,zwily/zulip,easyfmxu/zulip,j831/zulip,ryansnowboarder/zulip,souravbadami/zulip,proliming/zulip,rishig/zulip,eeshangarg/zulip,deer-hope/zulip,swinghu/zulip,Gabriel0402/zulip,fw1121/zulip,ericzhou2008/zulip,saitodisse/zulip,dwrpayne/zulip,MariaFaBella85/zulip,hustlzp/zulip,aliceriot/zulip,technicalpickles/zulip,j831/zulip,joyhchen/zulip,TigorC/zulip,tbutter/zulip,dhcrzf/zulip,tommyip/zulip,kokoar/zulip,synicalsyntax/zulip,aliceriot/zulip,avastu/zulip,itnihao/zulip,nicholasbs/zulip,tommyip/zulip,ryansnowboarder/zulip,pradiptad/zulip,Juanvulcano/zulip,m1ssou/zulip,developerfm/zulip,krtkmj/zulip,blaze225/zulip,so0k/zulip,jonesgithub/zulip,udxxabp/zulip,Frouk/zulip,vakila/zulip,punchagan/zulip,arpith/zulip,brainwane/zulip,eastlhu/zulip,bitemyapp/zulip,MayB/zulip,esander91/zulip,proliming/zulip,nicholasbs/zulip,wavelets/zulip,xuxiao/zulip,hayderimran7/zulip,itnihao/zulip,verma-varsha/zulip,johnny9/zulip,Batterfii/zulip,ashwinirudrappa/zulip,AZtheAsian/zulip,atomic-labs/zulip,babbage/zulip,suxinde2009/zulip,verma-varsha/zulip,bitemyapp/zulip,zhaoweigg/zulip,peiwei/zulip,isht3/zulip,dhcrzf/zulip,dnmfarrell/zulip,lfranchi/zulip,amallia/zulip,hayderimran7/zulip,noroot/zulip,amanharitsh123/zulip,kaiyuanheshang/zulip,hengqujushi/zulip,Suninus/zulip,gigawhitlocks/zulip,zhaoweigg/zulip,kokoar/zulip,aps-sids/zulip,huangkebo/zulip,saitodisse/zulip,wangdeshui/zulip,jphilipsen05/zulip,zacps/zulip,vakila/zulip,amyliu345/zulip,Cheppers/zulip,hafeez3000/zulip,brainwane/zulip,KingxBanana/zulip,bastianh/zulip,showell/zulip,shubhamdhama/zulip,eastlhu/zulip,qq1012803704/zulip,rht/zulip,souravbadami/zulip,easyfmxu/zulip,synicalsyntax/zulip,alliejones/zulip,isht3/zulip,timabbott/zulip,luyifan/zulip,DazWorrall/zulip,arpitpanwar/zulip,LeeRisk/zulip,DazWorrall/zulip,niftynei/zulip,DazWorrall/zulip,zacps/zulip,ikasumiwt/zulip,Suninus/zulip,arpitpanwar/zulip,timabbott/zulip,tdr130/zulip,susansls/zulip,sup95/zulip,hustlzp/zulip,joyhchen/zulip,jeffcao/zulip,pradiptad/zulip,tommyip/zulip,voidException/zulip,hengqujushi/zulip,brockwhittaker/zulip,suxinde2009/zulip,moria/zulip,reyha/zulip,saitodisse/zulip,zhaoweigg/zulip,JanzTam/zulip,zofuthan/zulip,themass/zulip,rht/zulip,mansilladev/zulip,johnnygaddarr/zulip,reyha/zulip,dattatreya303/zulip,Juanvulcano/zulip,ahmadassaf/zulip,noroot/zulip,gkotian/zulip,Batterfii/zulip,vikas-parashar/zulip,esander91/zulip,zwily/zulip,jonesgithub/zulip,johnny9/zulip,wweiradio/zulip,bitemyapp/zulip,paxapy/zulip,synicalsyntax/zulip,andersk/zulip,bitemyapp/zulip,babbage/zulip,hayderimran7/zulip,bluesea/zulip,cosmicAsymmetry/zulip,MayB/zulip,LeeRisk/zulip,peiwei/zulip,Qgap/zulip,hayderimran7/zulip,jainayush975/zulip,hayderimran7/zulip,punchagan/zulip,karamcnair/zulip,MariaFaBella85/zulip,avastu/zulip,themass/zulip,luyifan/zulip,babbage/zulip,littledogboy/zulip,calvinleenyc/zulip,akuseru/zulip,Gabriel0402/zulip,johnnygaddarr/zulip,so0k/zulip,jerryge/zulip,sharmaeklavya2/zulip,xuanhan863/zulip,natanovia/zulip,johnnygaddarr/zulip,bowlofstew/zulip,LeeRisk/zulip,DazWorrall/zulip,ufosky-server/zulip,KingxBanana/zulip,MariaFaBella85/zulip,mohsenSy/zulip,TigorC/zulip,bastianh/zulip,shubhamdhama/zulip,he15his/zulip,hj3938/zulip,mahim97/zulip,vabs22/zulip,MariaFaBella85/zulip,hj3938/zulip,ryanbackman/zulip,aps-sids/zulip,ericzhou2008/zulip,easyfmxu/zulip,bastianh/zulip,wweiradio/zulip,tdr130/zulip,firstblade/zulip,mdavid/zulip,rishig/zulip,Jianchun1/zulip,aps-sids/zulip,codeKonami/zulip,bitemyapp/zulip,developerfm/zulip,he15his/zulip,arpith/zulip,samatdav/zulip,RobotCaleb/zulip,atomic-labs/zulip,eastlhu/zulip,karamcnair/zulip,tdr130/zulip,jackrzhang/zulip,Galexrt/zulip,Cheppers/zulip,susansls/zulip,natanovia/zulip,mahim97/zulip,Drooids/zulip,avastu/zulip,hackerkid/zulip,moria/zulip,ipernet/zulip,zacps/zulip,showell/zulip,isht3/zulip,hustlzp/zulip,codeKonami/zulip,ikasumiwt/zulip,jessedhillon/zulip,so0k/zulip,sup95/zulip,jonesgithub/zulip,easyfmxu/zulip,qq1012803704/zulip,littledogboy/zulip,ufosky-server/zulip,wdaher/zulip,natanovia/zulip,Jianchun1/zulip,umkay/zulip,ashwinirudrappa/zulip,susansls/zulip,thomasboyt/zulip,ApsOps/zulip,LAndreas/zulip,technicalpickles/zulip,kou/zulip,mohsenSy/zulip,levixie/zulip,firstblade/zulip,praveenaki/zulip,proliming/zulip,deer-hope/zulip,gkotian/zulip,calvinleenyc/zulip,KJin99/zulip,tiansiyuan/zulip,grave-w-grave/zulip,yuvipanda/zulip,jerryge/zulip,johnny9/zulip,wavelets/zulip,wdaher/zulip,vikas-parashar/zulip,vakila/zulip,Galexrt/zulip,Frouk/zulip,littledogboy/zulip,swinghu/zulip,LeeRisk/zulip,avastu/zulip,kokoar/zulip,joshisa/zulip,jainayush975/zulip,seapasulli/zulip,isht3/zulip,glovebx/zulip,hustlzp/zulip,Qgap/zulip,zorojean/zulip,timabbott/zulip,KingxBanana/zulip,wdaher/zulip,vakila/zulip,yuvipanda/zulip,so0k/zulip,JPJPJPOPOP/zulip,umkay/zulip,dawran6/zulip,ipernet/zulip,RobotCaleb/zulip,shaunstanislaus/zulip,karamcnair/zulip,moria/zulip,udxxabp/zulip,RobotCaleb/zulip,armooo/zulip,deer-hope/zulip,vaidap/zulip,shrikrishnaholla/zulip,peiwei/zulip,Cheppers/zulip,MariaFaBella85/zulip,qq1012803704/zulip,bluesea/zulip,praveenaki/zulip,zofuthan/zulip,Jianchun1/zulip,tdr130/zulip,sup95/zulip,showell/zulip,dxq-git/zulip,Vallher/zulip,verma-varsha/zulip,ufosky-server/zulip,glovebx/zulip,Gabriel0402/zulip,tdr130/zulip,deer-hope/zulip,arpitpanwar/zulip,udxxabp/zulip,aakash-cr7/zulip,niftynei/zulip,noroot/zulip,krtkmj/zulip,eastlhu/zulip,Batterfii/zulip,themass/zulip,amanharitsh123/zulip,bssrdf/zulip,timabbott/zulip,xuanhan863/zulip,j831/zulip,zhaoweigg/zulip,yuvipanda/zulip,Drooids/zulip,Frouk/zulip,akuseru/zulip,jessedhillon/zulip,jainayush975/zulip,Cheppers/zulip,yocome/zulip,christi3k/zulip,easyfmxu/zulip,Suninus/zulip,shrikrishnaholla/zulip,dattatreya303/zulip,thomasboyt/zulip,samatdav/zulip,Suninus/zulip,wangdeshui/zulip,huangkebo/zulip,mohsenSy/zulip,wdaher/zulip,xuanhan863/zulip,vikas-parashar/zulip,tiansiyuan/zulip,bastianh/zulip,zorojean/zulip,dnmfarrell/zulip,eastlhu/zulip,arpitpanwar/zulip,kokoar/zulip,yuvipanda/zulip,andersk/zulip,KJin99/zulip,zorojean/zulip,amallia/zulip,LAndreas/zulip,schatt/zulip,ikasumiwt/zulip,gigawhitlocks/zulip,LAndreas/zulip,Jianchun1/zulip,jessedhillon/zulip,aakash-cr7/zulip,joshisa/zulip,ikasumiwt/zulip,hayderimran7/zulip,themass/zulip,dattatreya303/zulip,christi3k/zulip,ryanbackman/zulip,sharmaeklavya2/zulip,ahmadassaf/zulip,fw1121/zulip,technicalpickles/zulip,christi3k/zulip,shrikrishnaholla/zulip,AZtheAsian/zulip,bssrdf/zulip,niftynei/zulip,hackerkid/zulip,nicholasbs/zulip,dwrpayne/zulip,EasonYi/zulip,aakash-cr7/zulip,mdavid/zulip,cosmicAsymmetry/zulip,bitemyapp/zulip,cosmicAsymmetry/zulip,xuanhan863/zulip,he15his/zulip,jimmy54/zulip,bluesea/zulip,cosmicAsymmetry/zulip,zhaoweigg/zulip,dxq-git/zulip,krtkmj/zulip,ryansnowboarder/zulip,ahmadassaf/zulip,rishig/zulip,praveenaki/zulip,bowlofstew/zulip,gigawhitlocks/zulip,zwily/zulip,yocome/zulip,firstblade/zulip,jessedhillon/zulip,wavelets/zulip,showell/zulip,eeshangarg/zulip,vabs22/zulip,sharmaeklavya2/zulip,Cheppers/zulip,huangkebo/zulip,ApsOps/zulip,kokoar/zulip,showell/zulip,hustlzp/zulip,dotcool/zulip,niftynei/zulip,bastianh/zulip,rishig/zulip,lfranchi/zulip,itnihao/zulip,Galexrt/zulip,amallia/zulip,xuanhan863/zulip,Vallher/zulip,zachallaun/zulip,ryanbackman/zulip,easyfmxu/zulip,PhilSk/zulip,shubhamdhama/zulip,christi3k/zulip,stamhe/zulip,JPJPJPOPOP/zulip,Qgap/zulip,timabbott/zulip,wavelets/zulip,KingxBanana/zulip,timabbott/zulip,dawran6/zulip,akuseru/zulip,samatdav/zulip,deer-hope/zulip,codeKonami/zulip,eeshangarg/zulip,bluesea/zulip,deer-hope/zulip,sharmaeklavya2/zulip,arpitpanwar/zulip,JanzTam/zulip,dwrpayne/zulip,PhilSk/zulip,dawran6/zulip,aliceriot/zulip,yuvipanda/zulip,jphilipsen05/zulip,sonali0901/zulip,AZtheAsian/zulip,adnanh/zulip,TigorC/zulip,johnny9/zulip,zachallaun/zulip,tommyip/zulip,punchagan/zulip,joshisa/zulip,gkotian/zulip,LeeRisk/zulip,DazWorrall/zulip,arpith/zulip,dattatreya303/zulip,he15his/zulip,ikasumiwt/zulip,ericzhou2008/zulip,ashwinirudrappa/zulip,ipernet/zulip,jerryge/zulip,willingc/zulip,j831/zulip,codeKonami/zulip,johnnygaddarr/zulip,RobotCaleb/zulip,zofuthan/zulip,dotcool/zulip,praveenaki/zulip,jrowan/zulip,aliceriot/zulip,yuvipanda/zulip,joyhchen/zulip,jphilipsen05/zulip,RobotCaleb/zulip,zwily/zulip,KJin99/zulip,wangdeshui/zulip,ahmadassaf/zulip,schatt/zulip,tbutter/zulip,joyhchen/zulip,Frouk/zulip,karamcnair/zulip,themass/zulip,noroot/zulip,levixie/zulip,rht/zulip,yuvipanda/zulip,voidException/zulip,esander91/zulip,dattatreya303/zulip,ipernet/zulip,Batterfii/zulip,JPJPJPOPOP/zulip,peiwei/zulip,kaiyuanheshang/zulip,jimmy54/zulip,krtkmj/zulip,praveenaki/zulip,punchagan/zulip,bitemyapp/zulip,shaunstanislaus/zulip,luyifan/zulip,punchagan/zulip,sharmaeklavya2/zulip,calvinleenyc/zulip,zachallaun/zulip,armooo/zulip,luyifan/zulip,PhilSk/zulip,dattatreya303/zulip,alliejones/zulip,paxapy/zulip,noroot/zulip,rht/zulip,dawran6/zulip,Suninus/zulip,paxapy/zulip,JanzTam/zulip,schatt/zulip,developerfm/zulip,kou/zulip,tbutter/zulip,gigawhitlocks/zulip,tbutter/zulip,codeKonami/zulip,jeffcao/zulip,zachallaun/zulip,synicalsyntax/zulip,karamcnair/zulip,armooo/zulip,bssrdf/zulip,udxxabp/zulip,ryansnowboarder/zulip,SmartPeople/zulip,bluesea/zulip,rht/zulip,brainwane/zulip,eeshangarg/zulip,jphilipsen05/zulip,m1ssou/zulip,tommyip/zulip,jrowan/zulip,firstblade/zulip,thomasboyt/zulip,hj3938/zulip,amyliu345/zulip,dwrpayne/zulip,guiquanz/zulip,stamhe/zulip,vakila/zulip,LAndreas/zulip,ipernet/zulip,ashwinirudrappa/zulip,KJin99/zulip,PaulPetring/zulip,dxq-git/zulip,EasonYi/zulip,ApsOps/zulip,dhcrzf/zulip,m1ssou/zulip,xuanhan863/zulip,levixie/zulip,natanovia/zulip,atomic-labs/zulip,natanovia/zulip,atomic-labs/zulip,KingxBanana/zulip,sup95/zulip,susansls/zulip,vabs22/zulip,wangdeshui/zulip,bluesea/zulip,MayB/zulip,vikas-parashar/zulip,Suninus/zulip,developerfm/zulip,aliceriot/zulip,guiquanz/zulip,tiansiyuan/zulip,zorojean/zulip,wweiradio/zulip,isht3/zulip,voidException/zulip,umkay/zulip,johnnygaddarr/zulip,zachallaun/zulip,grave-w-grave/zulip,huangkebo/zulip,deer-hope/zulip,dhcrzf/zulip,technicalpickles/zulip,EasonYi/zulip,andersk/zulip,vabs22/zulip,zulip/zulip,zulip/zulip,jeffcao/zulip,wweiradio/zulip,glovebx/zulip,moria/zulip,zulip/zulip,EasonYi/zulip,mahim97/zulip,Juanvulcano/zulip,mahim97/zulip,jonesgithub/zulip,hustlzp/zulip,brainwane/zulip,zorojean/zulip,rht/zulip,JanzTam/zulip,proliming/zulip,atomic-labs/zulip,themass/zulip,yocome/zulip,itnihao/zulip,shubhamdhama/zulip,qq1012803704/zulip,grave-w-grave/zulip,aps-sids/zulip,gigawhitlocks/zulip,dnmfarrell/zulip,bowlofstew/zulip,developerfm/zulip,babbage/zulip,vaidap/zulip,armooo/zulip,willingc/zulip,tbutter/zulip,Drooids/zulip,eeshangarg/zulip,jrowan/zulip,sonali0901/zulip,gkotian/zulip,bssrdf/zulip,mdavid/zulip,arpitpanwar/zulip,vaidap/zulip,zofuthan/zulip,zulip/zulip,ryanbackman/zulip,wdaher/zulip,atomic-labs/zulip,fw1121/zulip,mohsenSy/zulip,armooo/zulip,ashwinirudrappa/zulip,AZtheAsian/zulip,yocome/zulip,stamhe/zulip,willingc/zulip,jackrzhang/zulip,jainayush975/zulip,blaze225/zulip,amallia/zulip,qq1012803704/zulip,shaunstanislaus/zulip,seapasulli/zulip,hj3938/zulip,mansilladev/zulip,hengqujushi/zulip,PaulPetring/zulip,esander91/zulip,ApsOps/zulip,moria/zulip,proliming/zulip,ashwinirudrappa/zulip,umkay/zulip,sonali0901/zulip,Frouk/zulip,shubhamdhama/zulip,alliejones/zulip,eeshangarg/zulip,peguin40/zulip,wangdeshui/zulip,saitodisse/zulip,andersk/zulip,reyha/zulip,hj3938/zulip,xuxiao/zulip,amyliu345/zulip,zulip/zulip,zorojean/zulip,dnmfarrell/zulip,brockwhittaker/zulip,LAndreas/zulip,seapasulli/zulip,ericzhou2008/zulip,dhcrzf/zulip,krtkmj/zulip,KingxBanana/zulip,swinghu/zulip,zorojean/zulip,avastu/zulip,susansls/zulip,adnanh/zulip,saitodisse/zulip,technicalpickles/zulip,hj3938/zulip,fw1121/zulip,fw1121/zulip,shaunstanislaus/zulip,yocome/zulip,kaiyuanheshang/zulip,eastlhu/zulip,aakash-cr7/zulip,dwrpayne/zulip,dxq-git/zulip,ashwinirudrappa/zulip,jrowan/zulip,itnihao/zulip,blaze225/zulip,wdaher/zulip,Vallher/zulip,stamhe/zulip,zofuthan/zulip,showell/zulip,RobotCaleb/zulip,esander91/zulip,ericzhou2008/zulip,zofuthan/zulip,tommyip/zulip,dnmfarrell/zulip,itnihao/zulip,ahmadassaf/zulip,moria/zulip,vaidap/zulip,johnny9/zulip,voidException/zulip,ufosky-server/zulip,saitodisse/zulip,PaulPetring/zulip,tbutter/zulip,ipernet/zulip,SmartPeople/zulip,seapasulli/zulip,JPJPJPOPOP/zulip,Batterfii/zulip,andersk/zulip,timabbott/zulip,qq1012803704/zulip,jimmy54/zulip,zulip/zulip,SmartPeople/zulip,Drooids/zulip,so0k/zulip,kou/zulip,Qgap/zulip,yocome/zulip,souravbadami/zulip,hengqujushi/zulip,bssrdf/zulip,he15his/zulip,Cheppers/zulip,mdavid/zulip,alliejones/zulip,nicholasbs/zulip,rishig/zulip,dxq-git/zulip,joyhchen/zulip,yocome/zulip,m1ssou/zulip,shrikrishnaholla/zulip,hengqujushi/zulip,paxapy/zulip,stamhe/zulip,LeeRisk/zulip,hackerkid/zulip,guiquanz/zulip,KJin99/zulip,udxxabp/zulip,sonali0901/zulip,punchagan/zulip,he15his/zulip,MariaFaBella85/zulip,mohsenSy/zulip,thomasboyt/zulip,lfranchi/zulip,tdr130/zulip,johnny9/zulip,adnanh/zulip,JPJPJPOPOP/zulip,PhilSk/zulip,kaiyuanheshang/zulip,glovebx/zulip,pradiptad/zulip,hafeez3000/zulip,ApsOps/zulip,Qgap/zulip,Vallher/zulip,johnnygaddarr/zulip,codeKonami/zulip,PhilSk/zulip,Gabriel0402/zulip,firstblade/zulip,levixie/zulip,zofuthan/zulip,m1ssou/zulip,littledogboy/zulip,hafeez3000/zulip,babbage/zulip,kaiyuanheshang/zulip,luyifan/zulip,umkay/zulip,ryanbackman/zulip,lfranchi/zulip,itnihao/zulip,ahmadassaf/zulip,jerryge/zulip,aliceriot/zulip,nicholasbs/zulip,zacps/zulip,swinghu/zulip,brockwhittaker/zulip,JanzTam/zulip,joyhchen/zulip,huangkebo/zulip,jackrzhang/zulip,suxinde2009/zulip,littledogboy/zulip,aps-sids/zulip,joshisa/zulip,j831/zulip,littledogboy/zulip,dotcool/zulip,wweiradio/zulip,seapasulli/zulip,isht3/zulip,eeshangarg/zulip,umkay/zulip,mansilladev/zulip,johnny9/zulip,verma-varsha/zulip,johnnygaddarr/zulip,samatdav/zulip,kou/zulip,Drooids/zulip,guiquanz/zulip,synicalsyntax/zulip,hackerkid/zulip,hayderimran7/zulip,amallia/zulip,amallia/zulip,LeeRisk/zulip,nicholasbs/zulip,dawran6/zulip,glovebx/zulip,gigawhitlocks/zulip,Diptanshu8/zulip,kou/zulip,grave-w-grave/zulip,hackerkid/zulip,krtkmj/zulip,ApsOps/zulip,rht/zulip,jphilipsen05/zulip,reyha/zulip,thomasboyt/zulip,nicholasbs/zulip,arpitpanwar/zulip,hafeez3000/zulip,peguin40/zulip,zhaoweigg/zulip,moria/zulip,jimmy54/zulip,guiquanz/zulip | zerver/lib/migrate.py | zerver/lib/migrate.py | import re
import time
def timed_ddl(db, stmt):
print
print time.asctime()
print stmt
t = time.time()
db.execute(stmt)
delay = time.time() - t
print 'Took %.2fs' % (delay,)
def validate(sql_thingy):
# Do basic validation that table/col name is safe.
if not re.match('^[a-z][a-z\d_]+$', sql_thingy):
raise Exception('Invalid SQL object: %s' % (sql_thingy,))
def do_batch_update(db, table, cols, vals, batch_size=10000, sleep=0.1):
validate(table)
for col in cols:
validate(col)
stmt = '''
UPDATE %s
SET (%s) = (%s)
WHERE id >= %%s AND id < %%s
''' % (table, ', '.join(cols), ', '.join(['%s'] * len(cols)))
print stmt
(min_id, max_id) = db.execute("SELECT MIN(id), MAX(id) FROM %s" % (table,))[0]
if min_id is None:
return
print "%s rows need updating" % (max_id - min_id,)
while min_id <= max_id:
lower = min_id
upper = min_id + batch_size
print '%s about to update range [%s,%s)' % (time.asctime(), lower, upper)
db.start_transaction()
params = list(vals) + [lower, upper]
db.execute(stmt, params=params)
db.commit_transaction()
min_id = upper
time.sleep(sleep)
def add_bool_columns(db, table, cols):
validate(table)
for col in cols:
validate(col)
coltype = 'boolean'
val = 'false'
stmt = ('ALTER TABLE %s ' % (table,)) \
+ ', '.join(['ADD %s %s' % (col, coltype) for col in cols])
timed_ddl(db, stmt)
stmt = ('ALTER TABLE %s ' % (table,)) \
+ ', '.join(['ALTER %s SET DEFAULT %s' % (col, val) for col in cols])
timed_ddl(db, stmt)
vals = [val] * len(cols)
do_batch_update(db, table, cols, vals)
stmt = 'ANALYZE %s' % (table,)
timed_ddl(db, stmt)
stmt = ('ALTER TABLE %s ' % (table,)) \
+ ', '.join(['ALTER %s SET NOT NULL' % (col,) for col in cols])
timed_ddl(db, stmt)
| import re
import time
def timed_ddl(db, stmt):
print
print time.asctime()
print stmt
t = time.time()
db.execute(stmt)
delay = time.time() - t
print 'Took %.2fs' % (delay,)
def validate(sql_thingy):
# Do basic validation that table/col name is safe.
if not re.match('^[a-z][a-z\d_]+$', sql_thingy):
raise Exception('Invalid SQL object: %s' % (sql_thingy,))
def do_batch_update(db, table, col, val, batch_size=10000, sleep=0.1):
validate(table)
validate(col)
stmt = '''
UPDATE %s
SET %s = %%s
WHERE id >= %%s AND id < %%s
''' % (table, col)
(min_id, max_id) = db.execute("SELECT MIN(id), MAX(id) FROM %s" % (table,))[0]
if min_id is None:
return
while min_id <= max_id:
lower = min_id
upper = min_id + batch_size
print '%s about to update range [%s,%s)' % (time.asctime(), lower, upper)
db.start_transaction()
db.execute(stmt, params=[val, lower, upper])
db.commit_transaction()
min_id = upper
time.sleep(sleep)
def add_bool_column(db, table, col):
validate(table)
validate(col)
coltype = 'boolean'
val = 'false'
stmt = 'ALTER TABLE %s ADD %s %s' % (table, col, coltype)
timed_ddl(db, stmt)
stmt = 'ALTER TABLE %s ALTER %s SET DEFAULT %s' % (table, col, val)
timed_ddl(db, stmt)
do_batch_update(db, table, col, val)
stmt = 'ANALYZE %s' % (table,)
timed_ddl(db, stmt)
stmt = 'ALTER TABLE %s ALTER %s SET NOT NULL' % (table, col)
timed_ddl(db, stmt)
| apache-2.0 | Python |
07d01e16bec95941ac5a85116821190c86658458 | add request data to app class | salamer/jolla,salamer/jolla,NKUCodingCat/jolla,NKUCodingCat/jolla | server/server.py | server/server.py | import gevent.monkey
gevent.monkey.patch_all()
from gevent.pywsgi import WSGIServer
import re
from HTTPerror import HTTP404Error
def render(filename):
with open(filename, "r") as f:
res = f.read()
return res
def index():
return render("../templates/index.html")
def name():
return render("../templates/name.html")
urls = [
(r'/', index),
(r'/name', name)
]
def application(environ, start_response):
the_app = app(environ)
try:
html_code = the_app.parse()
status = '200 OK'
except HTTP404Error:
status = '404 NOT FOUND'
html_code = render("../templates/404.html")
header = [
('Content-Type', 'text/html')
]
start_response(status, header)
return html_code
class app():
def __init__(self, environ):
self._environ = environ
self._path = self._environ['PATH_INFO']
self.request = {}
self.request['method'] = self._environ['REQUEST_METHOD']
self.request['data'] = {}
line = self._environ['QUERY_STRING']
for data_pair in environ['wsgi.input'].read().split('&'):
key,value=data_pair.split('=')
self.request['data'][key]=value
def parse(self):
for url_handler in urls:
if url_handler[0] == r'/':
if self._environ['PATH_INFO'] != '/':
continue
else:
html_code = url_handler[1]()
if re.match(self._environ['PATH_INFO'], url_handler[0]):
html_code = url_handler[1]()
return html_code
else:
raise HTTP404Error('REQUEST NOT FOUND IN ROUTE CONFIGURATION')
class jolla_server(WSGIServer):
def __init__(self, app, port=8000, host="127.0.0.1"):
self.port = port
self.host = host
self._app = app
WSGIServer.__init__(self, listener=(
self.host, self.port), application=self._app)
def run_server(self):
print "the server is running on the {} in the port {}".format(self.host, self.port)
self.serve_forever()
if __name__ == "__main__":
server = jolla_server(application)
server.run_server()
| import gevent.monkey
gevent.monkey.patch_all()
from gevent.pywsgi import WSGIServer
import re
from HTTPerror import HTTP404Error
def render(filename):
with open(filename, "r") as f:
res = f.read()
return res
def index():
return render("../templates/index.html")
def name():
return render("../templates/name.html")
urls = [
(r'/', index),
(r'/name', name)
]
def application(environ, start_response):
the_app = app(environ)
try:
html_code = the_app.parse()
status = '200 OK'
except HTTP404Error:
status = '404 NOT FOUND'
html_code = render("../templates/404.html")
header = [
('Content-Type', 'text/html')
]
start_response(status, header)
return html_code
class app():
def __init__(self, environ):
self._environ = environ
self._path = self._environ['PATH_INFO']
self.request = {}
self.request['method'] = self._environ['REQUEST_METHOD']
self.request['data'] = {}
line = self._environ['QUERY_STRING']
def parse(self):
for url_handler in urls:
if url_handler[0] == r'/':
if self._environ['PATH_INFO'] != '/':
continue
else:
html_code = url_handler[1]()
if re.match(self._environ['PATH_INFO'], url_handler[0]):
html_code = url_handler[1]()
return html_code
else:
raise HTTP404Error('REQUEST NOT FOUND IN ROUTE CONFIGURATION')
class jolla_server(WSGIServer):
def __init__(self, app, port=8000, host="127.0.0.1"):
self.port = port
self.host = host
self._app = app
WSGIServer.__init__(self, listener=(
self.host, self.port), application=self._app)
def run_server(self):
print "the server is running on the {} in the port {}".format(self.host, self.port)
self.serve_forever()
if __name__ == "__main__":
server = jolla_server(application)
server.run_server()
| apache-2.0 | Python |
10f307b7f0c4e0467a053ed59235fa2ba4bc6968 | Fix a test. | ecolell/pfamserver,ecolell/pfamserver,ecolell/pfamserver | backend/tests/api/v0/test_protein_sequence.py | backend/tests/api/v0/test_protein_sequence.py | import json
def test_get_protein_sequence(
db,
client,
egfr_human_partial_sequence,
mock_pfam_scan_egfr_human,
uniprot_reg_full_egfr_human,
):
sequence = egfr_human_partial_sequence
headers = [("Accept", "application/json"), ("Content-Type", "application/json")]
res = client.get("/api/v0/protein_sequences/" + sequence, headers=headers)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data["query"] == sequence
results = data["output"]
assert results == [
{
"description": "Receptor L domain",
"pfamA_acc": "PF01030",
"seq_start": 57,
"seq_end": 168,
"num_full": 3152,
},
{
"description": "Furin-like cysteine rich region",
"pfamA_acc": "PF00757",
"seq_start": 177,
"seq_end": 338,
"num_full": 1146,
},
{
"description": "Receptor L domain",
"pfamA_acc": "PF01030",
"seq_start": 361,
"seq_end": 481,
"num_full": 3152,
},
{
"description": "Growth factor receptor domain IV",
"pfamA_acc": "PF14843",
"seq_start": 505,
"seq_end": 637,
"num_full": 1070,
},
{
"description": "Protein tyrosine kinase",
"pfamA_acc": "PF07714",
"seq_start": 712,
"seq_end": 968,
"num_full": 68047,
},
]
sequence = sequence[:30] + "\n" + sequence[31:]
res = client.get("/api/v0/protein_sequences/" + sequence, headers=headers)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data["query"] == sequence
assert data["output"] == []
| from __future__ import unicode_literals
import json
import pytest
def test_get_protein_sequence(db, client, egfr_human_partial_sequence):
sequence = egfr_human_partial_sequence
headers = [('Accept', 'application/json'),
('Content-Type', 'application/json')]
res = client.get('/api/v0/protein_sequences/' + sequence, headers=headers)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['query'] == sequence
results = data['output']
assert len(results) == 2
assert results[0]['seq_end'] == 61
assert results[0]['seq_start'] == 1
assert results[0]['pfamA_acc'] == 'PF14843'
assert results[0]['description'] == 'Growth factor receptor domain IV'
assert 'num_full' in results[0]
assert results[1]['seq_end'] == 392
assert results[1]['seq_start'] == 136
assert results[1]['pfamA_acc'] == 'PF07714'
assert results[1]['description'] == 'Protein tyrosine kinase'
assert 'num_full' in results[1]
sequence = sequence[:30] + '\n' + sequence[31:]
res = client.get('/api/v0/protein_sequences/' + sequence, headers=headers)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['query'] == sequence
assert data['output'] == []
| agpl-3.0 | Python |
d20aef9623d0f82637220d4a8927c7b0549711c3 | Fix test_fauxware_oppologist. (#2365) | angr/angr,angr/angr,angr/angr | tests/test_oppologist.py | tests/test_oppologist.py | import os
import angr
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')
def _ultra_oppologist(p, s):
old_ops = dict(angr.engines.vex.claripy.irop.operations)
try:
angr.engines.vex.claripy.irop.operations.clear()
angr.engines.vex.claripy.irop.operations['Iop_Add32'] = old_ops['Iop_Add32']
pg = p.factory.simulation_manager(s)
pg.use_technique(angr.exploration_techniques.Oppologist())
pg.explore()
return pg
finally:
angr.engines.vex.claripy.irop.operations.update(old_ops)
def test_fauxware_oppologist():
p = angr.Project(os.path.join(test_location, 'binaries', 'tests', 'i386', 'fauxware'))
s = p.factory.full_init_state(
remove_options={ angr.options.LAZY_SOLVES, angr.options.EXTENDED_IROP_SUPPORT }
)
pg = _ultra_oppologist(p, s)
assert len(pg.deadended) == 1
assert len(pg.deadended[0].posix.dumps(0)) == 18
stdout = pg.deadended[0].posix.dumps(1)
if b"trusted user" in stdout:
assert stdout.count(b"\n") == 3
else:
assert stdout.count(b"\n") == 2
def test_cromu_70():
p = angr.Project(os.path.join(test_location, 'binaries', 'tests', 'cgc', 'CROMU_00070'))
inp = bytes.fromhex("030e000001000001001200010000586d616ce000000600030000040dd0000000000600000606000006030e000001000001003200010000586d616ce0030000000000030e000001000001003200010000586d616ce003000000000006000006030e000001000001003200010000586d616ce0030000df020000")
s = p.factory.full_init_state(
add_options={ angr.options.UNICORN },
remove_options={ angr.options.LAZY_SOLVES, angr.options.SUPPORT_FLOATING_POINT },
stdin=inp
)
#import traceit
pg = p.factory.simulation_manager(s)
pg.use_technique(angr.exploration_techniques.Oppologist())
pg.run(n=50)
assert pg.one_active.history.block_count > 1500
def run_all():
functions = globals()
all_functions = dict(filter((lambda kv: kv[0].startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
print(f)
all_functions[f]()
if __name__ == '__main__':
run_all()
| import os
import angr
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')
def _ultra_oppologist(p, s):
old_ops = dict(angr.engines.vex.claripy.irop.operations)
try:
angr.engines.vex.claripy.irop.operations.clear()
angr.engines.vex.claripy.irop.operations['Iop_Add32'] = old_ops['Iop_Add32']
pg = p.factory.simulation_manager(s)
pg.use_technique(angr.exploration_techniques.Oppologist())
pg.explore()
return pg
finally:
angr.engines.vex.claripy.irop.operations.update(old_ops)
def test_fauxware_oppologist():
p = angr.Project(os.path.join(test_location, 'binaries', 'tests', 'i386', 'fauxware'))
s = p.factory.full_init_state(
remove_options={ angr.options.LAZY_SOLVES, angr.options.EXTENDED_IROP_SUPPORT }
)
pg = _ultra_oppologist(p, s)
assert len(pg.deadended) == 1
assert len(pg.deadended[0].posix.dumps(0)) == 18
assert pg.deadended[0].posix.dumps(1).count(b"\n") == 3
def test_cromu_70():
p = angr.Project(os.path.join(test_location, 'binaries', 'tests', 'cgc', 'CROMU_00070'))
inp = bytes.fromhex("030e000001000001001200010000586d616ce000000600030000040dd0000000000600000606000006030e000001000001003200010000586d616ce0030000000000030e000001000001003200010000586d616ce003000000000006000006030e000001000001003200010000586d616ce0030000df020000")
s = p.factory.full_init_state(
add_options={ angr.options.UNICORN },
remove_options={ angr.options.LAZY_SOLVES, angr.options.SUPPORT_FLOATING_POINT },
stdin=inp
)
#import traceit
pg = p.factory.simulation_manager(s)
pg.use_technique(angr.exploration_techniques.Oppologist())
pg.run(n=50)
assert pg.one_active.history.block_count > 1500
def run_all():
functions = globals()
all_functions = dict(filter((lambda kv: kv[0].startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
print(f)
all_functions[f]()
if __name__ == '__main__':
run_all()
| bsd-2-clause | Python |
45596b2e603e1ccb7cb271cc9834ede6293dd700 | create a local ghostpad before linking | h01ger/voctomix,voc/voctomix,h01ger/voctomix,voc/voctomix | voctocore/experiments/binlinktest.py | voctocore/experiments/binlinktest.py | #!/usr/bin/python3
import gi, time
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst, GObject
GObject.threads_init()
Gst.init(None)
class SrcBin(Gst.Bin):
def __init__(self):
super().__init__()
self.src = Gst.ElementFactory.make('videotestsrc', 'src')
self.add(self.src)
self.add_pad(
Gst.GhostPad.new('src', self.src.get_static_pad('src'))
)
class SinkBin(Gst.Bin):
def __init__(self):
super().__init__()
self.sink = Gst.ElementFactory.make('autovideosink', 'sink')
self.add(self.sink)
self.add_pad(
Gst.GhostPad.new('sink', self.sink.get_static_pad('sink'))
)
class MixBin(Gst.Bin):
def __init__(self):
super().__init__()
self.mix = Gst.ElementFactory.make('videomixer', 'src')
self.add(self.mix)
self.add_pad(
Gst.GhostPad.new('src', self.mix.get_static_pad('src'))
)
def create_ghostpad(self, pad):
ghostpad = Gst.GhostPad.new(pad.get_name(), pad)
self.add_pad(ghostpad)
return ghostpad
def add_src(self, src):
sinkpad = self.mix.get_request_pad('sink_%u')
sinkpad.set_property('alpha', 0.75)
srcpad = src.get_static_pad('src')
# works
#print(src.link(self.mix)) # True
# doesn't
#print(srcpad.link(sinkpad)) # Error => GST_PAD_LINK_WRONG_HIERARCHY
# but this does
sinkghostpad = self.create_ghostpad(sinkpad)
print(srcpad.link(sinkghostpad)) # True
class Example:
def __init__(self):
self.mainloop = GObject.MainLoop()
self.pipeline = Gst.Pipeline()
self.src = SrcBin()
self.sink = SinkBin()
self.mix = MixBin()
# Add elements to pipeline
self.pipeline.add(self.src)
self.pipeline.add(self.sink)
self.pipeline.add(self.mix)
self.mix.add_src(self.src)
self.mix.link(self.sink)
def run(self):
self.pipeline.set_state(Gst.State.PLAYING)
self.mainloop.run()
def kill(self):
self.pipeline.set_state(Gst.State.NULL)
self.mainloop.quit()
example = Example()
example.run()
| #!/usr/bin/python3
import gi, time
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst, GObject
GObject.threads_init()
Gst.init(None)
class SrcBin(Gst.Bin):
def __init__(self):
super().__init__()
self.src = Gst.ElementFactory.make('videotestsrc', 'src')
self.add(self.src)
self.add_pad(
Gst.GhostPad.new('src', self.src.get_static_pad('src'))
)
class SinkBin(Gst.Bin):
def __init__(self):
super().__init__()
self.sink = Gst.ElementFactory.make('autovideosink', 'sink')
self.add(self.sink)
self.add_pad(
Gst.GhostPad.new('sink', self.sink.get_static_pad('sink'))
)
class MixBin(Gst.Bin):
def __init__(self):
super().__init__()
self.mix = Gst.ElementFactory.make('videomixer', 'src')
self.add(self.mix)
self.add_pad(
Gst.GhostPad.new('src', self.mix.get_static_pad('src'))
)
def add_src(self, src):
sinkpad = self.mix.get_request_pad('sink_%u')
sinkpad.set_property('alpha', 0.75)
srcpad = src.get_static_pad('src')
# works
#print(src.link(self.mix)) # True
# doesn't
print(srcpad.link(sinkpad)) # Error => GST_PAD_LINK_WRONG_HIERARCHY
class Example:
def __init__(self):
self.mainloop = GObject.MainLoop()
self.pipeline = Gst.Pipeline()
self.src = SrcBin()
self.sink = SinkBin()
self.mix = MixBin()
# Add elements to pipeline
self.pipeline.add(self.src)
self.pipeline.add(self.sink)
self.pipeline.add(self.mix)
self.mix.add_src(self.src)
self.mix.link(self.sink)
def run(self):
self.pipeline.set_state(Gst.State.PLAYING)
self.mainloop.run()
def kill(self):
self.pipeline.set_state(Gst.State.NULL)
self.mainloop.quit()
example = Example()
example.run()
| mit | Python |
b8e24b78feae538806c7de0f138623cbe179646c | Fix submit guard. | veroc/Bika-LIMS,rockfruit/bika.lims,anneline/Bika-LIMS,anneline/Bika-LIMS,veroc/Bika-LIMS,DeBortoliWines/Bika-LIMS,DeBortoliWines/Bika-LIMS,DeBortoliWines/Bika-LIMS,labsanmartin/Bika-LIMS,veroc/Bika-LIMS,anneline/Bika-LIMS,labsanmartin/Bika-LIMS,labsanmartin/Bika-LIMS,rockfruit/bika.lims | bika/lims/skins/bika/guard_submit_analysis.py | bika/lims/skins/bika/guard_submit_analysis.py | ## Script (Python) "guard_submit_analysis"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=
##
wf_tool = context.portal_workflow
analyses = ['Analysis', 'DuplicateAnalysis', 'ReferenceAnalysis', ]
if context.portal_type in analyses:
dependencies = context.getDependencies()
if dependencies:
interim_fields = False
service = context.getService()
calculation = service.getCalculation()
if calculation:
interim_fields = calculation.getInterimFields()
for dep in dependencies:
review_state = wf_tool.getInfoFor(dep, 'review_state')
if interim_fields:
if review_state in ('sample_due', 'sample_received', 'attachment_due', 'to_be_verified',):
return False
else:
if review_state in ('sample_due', 'sample_received',):
return False
return True
elif context.portal_type == 'AnalysisRequest':
# Only transition to 'attachment_due' if all analyses are at least there.
for a in context.objectValues('Analysis'):
review_state = wf_tool.getInfoFor(a, 'review_state')
if review_state in ('sample_due', 'sample_received',):
return False
return True
| ## Script (Python) "guard_submit_analysis"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=
##
wf_tool = context.portal_workflow
analyses = ['Analysis', 'DuplicateAnalysis', 'ReferenceAnalysis', ]
if context.portal_type in analyses:
if not context.getResult():
return False
dependencies = context.getDependencies()
if dependencies:
interim_fields = False
service = context.getService()
calculation = service.getCalculation()
if calculation:
interim_fields = calculation.getInterimFields()
for dep in dependencies:
review_state = wf_tool.getInfoFor(dep, 'review_state')
if interim_fields:
if review_state in ('sample_due', 'sample_received', 'attachment_due', 'to_be_verified',):
return False
else:
if review_state in ('sample_due', 'sample_received',):
return False
return True
elif context.portal_type == 'AnalysisRequest':
# Only transition to 'attachment_due' if all analyses are at least there.
for a in context.objectValues('Analysis'):
review_state = wf_tool.getInfoFor(a, 'review_state')
if review_state in ('sample_due', 'sample_received',):
return False
return True
| agpl-3.0 | Python |
6a8a6ba6a926eab3bf51dc9d4080d11b274799aa | Update copyright | ichenq/WinsockTut,ichenq/WinsockTut,ichenq/WinsockTut | tests/test_tcp_client.py | tests/test_tcp_client.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 ichenq@outlook.com. All rights reserved.
# Distributed under the terms and conditions of the Apache License.
# See accompanying files LICENSE.
#
import socket
import asyncore
import random
import pdb
class tcp_client(asyncore.dispatcher):
def __init__(self, host, port, msg):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((host, port))
self.buffer = msg
self.send_bytes = 0
self.recv_bytes = 0
def handle_connect(self):
print(repr(self), 'connected')
def handle_close(self):
print(repr(self), 'bytes sent:', self.send_bytes, 'bytes read:', self.recv_bytes)
self.close()
def handle_read(self):
data = self.recv(8192)
self.recv_bytes += len(data)
def writable(self):
return (len(self.buffer) > 0)
def handle_write(self):
sent = self.send(self.buffer)
self.send_bytes += sent
def create_clients(host, port, count, msg):
clients = []
for i in range(count):
c = tcp_client(host, port, msg)
clients.append(c)
return clients
def run_test():
host = '127.0.0.1'
port = 32450
maxcount = 512 # max 512
msg = 'GET /index.html HTTP/1.0\r\n\r\n'
count = random.randint(1, maxcount)
print(count, 'testing client')
clients = create_clients(host, port, count, msg)
asyncore.loop()
if __name__ == '__main__':
run_test()
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import asyncore
import random
import pdb
class tcp_client(asyncore.dispatcher):
def __init__(self, host, port, msg):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((host, port))
self.buffer = msg
self.send_bytes = 0
self.recv_bytes = 0
def handle_connect(self):
print(repr(self), 'connected')
def handle_close(self):
print(repr(self), 'bytes sent:', self.send_bytes, 'bytes read:', self.recv_bytes)
self.close()
def handle_read(self):
data = self.recv(8192)
self.recv_bytes += len(data)
def writable(self):
return (len(self.buffer) > 0)
def handle_write(self):
sent = self.send(self.buffer)
self.send_bytes += sent
def create_clients(host, port, count, msg):
clients = []
for i in range(count):
c = tcp_client(host, port, msg)
clients.append(c)
return clients
def run_test():
host = '127.0.0.1'
port = 32450
maxcount = 512 # max 512
msg = 'GET /index.html HTTP/1.0\r\n\r\n'
count = random.randint(1, maxcount)
print(count, 'testing client')
clients = create_clients(host, port, count, msg)
asyncore.loop()
if __name__ == '__main__':
run_test()
| apache-2.0 | Python |
c57d0faf5828f750fe68446d927eb7271be25979 | add code for compiling notes/ section. also add code for creating a directory in the output folder if it doesnt already exist | nham/driveler | compile.py | compile.py | import sys
import os, shutil, subprocess
from glob import glob
site_title = 'wabbo'
include_dir = 'includes/'
exclude_files = ['readme.md']
out_folder = '_out/'
fname_no_ext = lambda fn: fn[:fn.index('.')]
def pandocConvert(pathto, fname):
dothtml = fname[:fname.index('.')] + '.html'
in_file = pathto + fname
out_file = out_folder + pathto + dothtml
pandoc_call = ['pandoc', '-s', in_file, '-t', 'html5', '-o', out_file,
'--include-in-header', include_dir+'header.html',
'--include-before-body', include_dir+'cover.html',
'--include-after-body', include_dir+'footer.html',
'--mathjax', '--smart', '--title-prefix', site_title]
p = subprocess.call(pandoc_call)
return
#return bytes.decode(p.communicate(bytes(source, 'UTF-8'))[0])
def isPage(fname):
pages = ['.md', '.rst']
return ('.' in fname) \
and (fname[fname.index('.'):] in pages) \
and fname not in exclude_files
# Compilation script begins here
for fname in os.listdir('./'):
if isPage(fname):
pandocConvert('', fname)
for fname in os.listdir('./blov/'):
if isPage(fname):
pandocConvert('blov/', fname)
for fname in os.listdir('./notes/'):
if isPage(fname) and fname_no_ext(fname) in ['linalg']:
out_dir = out_folder + 'notes/'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
pandocConvert('notes/', fname)
| import sys
import os, shutil, subprocess
from glob import glob
site_title = 'wabbo'
include_dir = 'includes/'
exclude_files = ['readme.md']
out_folder = '_out/'
def pandocConvert(pathto, fname):
dothtml = fname[:fname.index('.')] + '.html'
in_file = pathto + fname
out_file = out_folder + pathto + dothtml
pandoc_call = ['pandoc', '-s', in_file, '-t', 'html5', '-o', out_file,
'--include-in-header', include_dir+'header.html',
'--include-before-body', include_dir+'cover.html',
'--include-after-body', include_dir+'footer.html',
'--mathjax', '--smart', '--title-prefix', site_title]
p = subprocess.call(pandoc_call)
return
#return bytes.decode(p.communicate(bytes(source, 'UTF-8'))[0])
def isPage(fname):
pages = ['.md', '.rst']
return ('.' in fname) \
and (fname[fname.index('.'):] in pages) \
and fname not in exclude_files
for fname in os.listdir('./'):
if isPage(fname):
pandocConvert('', fname)
for fname in os.listdir('./blov/'):
if isPage(fname):
pandocConvert('blov/', fname)
| cc0-1.0 | Python |
9caf509d2884d91453441140d855693666ce183d | update the script. | smileboywtu/MusicRecommend,smileboywtu/LTCodeSerialDecoder | music_recommend.py | music_recommend.py | #!/usr/bin/env python2
"""
this script use python3 and work period to get the
recommend music from netease.
"""
import sqlite3
from netease import NetEase
from apscheduler.schedulers.blocking import BlockingScheduler
# config
USERNAME = '15897970114'
PASSWORD = 'c651bf7febcc1f324a984529959a0950'
DATABASE_URL = 'recommend.db'
UPDATE_PERIOD = 30 * 60
def get_song_list():
instance = NetEase()
login_info = instance.login(USERNAME, PASSWORD)
playlist = instance.recommend_playlist()
songs = instance.dig_info(playlist, 'songs')
# get ten most ones
collection = []
for index, song in enumerate(songs):
if index == 10: break
item = {}
item['artist'] = song['artist']
item['song_name'] = song['song_name']
collection.append(item)
return collection
def start():
songs = get_song_list()
with sqlite3.connect(DATABASE_URL) as db:
cursor = db.cursor()
cursor.execute('delete from music_recommend')
for song in songs:
cursor.execute("insert into music_recommend(name, singer) values(?, ?)",
(song['song_name'], song['artist']))
db.commit()
if __name__ == '__main__':
scheduler = BlockingScheduler()
scheduler.add_job(start, 'interval', seconds=UPDATE_PERIOD)
try:
scheduler.start()
except KeyboardInterrupt, SystemExit:
pass
| #!/usr/bin/env python2
"""
this script use python3 and work period to get the
recommend music from netease.
"""
import time
import sqlite3
from netease import NetEase
# config
USERNAME = 'your username'
PASSWORD = 'your password'
DATABASE_URL = 'recommend.db'
UPDATE_PERIOD = 6 * 60 * 60
def period(seconds):
""" use this to sleep the program."""
def schedule(func, *args):
while True:
func(*args)
time.sleep(seconds)
return schedule
def get_song_list():
instance = NetEase()
login_info = instance.login(USERNAME, PASSWORD)
playlist = instance.recommend_playlist()
songs = instance.dig_info(playlist, 'songs')
# get ten most ones
collection = []
for index, song in enumerate(songs):
if index == 10: break
item = {}
item['artist'] = song['artist']
item['song_name'] = song['song_name']
collection.append(item)
return collection
@period(UPDATE_PERIOD)
def start():
songs = get_song_list()
with sqlite3.connect(DATABASE_URL) as db:
cursor = db.cursor()
cursor.execute('delete from music_recommend')
for song in songs:
cursor.execute("insert into music_recommend(name, singer) values(?, ?)",
(song['song_name'], song['artist']))
db.commit()
if __name__ == '__main__':
start()
| apache-2.0 | Python |
e57d946543af9fca5c10efd43f4b313719ef8a75 | Update tests | ktok07b6/polyphony,ktok07b6/polyphony,ktok07b6/polyphony | tests/unroll/unroll01.py | tests/unroll/unroll01.py | from polyphony import testbench
from polyphony import unroll
def unroll01(xs, ys):
s = 0
for i in unroll(range(8)):
x = xs[i] + 1
if x < 0:
s = s + x
else:
s = s - x
ys[i] = x
#print(x)
return s
@testbench
def test():
data = [1, 2, 3, 4, 5, 6, 7, 8]
out_data = [0] * 8
s = unroll01(data, out_data)
print(s)
assert -44 == s
assert 2 == out_data[0]
assert 3 == out_data[1]
assert 4 == out_data[2]
assert 5 == out_data[3]
assert 6 == out_data[4]
assert 7 == out_data[5]
assert 8 == out_data[6]
assert 9 == out_data[7]
test()
| from polyphony import testbench
from polyphony import unroll
def unroll01(xs, ys):
s = 0
for i in range(8): # synth: unroll
x = xs[i] + 1
if x < 0:
s = s + x
else:
s = s - x
ys[i] = x
#print(x)
return s
@testbench
def test():
data = [1, 2, 3, 4, 5, 6, 7, 8]
out_data = [0] * 8
s = unroll01(data, out_data)
print(s)
assert -44 == s
assert 2 == out_data[0]
assert 3 == out_data[1]
assert 4 == out_data[2]
assert 5 == out_data[3]
assert 6 == out_data[4]
assert 7 == out_data[5]
assert 8 == out_data[6]
assert 9 == out_data[7]
test()
| mit | Python |
8ad3d29336e74b39d2daac5a6c8f6b50b1efa9b7 | Refactor into main function | robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions | 2015/python/2015-03.py | 2015/python/2015-03.py | #!/usr/local/bin/python3
from collections import namedtuple
import pathlib
def main(puzzle_input):
Point = namedtuple('Point', ['x', 'y'])
location = Point(0, 0)
visited = {location}
def new_loc(current_loc, instruction):
new_loc_table = {
'^': (current_loc.x, current_loc.y + 1),
'v': (current_loc.x, current_loc.y - 1),
'>': (current_loc.x + 1, current_loc.y),
'<': (current_loc.x - 1, current_loc.y)
}
return Point(*new_loc_table[instruction])
for char in puzzle_input:
location = new_loc(location, char)
visited.add(location)
print('At least one present:', len(visited))
# Part two
santa_loc = Point(0, 0)
robo_loc = Point(0, 0)
visited = {santa_loc}
for idx, char in enumerate(puzzle_input):
if idx % 2 == 0: # Santa
santa_loc = new_loc(santa_loc, char)
visited.add(santa_loc)
else: # robot
robo_loc = new_loc(robo_loc, char)
visited.add(robo_loc)
print('At least one present with santa and robot:', len(visited))
if __name__ == '__main__':
puzzle_input = pathlib.Path('../input/2015-03.txt').read_text().rstrip()
main(puzzle_input)
| #!/usr/local/bin/python3
from collections import namedtuple
with open('../input/2015-03.txt') as f:
instructions = f.read().rstrip()
Point = namedtuple('Point', ['x', 'y'])
location = Point(0, 0)
visited = {location}
def new_loc(current_loc, instruction):
new_loc_table = {
'^': (current_loc.x, current_loc.y + 1),
'v': (current_loc.x, current_loc.y - 1),
'>': (current_loc.x + 1, current_loc.y),
'<': (current_loc.x - 1, current_loc.y)
}
return Point(*new_loc_table[instruction])
for char in instructions:
location = new_loc(location, char)
visited.add(location)
print('At least one present:', len(visited))
# Part two
santa_loc = Point(0, 0)
robo_loc = Point(0, 0)
visited = {santa_loc}
for idx, char in enumerate(instructions):
if idx % 2 == 0: # Santa
santa_loc = new_loc(santa_loc, char)
visited.add(santa_loc)
else: # robot
robo_loc = new_loc(robo_loc, char)
visited.add(robo_loc)
print('At least one present with santa and robot:', len(visited))
| mit | Python |
cb2547ecf40aa18e9bdf4688fe0f552e2f02a738 | update example | thefab/tornadis,thefab/tornadis | examples/pubsub.py | examples/pubsub.py | import tornado
import tornadis
@tornado.gen.coroutine
def pubsub_coroutine():
# Let's get a connected client
client = tornadis.PubSubClient()
yield client.connect()
# Let's "psubscribe" to a pattern
yield client.pubsub_psubscribe("foo*")
# Let's "subscribe" to a channel
yield client.pubsub_subscribe("bar")
# Looping over received messages
while True:
# Let's "block" until a message is available
msg = yield client.pubsub_pop_message()
print(msg)
# >>> ['pmessage', 'foo*', 'foo', 'bar']
# (for a "publish foo bar" command from another connection)
if msg[3] == "STOP":
# it's a STOP message, let's unsubscribe and quit the loop
yield client.pubsub_punsubscribe("foo*")
yield client.pubsub_unsubscribe("bar")
break
# Let's disconnect
yield client.disconnect()
def stop_loop(future=None):
excep = future.exception()
if excep is not None:
raise(excep)
loop.stop()
loop = tornado.ioloop.IOLoop.instance()
loop.add_future(pubsub_coroutine(), stop_loop)
loop.start()
| import tornado
import tornadis
@tornado.gen.coroutine
def pubsub():
client = tornadis.Client()
yield client.connect()
yield client.pubsub_psubscribe("foo*")
yield client.pubsub_subscribe("bar")
while True:
reply = yield client.pubsub_pop_message()
print(reply)
if reply[3] == "STOP":
yield client.pubsub_punsubscribe("foo*")
yield client.pubsub_unsubscribe("bar")
break
yield client.disconnect()
def stop_loop(future=None):
excep = future.exception()
if excep is not None:
raise(excep)
loop = tornado.ioloop.IOLoop.instance()
loop.stop()
loop = tornado.ioloop.IOLoop.instance()
future = pubsub()
loop.add_future(future, stop_loop)
loop.start()
| mit | Python |
2ce16a9903ab07c98d805988e82d88c7c8c64e47 | simplify definition of sqrtm grad, move to top of file | barak/autograd,HIPS/autograd,kcarnold/autograd,hips/autograd,HIPS/autograd,hips/autograd | autograd/scipy/linalg.py | autograd/scipy/linalg.py | from __future__ import division
import scipy.linalg
import autograd.numpy as anp
from autograd.numpy.numpy_wrapper import wrap_namespace
from autograd.numpy.linalg import atleast_2d_col as al2d
wrap_namespace(scipy.linalg.__dict__, globals()) # populates module namespace
sqrtm.defgrad(lambda ans, A, **kwargs: lambda g: solve_lyapunov(ans, g))
def _flip(a, trans):
if anp.iscomplexobj(a):
return 'H' if trans in ('N', 0) else 'N'
else:
return 'T' if trans in ('N', 0) else 'N'
def make_grad_solve_triangular(ans, a, b, trans=0, lower=False, **kwargs):
tri = anp.tril if (lower ^ (_flip(a, trans) == 'N')) else anp.triu
transpose = lambda x: x if _flip(a, trans) != 'N' else x.T
def solve_triangular_grad(g):
v = al2d(solve_triangular(a, g, trans=_flip(a, trans), lower=lower))
return -transpose(tri(anp.dot(v, al2d(ans).T)))
return solve_triangular_grad
solve_triangular.defgrad(make_grad_solve_triangular)
solve_triangular.defgrad(lambda ans, a, b, trans=0, lower=False, **kwargs: lambda g:
solve_triangular(a, g, trans=_flip(a, trans), lower=lower), argnum=1)
| from __future__ import division
import scipy.linalg
import autograd.numpy as anp
from autograd.numpy.numpy_wrapper import wrap_namespace
from autograd.numpy.linalg import atleast_2d_col as al2d
wrap_namespace(scipy.linalg.__dict__, globals()) # populates module namespace
def _flip(a, trans):
if anp.iscomplexobj(a):
return 'H' if trans in ('N', 0) else 'N'
else:
return 'T' if trans in ('N', 0) else 'N'
def make_grad_solve_triangular(ans, a, b, trans=0, lower=False, **kwargs):
tri = anp.tril if (lower ^ (_flip(a, trans) == 'N')) else anp.triu
transpose = lambda x: x if _flip(a, trans) != 'N' else x.T
def solve_triangular_grad(g):
v = al2d(solve_triangular(a, g, trans=_flip(a, trans), lower=lower))
return -transpose(tri(anp.dot(v, al2d(ans).T)))
return solve_triangular_grad
solve_triangular.defgrad(make_grad_solve_triangular)
solve_triangular.defgrad(lambda ans, a, b, trans=0, lower=False, **kwargs: lambda g:
solve_triangular(a, g, trans=_flip(a, trans), lower=lower), argnum=1)
def make_grad_sqrtm(ans, A, **kwargs):
def sqrtm_grad(g):
return solve_lyapunov(ans, g)
return sqrtm_grad
sqrtm.defgrad(make_grad_sqrtm)
| mit | Python |
3f6bfdb407085ddac560e48d43277a897df48c8d | Update version.py | tenable/pyTenable | tenable/version.py | tenable/version.py | version = '1.3.0'
version_info = tuple(int(d) for d in version.split("-")[0].split("."))
| version = '1.2.8'
version_info = tuple(int(d) for d in version.split("-")[0].split("."))
| mit | Python |
db846990e010014f023324d5b687caeb13bc9725 | fix script so that it actually works! | akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem | scripts/climodat/dump.py | scripts/climodat/dump.py | import constants
from pyIEM import iemdb
import network
nt = network.Table("IACLIMATE")
i = iemdb.iemdb()
coop = i['coop']
for id in nt.sts.keys():
fn = "coop_data/%s.csv" % (nt.sts[id]['name'].replace(" ", "_"), )
out = open(fn, 'w')
out.write("station,station_name,lat,lon,day,high,low,precip,snow,\n")
sql = "SELECT * from %s WHERE station = '%s' ORDER by day ASC" % (
constants.get_table(id), id)
rs = coop.query(sql).dictresult()
for i in range(len(rs)):
out.write("%s,%s,%s,%s,%s,%s,%s,%s,%s,\n" % (id.lower(), nt.sts[id]['name'],
nt.sts[id]['lat'], nt.sts[id]['lon'],
rs[i]['day'], rs[i]['high'], rs[i]['low'], rs[i]['precip'], rs[i]['snow']) )
out.close()
| import constants
from pyIEM import iemdb
import network
nt = network.Table("IACLIMATE")
i = iemdb.iemdb()
coop = i['coop']
for id in nt.sts.keys():
fn = "coop_data/%s.csv" % (st.sts[id]['name'].replace(" ", "_"), )
out = open(fn, 'w')
out.write("station,station_name,lat,lon,day,high,low,precip,snow,\n")
sql = "SELECT * from %s WHERE station = '%s' ORDER by day ASC" \
% (constants.get_table(id), id.lower(), )
rs = coop.query(sql).dictresult()
for i in range(len(rs)):
out.write("%s,%s,%s,%s,%s,%s,%s,%s,%s,\n" % (id.lower(), st.sts[id]['name'], st.sts[id]['lat'], st.sts[id]['lon'],\
rs[i]['day'], rs[i]['high'], rs[i]['low'], rs[i]['precip'], rs[i]['snow']) )
out.close()
| mit | Python |
a999e37136ff17739bbd2e782fc4c47767489908 | add moonscript and lua | yjpark/silp | silp/language.py | silp/language.py | import silp
class Language:
def __init__(self, name, extension, macro_prefix,
generated_suffix, columns):
self.name = name
self.extension = extension
self.macro_prefix = macro_prefix
self.generated_suffix = generated_suffix
self.columns = columns
languages = [
Language('Python', '.py', '#SILP:', '#__SILP__\n', 80),
Language('C#', '.cs', '//SILP:', '//__SILP__\n', 80),
Language('Go', '.go', '//SILP:', '//__SILP__\n', 80),
Language('Freshrc', '.freshrc', '#SILP:', '#__SILP__\n', 80),
Language('YML', '.yml', '#SILP:', '#__SILP__\n', 80),
Language('Swift', '.swift', '//SILP:', '//__SILP__\n', 80),
Language('Objective-C', '.m', '//SILP:', '//__SILP__\n', 80),
Language('Objective-C++', '.mm', '//SILP:', '//__SILP__\n', 80),
Language('Moonscript', '.moon', '--SILP:', '--__SILP__\n', 80),
Language('Lua', '.lua', '--SILP:', '--__SILP__\n', 80),
]
| import silp
class Language:
def __init__(self, name, extension, macro_prefix,
generated_suffix, columns):
self.name = name
self.extension = extension
self.macro_prefix = macro_prefix
self.generated_suffix = generated_suffix
self.columns = columns
languages = [
Language('Python', '.py', '#SILP:', '#__SILP__\n', 80),
Language('C#', '.cs', '//SILP:', '//__SILP__\n', 80),
Language('Go', '.go', '//SILP:', '//__SILP__\n', 80),
Language('Freshrc', '.freshrc', '#SILP:', '#__SILP__\n', 80),
Language('YML', '.yml', '#SILP:', '#__SILP__\n', 80),
Language('Swift', '.swift', '//SILP:', '//__SILP__\n', 80),
Language('Objective-C', '.m', '//SILP:', '//__SILP__\n', 80),
Language('Objective-C++', '.mm', '//SILP:', '//__SILP__\n', 80),
]
| mit | Python |
422b559b07f179e93423530155a86bc7765142c8 | Update XSS-injection.py | nwiizo/workspace_2017,nwiizo/workspace_2017,nwiizo/workspace_2017,nwiizo/workspace_2017,nwiizo/workspace_2017,nwiizo/workspace_2017,nwiizo/workspace_2017,nwiizo/workspace_2017,nwiizo/workspace_2017,nwiizo/workspace_2017 | pen_test_code/XSS-injection.py | pen_test_code/XSS-injection.py | import mechanize
url = "http://www.webscantest.com/crosstraining/aboutyou.php"
browser = mechanize.Browser()
attackNumber = 1
with open('XSS-vectors.txt') as f:
for line in f:
browser.open(url)
browser.select_form(nr=0)
browser["fname"] = line
res = browser.submit()
content = res.read()
# check the attack vector is printed in the response.
if content.find(line) > 0:
print("Possible XXS")
output = open('response/'+str(attackNumber)+'.txt', 'w')
output.write(content)
output.close()
print(attackNumber)
attackNumber += 1
| import mechanize
url = "http://www.webscantest.com/crosstraining/aboutyou.php"
browser = mechanize.Browser()
attackNumber = 1
with open('XSS-vectors.txt') as f:
for line in f:
browser.open(url)
browser.select_form(nr=0)
browser["fname"] = line
res = browser.submit()
content = res.read()
# check the attack vector is printed in the response.
if content.find(line) > 0:
print "Possible XXS"
output = open('response/'+str(attackNumber)+'.txt', 'w')
output.write(content)
output.close()
print attackNumber
attackNumber += 1
| mit | Python |
767131c3addcc182b8b0c73a977df3540b282500 | Remove problematic imports in __init__ -- causing cycles and weirdness. | spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc | thinc/neural/__init__.py | thinc/neural/__init__.py | from ._classes.model import Model
from ._classes.affine import Affine
from ._classes.relu import ReLu
from ._classes.softmax import Softmax
from ._classes.elu import ELU
from ._classes.maxout import Maxout
from .pooling import Pooling, mean_pool, max_pool
from ._classes.convolution import ExtractWindow
from ._classes.batchnorm import BatchNorm
from ._classes.difference import Siamese
| from ._classes.model import Model
from ._classes.affine import Affine
from ._classes.relu import ReLu
from ._classes.softmax import Softmax
from ._classes.elu import ELU
from ._classes.maxout import Maxout
from ._classes.embed import Embed
from ._classes.static_vectors import StaticVectors
#from ._classes.embed import HashEmbed
#from .pooling import Pooling, mean_pool, max_pool
from ._classes.convolution import ExtractWindow
#from ._classes.batchnorm import BatchNorm
#from ._classes.difference import Siamese
| mit | Python |
89ffd5dbaed09b99247fe9a93cbea8aea7acac1f | Change PROXY_HOST_HTTPS in settings | heynemann/thumborizeme,heynemann/thumborizeme,heynemann/thumborizeme | thumborizeme/settings.py | thumborizeme/settings.py | import os
# REDIS
REDIS_HOST = os.environ.get('REDIS_HOST', '127.0.0.1')
REDIS_PORT = os.environ.get('REDIS_PORT', 6379)
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', '')
HOST = os.environ.get('HOST', 'http://localhost:9000')
THUMBOR_HOST = os.environ.get('THUMBOR_HOST', 'http://localhost:8001')
# PROXY
PROXY_HOST = os.environ.get('PROXY_HOST', '')
PROXY_HOST_HTTPS = os.environ.get('PROXY_HOST_HTTPS', '')
PROXY_PORT = os.environ.get('PROXY_PORT', 0)
| import os
# REDIS
REDIS_HOST = os.environ.get('REDIS_HOST', '127.0.0.1')
REDIS_PORT = os.environ.get('REDIS_PORT', 6379)
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', '')
HOST = os.environ.get('HOST', 'http://localhost:9000')
THUMBOR_HOST = os.environ.get('THUMBOR_HOST', 'http://localhost:8001')
# PROXY
PROXY_HOST = os.environ.get('PROXY_HOST', '')
PROXY_HOST_HTTPS = os.environ.get('PROXY_HOST', '')
PROXY_PORT = os.environ.get('PROXY_PORT', 0)
| mit | Python |
06f2b79ceba640b57ec3323a37f0204df5f26bb3 | Fix the test settings. | hello-base/web,hello-base/web,hello-base/web,hello-base/web | base/settings/testing.py | base/settings/testing.py | # -*- coding: utf-8 -*-
from configurations import values
from .base import Base as Settings
class Testing(Settings):
# Debug Settings.
# --------------------------------------------------------------------------
DEBUG = values.BooleanValue(False)
TEMPLATE_DEBUG = values.BooleanValue(False)
# Database Configuration.
# --------------------------------------------------------------------------
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
# django-celery.
# --------------------------------------------------------------------------
Settings.INSTALLED_APPS += ['kombu.transport.django']
BROKER_URL = 'django://'
# django-haystack.
# --------------------------------------------------------------------------
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
# Static File Configuration.
# --------------------------------------------------------------------------
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# Media Storage Configuration.
# --------------------------------------------------------------------------
# Amazon Web Services
AWS_STORAGE_BUCKET_NAME = 'test-bucket'
# django-s3-folder-storage
CDN_DOMAIN = 'cdn.example.net'
MEDIA_URL = 'https://%s/media/' % (CDN_DOMAIN)
# Authentication Configuration.
# --------------------------------------------------------------------------
HELLO_BASE_CLIENT_ID = 'client-id'
HELLO_BASE_CLIENT_SECRET = 'client-secret'
OAUTH_AUTHORIZATION_URL = 'https://testserver/oauth/authorize/'
OAUTH_TOKEN_URL = 'https://testserver/oauth/token/'
| # -*- coding: utf-8 -*-
from configurations import values
from .base import Base as Settings
class Testing(Settings):
# Debug Settings.
# --------------------------------------------------------------------------
DEBUG = values.BooleanValue(False)
TEMPLATE_DEBUG = values.BooleanValue(False)
# Database Configuration.
# --------------------------------------------------------------------------
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
# django-celery.
# --------------------------------------------------------------------------
Settings.INSTALLED_APPS += ['kombu.transport.django']
BROKER_URL = 'django://'
# django-haystack.
# --------------------------------------------------------------------------
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
# Media Storage Configuration.
# --------------------------------------------------------------------------
# Amazon Web Services
AWS_STORAGE_BUCKET_NAME = 'test-bucket'
# django-s3-folder-storage
DEFAULT_S3_PATH = 'media'
STATIC_S3_PATH = 'static'
CDN_DOMAIN = 'cdn.example.net'
MEDIA_URL = 'https://%s/%s/' % (CDN_DOMAIN, DEFAULT_S3_PATH)
STATIC_URL = 'https://%s/%s/' % (CDN_DOMAIN, STATIC_S3_PATH)
# Authentication Configuration.
# --------------------------------------------------------------------------
HELLO_BASE_CLIENT_ID = 'client-id'
HELLO_BASE_CLIENT_SECRET = 'client-secret'
OAUTH_AUTHORIZATION_URL = 'https://testserver/oauth/authorize/'
OAUTH_TOKEN_URL = 'https://testserver/oauth/token/'
| apache-2.0 | Python |
f95aa5b36a354fe3cfd94b43d8f0f6346ec400de | Disable setting thread_name_prefix in ThreadPoolExecutor (only supported in Python >= 3.6) | xmikos/soapy_power,xmikos/soapy_power | soapypower/threadpool.py | soapypower/threadpool.py | import os, queue, concurrent.futures
class ThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor):
"""ThreadPoolExecutor which allows setting max. work queue size"""
def __init__(self, max_workers=0, thread_name_prefix='', max_queue_size=0):
#super().__init__(max_workers or os.cpu_count() or 1, thread_name_prefix)
super().__init__(max_workers or os.cpu_count() or 1)
self.max_queue_size = max_queue_size or self._max_workers * 10
if self.max_queue_size > 0:
self._work_queue = queue.Queue(self.max_queue_size)
self.max_queue_size_reached = 0
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Count maximum reached work queue size in ThreadPoolExecutor.max_queue_size_reached.
"""
future = super().submit(fn, *args, **kwargs)
work_queue_size = self._work_queue.qsize()
if work_queue_size > self.max_queue_size_reached:
self.max_queue_size_reached = work_queue_size
return future
| import os, queue, concurrent.futures
class ThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor):
"""ThreadPoolExecutor which allows setting max. work queue size"""
def __init__(self, max_workers=0, thread_name_prefix='', max_queue_size=0):
super().__init__(max_workers or os.cpu_count() or 1, thread_name_prefix)
self.max_queue_size = max_queue_size or self._max_workers * 10
if self.max_queue_size > 0:
self._work_queue = queue.Queue(self.max_queue_size)
self.max_queue_size_reached = 0
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Count maximum reached work queue size in ThreadPoolExecutor.max_queue_size_reached.
"""
future = super().submit(fn, *args, **kwargs)
work_queue_size = self._work_queue.qsize()
if work_queue_size > self.max_queue_size_reached:
self.max_queue_size_reached = work_queue_size
return future
| mit | Python |
edbe6f7707afac9afa5e201c9e1fc17fad05e548 | Reorder imports | oldarmyc/pitchfork,oldarmyc/pitchfork,rackerlabs/pitchfork,rackerlabs/pitchfork,oldarmyc/pitchfork,rackerlabs/pitchfork | pitchfork/setup_application.py | pitchfork/setup_application.py | # Copyright 2014 Dave Kludt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from inspect import getmembers, isfunction
from flask_cloudauth import CloudAuth
from flask_cloudengine import Engine
from flask_cloudadmin import Admin
from happymongo import HapPyMongo
from datetime import timedelta
from flask import Flask, g
from config import config
import template_functions
import template_filters
import defaults
import logging
import views
def create_app(db_name=None):
app = Flask(__name__)
app.config.from_object(config)
if db_name:
config.MONGO_DATABASE = db_name
Admin(app)
Engine(app)
mongo, db = HapPyMongo(config)
app.permanent_session_lifetime = timedelta(hours=2)
auth = CloudAuth(app, db)
custom_filters = {
name: function for name, function in getmembers(template_filters)
if isfunction(function)
}
app.jinja_env.filters.update(custom_filters)
app.context_processor(template_functions.utility_processor)
@app.before_first_request
def logger():
if not app.debug:
app.logger.addHandler(logging.StreamHandler())
app.logger.setLevel(logging.INFO)
@app.before_request
def before_request():
g.db, g.auth = db, auth
defaults.application_initialize(db, app)
views.ProductsView.register(app)
views.MiscView.register(app)
views.FeedbackView.register(app)
views.GlobalManageView.register(app)
if db_name:
return app, db
else:
return app
| # Copyright 2014 Dave Kludt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from inspect import getmembers, isfunction
from flask_cloudauth import CloudAuth
from flask_cloudengine import Engine
from flask_cloudadmin import Admin
from happymongo import HapPyMongo
from datetime import timedelta
from flask import Flask, g
from config import config
import views
import logging
import defaults
import template_filters
import template_functions
def create_app(db_name=None):
app = Flask(__name__)
app.config.from_object(config)
if db_name:
config.MONGO_DATABASE = db_name
Admin(app)
Engine(app)
mongo, db = HapPyMongo(config)
app.permanent_session_lifetime = timedelta(hours=2)
auth = CloudAuth(app, db)
custom_filters = {
name: function for name, function in getmembers(template_filters)
if isfunction(function)
}
app.jinja_env.filters.update(custom_filters)
app.context_processor(template_functions.utility_processor)
@app.before_first_request
def logger():
if not app.debug:
app.logger.addHandler(logging.StreamHandler())
app.logger.setLevel(logging.INFO)
@app.before_request
def before_request():
g.db, g.auth = db, auth
defaults.application_initialize(db, app)
views.ProductsView.register(app)
views.MiscView.register(app)
views.FeedbackView.register(app)
views.GlobalManageView.register(app)
if db_name:
return app, db
else:
return app
| apache-2.0 | Python |
549fd2c0b7a60d55953f15223fac9e59fd780089 | make odoo2odoo installable | jobiols/odoo-addons,jobiols/odoo-addons,jobiols/odoo-addons,jobiols/odoo-addons | odoo2odoo/__openerp__.py | odoo2odoo/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 JEOSOFT (http://www.jeosoft.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Odoo to Odoo',
'version': '9.0.1.0.0',
'category': 'Connector',
'sequence': 14,
'summary': '',
'description': """
Replicate data between Odoo Databases
=====================================
""",
'author': 'jeo Soft',
'website': 'jeosoft.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'connector'
],
'data': [
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 JEOSOFT (http://www.jeosoft.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Odoo to Odoo',
'version': '9.0.1.0.0',
'category': 'Connector',
'sequence': 14,
'summary': '',
'description': """
Replicate data between Odoo Databases
=====================================
""",
'author': 'jeo Soft',
'website': 'jeosoft.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'connector'
],
'data': [
],
'demo': [
],
'test': [
],
'installable': False,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | Python |
c30010e5a97cc7926d8c78cecbc01dfe2cb8550a | Use Vertex in STLWriter | onitake/Uranium,onitake/Uranium | plugins/STLWriter/STLWriter.py | plugins/STLWriter/STLWriter.py | from Cura.MeshHandling.MeshWriter import MeshWriter
import time
import struct
class STLWriter(MeshWriter):
def __init__(self):
super(STLWriter, self).__init__()
self._supported_extension = ".stl"
#TODO: Only a single mesh can be saved to a single file, we might want to save multiple meshes to a single file
def write(self, file_name, storage_device, mesh_data):
if(self._supported_extension in file_name):
f = storage_device.openFile(file_name, 'wb')
f.write(("PLUGGABLE UNICORN BINARY STL EXPORT. " + time.strftime('%a %d %b %Y %H:%M:%S')).ljust(80, '\000'))
num_verts = mesh_data.getNumVerts()
f.write(struct.pack("<I", int(num_verts / 3))) #Write number of faces to STL
for index in xrange(0, num_verts, 3):
verts = mesh_data.getVerts()
v1 = verts[index]
v2 = verts[index + 1]
v3 = verts[index + 2]
f.write(struct.pack("<fff", 0.0, 0.0, 0.0))
vertPos = v1.getPosition()
f.write(struct.pack("<fff", vertPos.x, vertPos.y, vertPos.z))
vertPos = v2.getPosition()
f.write(struct.pack("<fff", vertPos.x, vertPos.y, vertPos.z))
vertPos = v3.getPosition()
f.write(struct.pack("<fff", vertPos.x, vertPos.y, vertPos.z))
f.write(struct.pack("<H", 0))
f.close()
return True
else:
return False
| from Cura.MeshHandling.MeshWriter import MeshWriter
import time
import struct
class STLWriter(MeshWriter):
def __init__(self):
super(STLWriter, self).__init__()
self._supported_extension = ".stl"
#TODO: Only a single mesh can be saved to a single file, we might want to save multiple meshes to a single file
def write(self, file_name, storage_device, mesh_data):
if(self._supported_extension in file_name):
f = storage_device.openFile(file_name, 'wb')
f.write(("PLUGGABLE UNICORN BINARY STL EXPORT. " + time.strftime('%a %d %b %Y %H:%M:%S')).ljust(80, '\000'))
num_verts = mesh_data.getNumVerts()
f.write(struct.pack("<I", int(num_verts / 3))) #Write number of faces to STL
for index in xrange(0, num_verts, 3):
verts = mesh_data.getVerts()
v1 = verts[index]
v2 = verts[index + 1]
v3 = verts[index + 2]
f.write(struct.pack("<fff", 0.0, 0.0, 0.0))
f.write(struct.pack("<fff", v1[0], v1[1], v1[2]))
f.write(struct.pack("<fff", v2[0], v2[1], v2[2]))
f.write(struct.pack("<fff", v3[0], v3[1], v3[2]))
f.write(struct.pack("<H", 0))
f.close()
return True
else:
return False | agpl-3.0 | Python |
752c926a8ebd194b5bb24606e6382e84c9274120 | adjust comment | 4dn-dcic/fourfront,hms-dbmi/fourfront,4dn-dcic/fourfront,4dn-dcic/fourfront,hms-dbmi/fourfront,hms-dbmi/fourfront,4dn-dcic/fourfront,hms-dbmi/fourfront,hms-dbmi/fourfront | src/encoded/types/antibody.py | src/encoded/types/antibody.py | """The type file for the collection Antibody.
logic for autopopulating 'antibody_id' unique key upon update or create
"""
from snovault import (
calculated_property,
collection,
load_schema,
)
from .base import (
Item,
lab_award_attribution_embed_list
)
from .dependencies import DependencyEmbedder
import string
import re
def _build_antibody_embedded_list():
""" Helper function intended to be used to create the embedded list for antibody.
All types should implement a function like this going forward.
"""
antibody_target_embeds = DependencyEmbedder.embed_defaults_for_type(base_path='antibody_target', t='bio_feature')
return Item.embedded_list + lab_award_attribution_embed_list + antibody_target_embeds + [
# Vendor linkTo
'antibody_vendor.title'
]
@collection(
name='antibodys',
unique_key='antibody:antibody_id',
properties={
'title': 'Antibodies',
'description': 'Listing of antibodies',
})
class Antibody(Item):
"""Antibody class."""
item_type = 'antibody'
schema = load_schema('encoded:schemas/antibody.json')
name_key = 'antibody_id'
embedded_list = _build_antibody_embedded_list()
def _update(self, properties, sheets=None):
# set antibody_id based on values of antibody_name and product_no
exclude = re.escape(string.punctuation)
regex = r"[" + exclude + r"\s]+"
abid = properties['antibody_name'] + '-' + properties['antibody_product_no']
abid = re.sub(regex, "-", abid)
properties['antibody_id'] = abid
super(Antibody, self)._update(properties, sheets)
@calculated_property(schema={
"title": "Display Title",
"description": "A calculated title for every object in 4DN",
"type": "string"
})
def display_title(self, antibody_name, antibody_product_no=None):
if antibody_product_no:
antibody_name += ' ({})'.format(antibody_product_no)
return antibody_name
| """The type file for the collection Antibody.
logic for autopopulating 'antibody_id' unique key upon update or create
"""
from snovault import (
calculated_property,
collection,
load_schema,
)
from .base import (
Item,
lab_award_attribution_embed_list
)
from .dependencies import DependencyEmbedder
import string
import re
def _build_antibody_embedded_list():
""" Helper function intended to be used to create the embedded list for ontology_term.
All types should implement a function like this going forward.
"""
antibody_target_embeds = DependencyEmbedder.embed_defaults_for_type(base_path='antibody_target', t='bio_feature')
return Item.embedded_list + lab_award_attribution_embed_list + antibody_target_embeds + [
# Vendor linkTo
'antibody_vendor.title'
]
@collection(
name='antibodys',
unique_key='antibody:antibody_id',
properties={
'title': 'Antibodies',
'description': 'Listing of antibodies',
})
class Antibody(Item):
"""Antibody class."""
item_type = 'antibody'
schema = load_schema('encoded:schemas/antibody.json')
name_key = 'antibody_id'
embedded_list = _build_antibody_embedded_list()
def _update(self, properties, sheets=None):
# set antibody_id based on values of antibody_name and product_no
exclude = re.escape(string.punctuation)
regex = r"[" + exclude + r"\s]+"
abid = properties['antibody_name'] + '-' + properties['antibody_product_no']
abid = re.sub(regex, "-", abid)
properties['antibody_id'] = abid
super(Antibody, self)._update(properties, sheets)
@calculated_property(schema={
"title": "Display Title",
"description": "A calculated title for every object in 4DN",
"type": "string"
})
def display_title(self, antibody_name, antibody_product_no=None):
if antibody_product_no:
antibody_name += ' ({})'.format(antibody_product_no)
return antibody_name
| mit | Python |
0425a3ce6ab44c7abae4d791d17db0305b436764 | bump to 0.9.11 | martin-hunt/hublib | hublib/__init__.py | hublib/__init__.py | from pint import UnitRegistry
ureg = UnitRegistry()
ureg.autoconvert_offset_to_baseunit = True
Q_ = ureg.Quantity
__version__ = "0.9.11"
| from pint import UnitRegistry
ureg = UnitRegistry()
ureg.autoconvert_offset_to_baseunit = True
Q_ = ureg.Quantity
__version__ = "0.9.10"
| mit | Python |
67eb081fa8fec0849646bbbcf6305279357f556d | Refactor the fetch command line dispatcher | cgeoffroy/son-analyze,cgeoffroy/son-analyze | src/son_analyze/cli/fetch_cmd.py | src/son_analyze/cli/fetch_cmd.py | # Copyright (c) 2015 SONATA-NFV, Thales Communications & Security
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Thales Communications & Security
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
"""son-analyze fetch command"""
# pylint: disable=unsubscriptable-object
from enum import Enum
import logging
from urllib.parse import ParseResult
from typing import Dict, Iterable, Any
import yaml # type: ignore
from son_analyze.ops import fetch
from son_analyze.core import types
_LOGGER = logging.getLogger(__name__)
def _print_yml_to_stdout(values: Iterable[Dict[str, Any]]) -> None:
"""Print the fetch result to the console."""
print(yaml.dump_all(values))
class _Dispatcher(Enum):
vnfd = (fetch.fetch_vnfd_by_uuid, fetch.fetch_vnfd)
nsd = (fetch.fetch_nsd_by_uuid, fetch.fetch_nsd)
vnfr = (fetch.fetch_vnfr_by_uuid, fetch.fetch_vnfr)
nsr = (fetch.fetch_nsr_by_uuid, fetch.fetch_nsr)
def __init__(self, ffetch_by_uuid, ffetch):
self.fetch_by_uuid = ffetch_by_uuid
self.fetch = ffetch
def fetch_cmd(gatekeeper: ParseResult, skind: str,
target: types.ResourceTargetTuple) -> None:
"""Fetch a vnfd or a nsd (with its dependencies) and display the result
as Yaml documents on STDOUT."""
try:
kind = _Dispatcher[skind] # type: ignore
except KeyError:
raise RuntimeError('Invalid resource type {}'.format(skind))
if target.uuid:
res = kind.fetch_by_uuid(gatekeeper, target.uuid)
else:
res = kind.fetch(gatekeeper, target.vendor, target.name,
target.version)
if isinstance(res, tuple):
base, docs = res
_print_yml_to_stdout([base] + list(docs.values()))
else:
_print_yml_to_stdout([res])
return
| # Copyright (c) 2015 SONATA-NFV, Thales Communications & Security
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Thales Communications & Security
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
"""son-analyze fetch command"""
# pylint: disable=unsubscriptable-object
import logging
from urllib.parse import ParseResult
from typing import Dict, Iterable, Any
import yaml # type: ignore
from son_analyze.ops import fetch
from son_analyze.core import types
_LOGGER = logging.getLogger(__name__)
def _print_yml_to_stdout(values: Iterable[Dict[str, Any]]) -> None:
"""Print the fetch result to the console."""
print(yaml.dump_all(values))
def fetch_cmd(gatekeeper: ParseResult, kind: str,
target: types.ResourceTargetTuple) -> None:
"""Fetch a vnfd or a nsd (with its dependencies) and display the result
as Yaml documents on STDOUT."""
if kind == 'vnfd':
if target.uuid:
vnfd = fetch.fetch_vnfd_by_uuid(gatekeeper, target.uuid)
else:
vnfd = fetch.fetch_vnfd(gatekeeper, target.vendor,
target.name, target.version)
_print_yml_to_stdout([vnfd])
elif kind == 'nsd':
if target.uuid:
(nsd, vnfds) = fetch.fetch_nsd_by_uuid(gatekeeper, target.uuid)
else:
(nsd, vnfds) = fetch.fetch_nsd(gatekeeper, target.vendor,
target.name, target.version)
_print_yml_to_stdout([nsd] + list(vnfds.values()))
else:
raise RuntimeError('Invalid resource type {}'.format(kind))
return
| apache-2.0 | Python |
5b6545f385b031fb2629370ec15dd2770bc27533 | revert test to command_order | wooey/Wooey,wooey/Wooey,wooey/Wooey,wooey/Wooey | wooey/tests/scripts/command_order.py | wooey/tests/scripts/command_order.py | import argparse
import sys
parser = argparse.ArgumentParser(description="Something")
parser.add_argument('link', help='the url containing the metadata')
parser.add_argument('name', help='the name of the file')
if __name__ == '__main__':
args = parser.parse_args()
sys.stderr.write('{} {}'.format(args.link, args.name))
| import argparse
import sys
parser = argparse.ArgumentParser(description="Something")
parser.add_argument('linknew', help='the url containing the metadata')
parser.add_argument('name', help='the name of the file')
if __name__ == '__main__':
args = parser.parse_args()
sys.stderr.write('{} {}'.format(args.link, args.name))
| bsd-3-clause | Python |
d817078bcb73c2d65677ef660ba43c9859230ee9 | Change get_terminal_size from os to shutil | Kynarth/pokediadb | pokediadb/log.py | pokediadb/log.py | """Module to display different kind of messages in the terminal."""
import shutil
import textwrap
import click
from pokediadb.enums import Log
def format_message(msg, msg_type):
"""Format a message in function of message type and terminal's size.
Args:
msg (str): Message to format.
msg_type (Log): Kind of message.
Returns:
str: Return formatted message.
"""
term_size = shutil.get_terminal_size()
return textwrap.fill(
msg, width=term_size.columns, initial_indent="",
subsequent_indent=" " * len(msg_type.value)
)
def info(msg, verbose=True):
"""Display information message.
Args:
msg (str): Message to display
verbose (bool): If True the message is displayed.
Returns:
str: Return formatted message.
"""
if not verbose:
return
click.secho(Log.INFO.value, fg="green", bold=True, nl=False)
full_msg = Log.INFO.value + msg
msg = format_message(full_msg, Log.INFO)[len(Log.INFO.value):]
click.secho(msg, fg="green")
return full_msg
# def warning(msg):
# """Display warning message.
# Args:
# msg (str): Message to display
# Returns:
# str: Return formatted message.
# """
# click.secho(Log.WARNING.value, fg="yellow", bold=True, nl=False)
# full_msg = Log.WARNING.value + msg
# msg = format_message(full_msg, Log.WARNING)[len(Log.WARNING.value):]
# click.secho(msg, fg="yellow")
# return full_msg
def error(msg):
"""Display error message.
Args:
msg (str): Message to display
Returns:
str: Return formatted message.
"""
click.secho(Log.ERROR.value, fg="red", bold=True, nl=False, err=True)
full_msg = Log.ERROR.value + msg
msg = format_message(full_msg, Log.ERROR)[len(Log.ERROR.value):]
click.secho(msg, fg="red", err=True)
return full_msg
| """Module to display different kind of messages in the terminal."""
import os
import textwrap
import click
from pokediadb.enums import Log
def format_message(msg, msg_type):
"""Format a message in function of message type and terminal's size.
Args:
msg (str): Message to format.
msg_type (Log): Kind of message.
Returns:
str: Return formatted message.
"""
try:
term_size = os.get_terminal_size()
except OSError:
term_size = os.terminal_size((80, 24))
return textwrap.fill(
msg, width=term_size.columns, initial_indent="",
subsequent_indent=" " * len(msg_type.value)
)
def info(msg, verbose=True):
"""Display information message.
Args:
msg (str): Message to display
verbose (bool): If True the message is displayed.
Returns:
str: Return formatted message.
"""
if not verbose:
return
click.secho(Log.INFO.value, fg="green", bold=True, nl=False)
full_msg = Log.INFO.value + msg
msg = format_message(full_msg, Log.INFO)[len(Log.INFO.value):]
click.secho(msg, fg="green")
return full_msg
# def warning(msg):
# """Display warning message.
# Args:
# msg (str): Message to display
# Returns:
# str: Return formatted message.
# """
# click.secho(Log.WARNING.value, fg="yellow", bold=True, nl=False)
# full_msg = Log.WARNING.value + msg
# msg = format_message(full_msg, Log.WARNING)[len(Log.WARNING.value):]
# click.secho(msg, fg="yellow")
# return full_msg
def error(msg):
"""Display error message.
Args:
msg (str): Message to display
Returns:
str: Return formatted message.
"""
click.secho(Log.ERROR.value, fg="red", bold=True, nl=False, err=True)
full_msg = Log.ERROR.value + msg
msg = format_message(full_msg, Log.ERROR)[len(Log.ERROR.value):]
click.secho(msg, fg="red", err=True)
return full_msg
| mit | Python |
d3ac89dacfd2991965c9638a8678cbf26aab4793 | Fix header mapping | nikdoof/django-eveigb | eveigb/middleware.py | eveigb/middleware.py | class IGBMiddleware(object):
"""
Middleware to detect the EVE IGB
"""
def process_request(self, request):
request.is_igb = False
request.is_igb_trusted = False
header_map = [
('HTTP_EVE_SERVERIP', 'eve_server_ip'),
('HTTP_EVE_CHARNAME', 'eve_charname'),
('HTTP_EVE_CHARID', 'eve_charid'),
('HTTP_EVE_CORPNAME', 'eve_corpname'),
('HTTP_EVE_CORPID', 'eve_corpid'),
('HTTP_EVE_ALLIANCENAME', 'eve_alliancename'),
('HTTP_EVE_ALLIANCEID', 'eve_allianceid'),
('HTTP_EVE_REGIONNAME', 'eve_regionid'),
('HTTP_EVE_CONSTELLATIONNAME', 'eve_constellationname'),
('HTTP_EVE_SOLARSYSTEMNAME', 'eve_systemname'),
('HTTP_EVE_STATIONNAME', 'eve_stationname'),
('HTTP_EVE_STATIONID', 'eve_stationid'),
('HTTP_EVE_CORPROLE', 'eve_corprole'),
]
if 'EVE-IGB' in request.META.get('HTTP_USER_AGENT', ''):
request.is_igb = True
if request.META.get('HTTP_EVE_TRUSTED', 'No') == 'Yes':
request.is_igb_trusted = True
for header, map in header_map:
if request.META.get(header, None):
setattr(request, map, request.META.get(header, None))
def igb(request):
return {
'is_igb': request.is_igb,
'is_igb_trusted': request.is_igb_trusted,
}
| class IGBMiddleware(object):
"""
Middleware to detect the EVE IGB
"""
def process_request(self, request):
request.is_igb = False
request.is_igb_trusted = False
header_map = [
('HTTP_EVE_SERVERIP', 'eve_server_ip'),
('HTTP_EVE_CHARNAME', 'eve_charname'),
('HTTP_EVE_CHARID', 'eve_charid'),
('HTTP_EVE_CORPNAME', 'eve_corpname'),
('HTTP_EVE_CORPID', 'eve_corpid'),
('HTTP_EVE_ALLIANCENAME', 'eve_alliancename'),
('HTTP_EVE_ALLIANCEID', 'eve_allianceid'),
('HTTP_EVE_REGIONNAME', 'eve_regionid'),
('HTTP_EVE_CONSTELLATIONNAME', 'eve_constellationname'),
('HTTP_EVE_SOLARSYSTEMNAME', 'eve_systemname'),
('HTTP_EVE_STATIONNAME,' 'eve_stationname'),
('HTTP_EVE_STATIONID,' 'eve_stationid'),
('HTTP_EVE_CORPROLE,' 'eve_corprole'),
]
if 'EVE-IGB' in request.META.get('HTTP_USER_AGENT', ''):
request.is_igb = True
if request.META.get('HTTP_EVE_TRUSTED', 'No') == 'Yes':
request.is_igb_trusted = True
for header, map in header_map:
if request.META.get(header, None):
setattr(request, map, request.META.get(header, None))
def igb(request):
return {
'is_igb': request.is_igb,
'is_igb_trusted': request.is_igb_trusted,
}
| bsd-3-clause | Python |
fd7c2beb8d0d400d0ba91b725ff2bb30d7f43e3b | remove debugging print | DuCorey/bokeh,quasiben/bokeh,percyfal/bokeh,DuCorey/bokeh,dennisobrien/bokeh,msarahan/bokeh,stonebig/bokeh,rs2/bokeh,DuCorey/bokeh,bokeh/bokeh,Karel-van-de-Plassche/bokeh,ptitjano/bokeh,azjps/bokeh,jakirkham/bokeh,DuCorey/bokeh,bokeh/bokeh,phobson/bokeh,KasperPRasmussen/bokeh,ptitjano/bokeh,clairetang6/bokeh,bokeh/bokeh,rs2/bokeh,rs2/bokeh,mindriot101/bokeh,azjps/bokeh,percyfal/bokeh,ptitjano/bokeh,jakirkham/bokeh,aavanian/bokeh,bokeh/bokeh,schoolie/bokeh,KasperPRasmussen/bokeh,bokeh/bokeh,KasperPRasmussen/bokeh,draperjames/bokeh,timsnyder/bokeh,draperjames/bokeh,KasperPRasmussen/bokeh,ptitjano/bokeh,clairetang6/bokeh,msarahan/bokeh,aavanian/bokeh,percyfal/bokeh,msarahan/bokeh,aiguofer/bokeh,mindriot101/bokeh,msarahan/bokeh,rs2/bokeh,phobson/bokeh,mindriot101/bokeh,Karel-van-de-Plassche/bokeh,percyfal/bokeh,justacec/bokeh,rs2/bokeh,draperjames/bokeh,philippjfr/bokeh,KasperPRasmussen/bokeh,dennisobrien/bokeh,schoolie/bokeh,ericmjl/bokeh,percyfal/bokeh,stonebig/bokeh,Karel-van-de-Plassche/bokeh,aiguofer/bokeh,justacec/bokeh,quasiben/bokeh,timsnyder/bokeh,dennisobrien/bokeh,jakirkham/bokeh,draperjames/bokeh,jakirkham/bokeh,aavanian/bokeh,philippjfr/bokeh,ericmjl/bokeh,justacec/bokeh,stonebig/bokeh,jakirkham/bokeh,schoolie/bokeh,schoolie/bokeh,Karel-van-de-Plassche/bokeh,aiguofer/bokeh,ptitjano/bokeh,phobson/bokeh,mindriot101/bokeh,phobson/bokeh,clairetang6/bokeh,azjps/bokeh,aavanian/bokeh,draperjames/bokeh,phobson/bokeh,Karel-van-de-Plassche/bokeh,justacec/bokeh,ericmjl/bokeh,ericmjl/bokeh,philippjfr/bokeh,ericmjl/bokeh,stonebig/bokeh,timsnyder/bokeh,schoolie/bokeh,clairetang6/bokeh,aavanian/bokeh,aiguofer/bokeh,quasiben/bokeh,azjps/bokeh,dennisobrien/bokeh,dennisobrien/bokeh,aiguofer/bokeh,philippjfr/bokeh,DuCorey/bokeh,azjps/bokeh,timsnyder/bokeh,philippjfr/bokeh,timsnyder/bokeh | examples/app/timeout.py | examples/app/timeout.py | ''' Present a plot updating according to a set of fixed timeout
intervals.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve timeout.py
at your command prompt. Then navigate to the URL
http://localhost:5006/timeout
in your browser.
'''
import sys
import numpy as np
from bokeh.palettes import RdYlBu3
from bokeh.plotting import figure, curdoc
N = 50
p = figure(x_range=(0, 100), y_range=(0, 100), toolbar_location=None)
p.border_fill_color = 'black'
p.background_fill_color = 'black'
p.outline_line_color = None
p.grid.grid_line_color = None
p.rect(x=50, y=50, width=80, height=80,
line_alpha=0.5, line_color="darkgrey", fill_color=None)
r = p.text(x=[], y=[], text=[], text_color=[],
text_font_size="20pt", text_baseline="middle", text_align="center")
def make_callback(i):
ds = r.data_source
def func():
if i == N-1:
ds.data['x'].append(50)
ds.data['y'].append(95)
ds.data['text'].append("DONE")
ds.data['text_color'].append("white")
else:
ds.data['x'].append(np.random.random()*70 + 15)
ds.data['y'].append(np.random.random()*70 + 15)
ds.data['text_color'].append(RdYlBu3[i%3])
ds.data['text'].append(str(i))
ds.trigger('data', ds.data, ds.data)
func.interval = i * 100
return func
callbacks = [make_callback(i) for i in range(N)]
for callback in callbacks:
curdoc().add_timeout_callback(callback, callback.interval)
| ''' Present a plot updating according to a set of fixed timeout
intervals.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve timeout.py
at your command prompt. Then navigate to the URL
http://localhost:5006/timeout
in your browser.
'''
import sys
import numpy as np
from bokeh.palettes import RdYlBu3
from bokeh.plotting import figure, curdoc
N = 50
p = figure(x_range=(0, 100), y_range=(0, 100), toolbar_location=None)
p.border_fill_color = 'black'
p.background_fill_color = 'black'
p.outline_line_color = None
p.grid.grid_line_color = None
p.rect(x=50, y=50, width=80, height=80,
line_alpha=0.5, line_color="darkgrey", fill_color=None)
r = p.text(x=[], y=[], text=[], text_color=[],
text_font_size="20pt", text_baseline="middle", text_align="center")
def make_callback(i):
ds = r.data_source
def func():
if i == N-1:
ds.data['x'].append(50)
ds.data['y'].append(95)
ds.data['text'].append("DONE")
ds.data['text_color'].append("white")
else:
ds.data['x'].append(np.random.random()*70 + 15)
ds.data['y'].append(np.random.random()*70 + 15)
ds.data['text_color'].append(RdYlBu3[i%3])
ds.data['text'].append(str(i))
ds.trigger('data', ds.data, ds.data)
print("FOOOO")
func.interval = i * 100
return func
callbacks = [make_callback(i) for i in range(N)]
for callback in callbacks:
curdoc().add_timeout_callback(callback, callback.interval)
| bsd-3-clause | Python |
3b35713626b1861d6dc022fa2a40f979cb415860 | remove debugging code | melmorabity/streamlink,javiercantero/streamlink,chhe/streamlink,beardypig/streamlink,back-to/streamlink,streamlink/streamlink,bastimeyer/streamlink,beardypig/streamlink,wlerin/streamlink,back-to/streamlink,bastimeyer/streamlink,melmorabity/streamlink,gravyboat/streamlink,streamlink/streamlink,chhe/streamlink,wlerin/streamlink,javiercantero/streamlink,gravyboat/streamlink | src/streamlink/plugins/facebook.py | src/streamlink/plugins/facebook.py | import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, useragents
from streamlink.stream import DASHStream, HTTPStream
from streamlink.utils import parse_json
class Facebook(Plugin):
_url_re = re.compile(r"https?://(?:www\.)?facebook\.com/[^/]+/videos")
_mpd_re = re.compile(r'''(sd|hd)_src["']?\s*:\s*(?P<quote>["'])(?P<url>.+?)(?P=quote)''')
_playlist_re = re.compile(r'''video:\[({url:".+?}\])''')
_plurl_re = re.compile(r'''url:"(.*?)"''')
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url)
def _get_streams(self):
res = http.get(self.url, headers={"User-Agent": useragents.CHROME})
for match in self._mpd_re.finditer(res.text):
manifest_url = match.group("url")
if "\\/" in manifest_url:
# if the URL is json encoded, decode it
manifest_url = parse_json("\"{}\"".format(manifest_url))
for s in DASHStream.parse_manifest(self.session, manifest_url).items():
yield s
else:
match = self._playlist_re.search(res.text)
playlist = match and match.group(1)
if playlist:
for url in {url.group(1) for url in self._plurl_re.finditer(playlist)}:
yield "live", HTTPStream(self.session, url)
__plugin__ = Facebook
| import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, useragents
from streamlink.stream import DASHStream, HTTPStream
from streamlink.utils import parse_json
class Facebook(Plugin):
_url_re = re.compile(r"https?://(?:www\.)?facebook\.com/[^/]+/videos")
_mpd_re = re.compile(r'''(sd|hd)_src["']?\s*:\s*(?P<quote>["'])(?P<url>.+?)(?P=quote)''')
_playlist_re = re.compile(r'''video:\[({url:".+?}\])''')
_plurl_re = re.compile(r'''url:"(.*?)"''')
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url)
def _get_streams(self):
res = http.get(self.url, headers={"User-Agent": useragents.CHROME})
with open("temp.html", "w") as f:
f.write(res.text)
for match in self._mpd_re.finditer(res.text):
manifest_url = match.group("url")
if "\\/" in manifest_url:
# if the URL is json encoded, decode it
manifest_url = parse_json("\"{}\"".format(manifest_url))
for s in DASHStream.parse_manifest(self.session, manifest_url).items():
yield s
else:
match = self._playlist_re.search(res.text)
playlist = match and match.group(1)
if playlist:
for url in {url.group(1) for url in self._plurl_re.finditer(playlist)}:
yield "live", HTTPStream(self.session, url)
__plugin__ = Facebook
| bsd-2-clause | Python |
7077716c25849d3c365081c3a531c3e5020b3f4d | remove superfluous box_select tool | percyfal/bokeh,msarahan/bokeh,stonebig/bokeh,KasperPRasmussen/bokeh,KasperPRasmussen/bokeh,aiguofer/bokeh,quasiben/bokeh,ericmjl/bokeh,rs2/bokeh,KasperPRasmussen/bokeh,aavanian/bokeh,clairetang6/bokeh,azjps/bokeh,clairetang6/bokeh,aavanian/bokeh,Karel-van-de-Plassche/bokeh,philippjfr/bokeh,dennisobrien/bokeh,justacec/bokeh,schoolie/bokeh,phobson/bokeh,aiguofer/bokeh,dennisobrien/bokeh,mindriot101/bokeh,justacec/bokeh,timsnyder/bokeh,msarahan/bokeh,ericmjl/bokeh,clairetang6/bokeh,philippjfr/bokeh,justacec/bokeh,schoolie/bokeh,aiguofer/bokeh,bokeh/bokeh,mindriot101/bokeh,stonebig/bokeh,phobson/bokeh,mindriot101/bokeh,philippjfr/bokeh,justacec/bokeh,schoolie/bokeh,rs2/bokeh,jakirkham/bokeh,aiguofer/bokeh,aavanian/bokeh,Karel-van-de-Plassche/bokeh,jakirkham/bokeh,msarahan/bokeh,Karel-van-de-Plassche/bokeh,timsnyder/bokeh,jakirkham/bokeh,ericmjl/bokeh,aiguofer/bokeh,philippjfr/bokeh,DuCorey/bokeh,ptitjano/bokeh,Karel-van-de-Plassche/bokeh,ptitjano/bokeh,phobson/bokeh,DuCorey/bokeh,ericmjl/bokeh,ptitjano/bokeh,bokeh/bokeh,ptitjano/bokeh,Karel-van-de-Plassche/bokeh,dennisobrien/bokeh,aavanian/bokeh,rs2/bokeh,DuCorey/bokeh,percyfal/bokeh,timsnyder/bokeh,draperjames/bokeh,draperjames/bokeh,percyfal/bokeh,DuCorey/bokeh,dennisobrien/bokeh,rs2/bokeh,percyfal/bokeh,DuCorey/bokeh,draperjames/bokeh,bokeh/bokeh,philippjfr/bokeh,msarahan/bokeh,azjps/bokeh,ptitjano/bokeh,phobson/bokeh,quasiben/bokeh,dennisobrien/bokeh,azjps/bokeh,timsnyder/bokeh,percyfal/bokeh,quasiben/bokeh,KasperPRasmussen/bokeh,bokeh/bokeh,stonebig/bokeh,ericmjl/bokeh,bokeh/bokeh,draperjames/bokeh,phobson/bokeh,schoolie/bokeh,azjps/bokeh,rs2/bokeh,mindriot101/bokeh,jakirkham/bokeh,KasperPRasmussen/bokeh,jakirkham/bokeh,stonebig/bokeh,draperjames/bokeh,clairetang6/bokeh,schoolie/bokeh,azjps/bokeh,aavanian/bokeh,timsnyder/bokeh | examples/glyphs/maps.py | examples/glyphs/maps.py | from __future__ import print_function
from bokeh.util.browser import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle
from bokeh.models import (
GMapPlot, Range1d, ColumnDataSource,
PanTool, WheelZoomTool, BoxSelectTool,
BoxSelection, GMapOptions)
from bokeh.resources import INLINE
x_range = Range1d()
y_range = Range1d()
# JSON style string taken from: https://snazzymaps.com/style/1/pale-dawn
map_options = GMapOptions(lat=30.2861, lng=-97.7394, map_type="roadmap", zoom=13, styles="""
[{"featureType":"administrative","elementType":"all","stylers":[{"visibility":"on"},{"lightness":33}]},{"featureType":"landscape","elementType":"all","stylers":[{"color":"#f2e5d4"}]},{"featureType":"poi.park","elementType":"geometry","stylers":[{"color":"#c5dac6"}]},{"featureType":"poi.park","elementType":"labels","stylers":[{"visibility":"on"},{"lightness":20}]},{"featureType":"road","elementType":"all","stylers":[{"lightness":20}]},{"featureType":"road.highway","elementType":"geometry","stylers":[{"color":"#c5c6c6"}]},{"featureType":"road.arterial","elementType":"geometry","stylers":[{"color":"#e4d7c6"}]},{"featureType":"road.local","elementType":"geometry","stylers":[{"color":"#fbfaf7"}]},{"featureType":"water","elementType":"all","stylers":[{"visibility":"on"},{"color":"#acbcc9"}]}]
""")
plot = GMapPlot(
x_range=x_range, y_range=y_range,
map_options=map_options,
title="Austin"
)
source = ColumnDataSource(
data=dict(
lat=[30.2861, 30.2855, 30.2869],
lon=[-97.7394, -97.7390, -97.7405],
fill=['orange', 'blue', 'green']
)
)
circle = Circle(x="lon", y="lat", size=15, fill_color="fill", line_color="black")
plot.add_glyph(source, circle)
pan = PanTool()
wheel_zoom = WheelZoomTool()
plot.add_tools(pan, wheel_zoom)
doc = Document()
doc.add_root(plot)
if __name__ == "__main__":
filename = "maps.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Google Maps Example"))
print("Wrote %s" % filename)
view(filename)
| from __future__ import print_function
from bokeh.util.browser import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle
from bokeh.models import (
GMapPlot, Range1d, ColumnDataSource,
PanTool, WheelZoomTool, BoxSelectTool,
BoxSelectionOverlay, GMapOptions)
from bokeh.resources import INLINE
x_range = Range1d()
y_range = Range1d()
# JSON style string taken from: https://snazzymaps.com/style/1/pale-dawn
map_options = GMapOptions(lat=30.2861, lng=-97.7394, map_type="roadmap", zoom=13, styles="""
[{"featureType":"administrative","elementType":"all","stylers":[{"visibility":"on"},{"lightness":33}]},{"featureType":"landscape","elementType":"all","stylers":[{"color":"#f2e5d4"}]},{"featureType":"poi.park","elementType":"geometry","stylers":[{"color":"#c5dac6"}]},{"featureType":"poi.park","elementType":"labels","stylers":[{"visibility":"on"},{"lightness":20}]},{"featureType":"road","elementType":"all","stylers":[{"lightness":20}]},{"featureType":"road.highway","elementType":"geometry","stylers":[{"color":"#c5c6c6"}]},{"featureType":"road.arterial","elementType":"geometry","stylers":[{"color":"#e4d7c6"}]},{"featureType":"road.local","elementType":"geometry","stylers":[{"color":"#fbfaf7"}]},{"featureType":"water","elementType":"all","stylers":[{"visibility":"on"},{"color":"#acbcc9"}]}]
""")
plot = GMapPlot(
x_range=x_range, y_range=y_range,
map_options=map_options,
title="Austin"
)
source = ColumnDataSource(
data=dict(
lat=[30.2861, 30.2855, 30.2869],
lon=[-97.7394, -97.7390, -97.7405],
fill=['orange', 'blue', 'green']
)
)
circle = Circle(x="lon", y="lat", size=15, fill_color="fill", line_color="black")
plot.add_glyph(source, circle)
pan = PanTool()
wheel_zoom = WheelZoomTool()
box_select = BoxSelectTool()
plot.add_tools(pan, wheel_zoom, box_select)
overlay = BoxSelectionOverlay(tool=box_select)
plot.add_layout(overlay)
doc = Document()
doc.add_root(plot)
if __name__ == "__main__":
filename = "maps.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Google Maps Example"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause | Python |
aed8a831bca72268ad9fbcd2f777d91af29d61b6 | Define const `OUTPUT_PANEL` for the panel name | SublimeLinter/SublimeLinter3,SublimeLinter/SublimeLinter3 | message_view.py | message_view.py | import sublime
import sublime_plugin
PANEL_NAME = "SublimeLinter Messages"
OUTPUT_PANEL = "output." + PANEL_NAME
def plugin_unloaded():
for window in sublime.windows():
window.destroy_output_panel(PANEL_NAME)
class SublimeLinterDisplayPanelCommand(sublime_plugin.WindowCommand):
def run(self, msg=""):
panel_view = self.window.create_output_panel(PANEL_NAME, True)
panel_view.set_read_only(False)
panel_view.run_command('append', {'characters': msg})
panel_view.set_read_only(True)
panel_view.show(0)
self.window.run_command("show_panel", {"panel": OUTPUT_PANEL})
class SublimeLinterRemovePanelCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.destroy_output_panel(PANEL_NAME)
| import sublime
import sublime_plugin
PANEL_NAME = "SublimeLinter Messages"
def plugin_unloaded():
for window in sublime.windows():
window.destroy_output_panel(PANEL_NAME)
class SublimeLinterDisplayPanelCommand(sublime_plugin.WindowCommand):
def run(self, msg=""):
panel_view = self.window.create_output_panel(PANEL_NAME, True)
panel_view.set_read_only(False)
panel_view.run_command('append', {'characters': msg})
panel_view.set_read_only(True)
panel_view.show(0)
self.window.run_command("show_panel", {"panel": "output.{}".format(PANEL_NAME)})
class SublimeLinterRemovePanelCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.destroy_output_panel(PANEL_NAME)
| mit | Python |
1874bd2172d4c96b9188cf2b157412d2522331ef | make it slightly larger | stoq/kiwi | examples/list/simple.py | examples/list/simple.py | import gtk
from kiwi.ui.objectlist import Column, ObjectList
class Fruit:
def __init__(self, name, price):
self.name = name
self.price = price
fruits = ObjectList([Column('name', data_type=str),
Column('price', data_type=int)])
for name, price in [('Apple', 4),
('Pineapple', 2),
('Kiwi', 8),
('Banana', 3),
('Melon', 5)]:
fruits.append(Fruit(name, price))
window = gtk.Window()
window.connect('delete-event', gtk.main_quit)
window.set_title('Fruits')
window.set_size_request(150, 180)
window.add(fruits)
window.show_all()
gtk.main()
| import gtk
from kiwi.ui.objectlist import Column, ObjectList
class Fruit:
def __init__(self, name, price):
self.name = name
self.price = price
fruits = ObjectList([Column('name', data_type=str),
Column('price', data_type=int)])
for name, price in [('Apple', 4),
('Pineapple', 2),
('Kiwi', 8),
('Banana', 3),
('Melon', 5)]:
fruits.append(Fruit(name, price))
window = gtk.Window()
window.connect('delete-event', gtk.main_quit)
window.set_title('Fruits')
window.set_size_request(150, 150)
window.add(fruits)
window.show_all()
gtk.main()
| lgpl-2.1 | Python |
abc135f36bbacd8783eb1855298565caa681d15d | bump minor release | izhan/Stream-Framework,izhan/Stream-Framework,smuser90/Stream-Framework,Anislav/Stream-Framework,SergioChan/Stream-Framework,SergioChan/Stream-Framework,smuser90/Stream-Framework,SergioChan/Stream-Framework,nikolay-saskovets/Feedly,smuser90/Stream-Framework,turbolabtech/Stream-Framework,Anislav/Stream-Framework,Architizer/Feedly,smuser90/Stream-Framework,izhan/Stream-Framework,turbolabtech/Stream-Framework,turbolabtech/Stream-Framework,izhan/Stream-Framework,SergioChan/Stream-Framework,Architizer/Feedly,Architizer/Feedly,nikolay-saskovets/Feedly,Anislav/Stream-Framework,nikolay-saskovets/Feedly,Anislav/Stream-Framework,turbolabtech/Stream-Framework,nikolay-saskovets/Feedly | feedly/__init__.py | feedly/__init__.py | __author__ = 'Thierry Schellenbach'
__copyright__ = 'Copyright 2012, Thierry Schellenbach'
__credits__ = ['Thierry Schellenbach, mellowmorning.com, @tschellenbach']
__license__ = 'BSD'
__version__ = '0.8.117'
__maintainer__ = 'Thierry Schellenbach'
__email__ = 'thierryschellenbach@gmail.com'
__status__ = 'Production'
| __author__ = 'Thierry Schellenbach'
__copyright__ = 'Copyright 2012, Thierry Schellenbach'
__credits__ = ['Thierry Schellenbach, mellowmorning.com, @tschellenbach']
__license__ = 'BSD'
__version__ = '0.8.116'
__maintainer__ = 'Thierry Schellenbach'
__email__ = 'thierryschellenbach@gmail.com'
__status__ = 'Production'
| bsd-3-clause | Python |
22cb2159d730fc2fd6c71a95eea20c7272e0d175 | bump a version | SergioChan/Stream-Framework,turbolabtech/Stream-Framework,Architizer/Feedly,turbolabtech/Stream-Framework,nikolay-saskovets/Feedly,Anislav/Stream-Framework,nikolay-saskovets/Feedly,turbolabtech/Stream-Framework,Architizer/Feedly,izhan/Stream-Framework,smuser90/Stream-Framework,nikolay-saskovets/Feedly,Anislav/Stream-Framework,izhan/Stream-Framework,SergioChan/Stream-Framework,Anislav/Stream-Framework,Anislav/Stream-Framework,izhan/Stream-Framework,SergioChan/Stream-Framework,turbolabtech/Stream-Framework,Architizer/Feedly,SergioChan/Stream-Framework,smuser90/Stream-Framework,smuser90/Stream-Framework,smuser90/Stream-Framework,nikolay-saskovets/Feedly,izhan/Stream-Framework | feedly/__init__.py | feedly/__init__.py | __author__ = 'Thierry Schellenbach'
__copyright__ = 'Copyright 2012, Thierry Schellenbach'
__credits__ = ['Thierry Schellenbach, mellowmorning.com, @tschellenbach']
__license__ = 'BSD'
__version__ = '0.3.11'
__maintainer__ = 'Thierry Schellenbach'
__email__ = 'thierryschellenbach@gmail.com'
__status__ = 'Production'
from feedly.connection import get_redis_connection
| __author__ = 'Thierry Schellenbach'
__copyright__ = 'Copyright 2012, Thierry Schellenbach'
__credits__ = ['Thierry Schellenbach, mellowmorning.com, @tschellenbach']
__license__ = 'BSD'
__version__ = '0.3.10'
__maintainer__ = 'Thierry Schellenbach'
__email__ = 'thierryschellenbach@gmail.com'
__status__ = 'Production'
from feedly.connection import get_redis_connection
| bsd-3-clause | Python |
73ef474ddc74478f2ae2d3d1e07993b99ed733e5 | fix coding bug | likelyzhao/mxnet,likelyzhao/mxnet,likelyzhao/mxnet,likelyzhao/mxnet,likelyzhao/mxnet,likelyzhao/mxnet,likelyzhao/mxnet,likelyzhao/mxnet,likelyzhao/mxnet | example/rcnn/eval.py | example/rcnn/eval.py | from __future__ import print_function
import argparse
import mxnet as mx
from rcnn.config import config, default, generate_config
from rcnn.tools.test_rcnn import test_rcnn
from pycrayon import CrayonClient
from datetime import datetime
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description='Test a Faster R-CNN network')
# general
parser.add_argument('--network', help='network name', default=default.network, type=str)
parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
args, rest = parser.parse_known_args()
generate_config(args.network, args.dataset)
parser.add_argument('--image_set', help='image_set name', default=default.test_image_set, type=str)
parser.add_argument('--root_path', help='output data folder', default=default.root_path, type=str)
parser.add_argument('--dataset_path', help='dataset path', default=default.dataset_path, type=str)
# testing
parser.add_argument('--prefix', help='model to test with', default=default.e2e_prefix, type=str)
parser.add_argument('--epoch', help='model to test with', default=default.e2e_epoch, type=str)
parser.add_argument('--gpu', help='GPU device to test with', default=0, type=int)
# rcnn
parser.add_argument('--vis', help='turn on visualization', action='store_true')
parser.add_argument('--thresh', help='valid detection threshold', default=1e-3, type=float)
parser.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
parser.add_argument('--has_rpn', help='generate proposals on the fly', action='store_true', default=True)
parser.add_argument('--proposal', help='can be ss for selective search or rpn', default='rpn', type=str)
# Tensorboard
parser.add_argument('--exp_name', help='name of tensor board expriment name', default=None, type=str)
args = parser.parse_args()
return args
def main():
args = parse_args()
ctx = mx.gpu(args.gpu)
print(args)
cc = CrayonClient(hostname='10.132.90.242')
if args.exp_name is None:
args.exp_name = datetime.now().strftime('frcnnEval_%m-%d')
try:
exp = cc.create_experiment(args.exp_name)
except:
exp = cc.open_experiment(args.exp_name)
else:
exp = cc.open_experiment(args.exp_name)
for x in args.epoch.split(","):
mAp = test_rcnn(args.network, args.dataset, args.image_set, args.root_path, args.dataset_path,
ctx, args.prefix,int(x),
args.vis, args.shuffle, args.has_rpn, args.proposal, args.thresh)
exp.add_scalar_value('mAp', mAp)
return
if __name__ == '__main__':
main()
| from __future__ import print_function
import argparse
import mxnet as mx
from rcnn.config import config, default, generate_config
from rcnn.tools.test_rcnn import test_rcnn
from pycrayon import CrayonClient
from datetime import datetime
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description='Test a Faster R-CNN network')
# general
parser.add_argument('--network', help='network name', default=default.network, type=str)
parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
args, rest = parser.parse_known_args()
generate_config(args.network, args.dataset)
parser.add_argument('--image_set', help='image_set name', default=default.test_image_set, type=str)
parser.add_argument('--root_path', help='output data folder', default=default.root_path, type=str)
parser.add_argument('--dataset_path', help='dataset path', default=default.dataset_path, type=str)
# testing
parser.add_argument('--prefix', help='model to test with', default=default.e2e_prefix, type=str)
parser.add_argument('--epoch', help='model to test with', default=default.e2e_epoch, type=str)
parser.add_argument('--gpu', help='GPU device to test with', default=0, type=int)
# rcnn
parser.add_argument('--vis', help='turn on visualization', action='store_true')
parser.add_argument('--thresh', help='valid detection threshold', default=1e-3, type=float)
parser.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
parser.add_argument('--has_rpn', help='generate proposals on the fly', action='store_true', default=True)
parser.add_argument('--proposal', help='can be ss for selective search or rpn', default='rpn', type=str)
# Tensorboard
parser.add_argument('--exp_name', help='name of tensor board expriment name', default=None, type=str)
args = parser.parse_args()
return args
def main():
args = parse_args()
ctx = mx.gpu(args.gpu)
print(args)
cc = CrayonClient(hostname='10.132.90.242')
if args.exp_name is None:
args.exp_name = datetime.now().strftime('frcnnEval_%m-%d')
try:
exp = cc.create_experiment(args.exp_name)
except:
exp = cc.open_experiment(args.exp_name)
else:
exp = cc.open_experiment(args.exp_name)
mAp = [1,2,5];
for x in args.epoch.split(","):
mAp = test_rcnn(args.network, args.dataset, args.image_set, args.root_path, args.dataset_path,
ctx, args.prefix,int(x),
args.vis, args.shuffle, args.has_rpn, args.proposal, args.thresh)
os.path.re
exp.add_scalar_value('mAp', mAp)
return
if __name__ == '__main__':
main()
| apache-2.0 | Python |
5b864c5fa5a27bbaddf7e7deafeedb19f0e0e51d | Update Watchers.py | possoumous/Watchers,possoumous/Watchers,possoumous/Watchers,possoumous/Watchers | examples/Watchers.py | examples/Watchers.py | from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open('https://stocktwits.com/symbol/CYTR?q=cytr') # Navigate to the web page
self.assert_element('sentiment-tab') # Assert element on page
self.click('sentiment-tab') # Click element on page
| from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open('https://stocktwits.com/') # Navigate to the web page
self.assert_element('sentiment-tab') # Assert element on page
self.click('sentiment-tab') # Click element on page
| mit | Python |
66ce9215ead86fa736bef13b065a281efb722a12 | remove deprecated TEMPLATE_DEBUG setting | fnp/wolnelektury,fnp/wolnelektury,fnp/wolnelektury,fnp/wolnelektury | src/wolnelektury/settings/basic.py | src/wolnelektury/settings/basic.py | # -*- coding: utf-8 -*-
# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from os import path
from .paths import PROJECT_DIR
DEBUG = False
MAINTENANCE_MODE = False
ADMINS = [
# ('Your Name', 'your_email@domain.com'),
]
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # 'postgresql_psycopg2'
'NAME': path.join(PROJECT_DIR, 'dev.db'),
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
}
}
SOLR = "http://localhost:8983/solr/wl/"
SOLR_TEST = "http://localhost:8983/solr/wl_test/"
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Warsaw'
USE_TZ = True
SITE_ID = 1
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
),
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'wolnelektury.context_processors.extra_settings',
'search.context_processors.search_form',
),
},
}]
| # -*- coding: utf-8 -*-
# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from os import path
from .paths import PROJECT_DIR
DEBUG = False
TEMPLATE_DEBUG = DEBUG
MAINTENANCE_MODE = False
ADMINS = [
# ('Your Name', 'your_email@domain.com'),
]
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # 'postgresql_psycopg2'
'NAME': path.join(PROJECT_DIR, 'dev.db'),
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
}
}
SOLR = "http://localhost:8983/solr/wl/"
SOLR_TEST = "http://localhost:8983/solr/wl_test/"
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Warsaw'
USE_TZ = True
SITE_ID = 1
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
),
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'wolnelektury.context_processors.extra_settings',
'search.context_processors.search_form',
),
},
}]
| agpl-3.0 | Python |
2a53a16a3683fc8aba4982ae010580365b813b8e | fix syntax | shortdudey123/SysInfoAPI | src/filesystem.py | src/filesystem.py | #!/usr/bin/env python
# =============================================================================
# file = filesystem.py
# description = Gets file system data
# author = GR <https://github.com/shortdudey123>
# create_date = 2014-07-29
# mod_date = 2014-07-29
# version = 0.1
# usage =
# notes =
# python_ver = 2.7.6
# =============================================================================
import subprocess
import platform
import re
def getFilesystemData():
retData = {}
sys = platform.system()
if sys == 'Linux':
proc = subprocess.Popen(['df'], stdout=subprocess.PIPE)
rawData = proc.communicate()
rawData = rawData[0].replace('Mounted on', 'Mounted_on')
rawDataLines = rawData.rstrip('\n').split('\n')
# remove the header
del rawDataLines[0]
for line in rawDataLines:
line = re.sub(' +', ' ', line)
line = line.split(' ')
retData[line[5]] = {'Filesystem': line[0],
'1K-blocks': line[1],
'Used': line[2],
'Available': line[3],
'UsePercent': line[4]
}
return retData
if __name__ == '__main__':
print getFilesystemData() | #!/usr/bin/env python
# =============================================================================
# file = filesystem.py
# description = Gets file system data
# author = GR <https://github.com/shortdudey123>
# create_date = 2014-07-29
# mod_date = 2014-07-29
# version = 0.1
# usage =
# notes =
# python_ver = 2.7.6
# =============================================================================
import subprocess
import platform
import re
def getFilesystemData():
retData = {}
sys = platform.system()
if sys == 'Linux':
proc = subprocess.Popen(['df'], stdout=subprocess.PIPE)
rawData = proc.communicate()
rawData = rawData[0].replace('Mounted on', 'Mounted_on')
rawDataLines.rstrip('\n')
rawDataLines = rawData.split('\n')
# remove the header
del rawDataLines[0]
for line in rawDataLines:
line = re.sub(' +', ' ', line)
line = line.split(' ')
retData[line[5]] = {'Filesystem': line[0],
'1K-blocks': line[1],
'Used': line[2],
'Available': line[3],
'UsePercent': line[4]
}
return retData
if __name__ == '__main__':
print getFilesystemData() | apache-2.0 | Python |
7715cd597dc5fad449ae2eca80f6bc4ad8bc64e9 | Add Slugged mixin to assessment template | VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core | src/ggrc/models/assessment_template.py | src/ggrc/models/assessment_template.py | # Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: peter@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""A module containing the implementation of the assessment template entity."""
from ggrc import db
from ggrc.models.mixins import Base
from ggrc.models.mixins import Slugged
from ggrc.models.mixins import Titled
from ggrc.models.mixins import CustomAttributable
from ggrc.models.reflection import PublishOnly
from ggrc.models.relationship import Relatable
from ggrc.models.types import JsonType
class AssessmentTemplate(Slugged, Base, Relatable, Titled,
CustomAttributable, db.Model):
"""A class representing the assessment template entity.
An Assessment Template is a template that allows users for easier creation of
multiple Assessments that are somewhat similar to each other, avoiding the
need to repeatedly define the same set of properties for every new Assessment
object.
"""
__tablename__ = "assessment_templates"
# the type of the object under assessment
template_object_type = db.Column(db.String, nullable=True)
# whether to use the control test plan as a procedure
test_plan_procedure = db.Column(db.Boolean, nullable=False)
# procedure description
procedure_description = db.Column(db.Text, nullable=True)
# the people that should be assigned by default to each assessment created
# within the releated audit
default_people = db.Column(JsonType, nullable=False)
# labels to show to the user in the UI for various default people values
DEFAULT_PEOPLE_LABELS = {
"Object Owners": "Object Owners",
"Audit Lead": "Audit Lead",
"Auditors": "Auditors",
"Primary Assessor": "Principal Assessor",
"Secondary Assessors": "Secondary Assessors",
"Primary Contact": "Primary Contact",
"Secondary Contact": "Secondary Contact",
}
_title_uniqueness = False
# REST properties
_publish_attrs = [
"template_object_type",
"test_plan_procedure",
"procedure_description",
"default_people",
PublishOnly("DEFAULT_PEOPLE_LABELS")
]
@classmethod
def generate_slug_prefix_for(cls, obj):
return "TEMPLATE"
| # Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: peter@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""A module containing the implementation of the assessment template entity."""
from ggrc import db
from ggrc.models.mixins import Base, Titled, CustomAttributable
from ggrc.models.reflection import PublishOnly
from ggrc.models.relationship import Relatable
from ggrc.models.types import JsonType
class AssessmentTemplate(Base, Relatable, Titled,
CustomAttributable, db.Model):
"""A class representing the assessment template entity.
An Assessment Template is a template that allows users for easier creation of
multiple Assessments that are somewhat similar to each other, avoiding the
need to repeatedly define the same set of properties for every new Assessment
object.
"""
__tablename__ = "assessment_templates"
# the type of the object under assessment
template_object_type = db.Column(db.String, nullable=True)
# whether to use the control test plan as a procedure
test_plan_procedure = db.Column(db.Boolean, nullable=False)
# procedure description
procedure_description = db.Column(db.Text, nullable=True)
# the people that should be assigned by default to each assessment created
# within the releated audit
default_people = db.Column(JsonType, nullable=False)
# labels to show to the user in the UI for various default people values
DEFAULT_PEOPLE_LABELS = {
"Object Owners": "Object Owners",
"Audit Lead": "Audit Lead",
"Auditors": "Auditors",
"Primary Assessor": "Principal Assessor",
"Secondary Assessors": "Secondary Assessors",
"Primary Contact": "Primary Contact",
"Secondary Contact": "Secondary Contact",
}
_title_uniqueness = False
# REST properties
_publish_attrs = [
"template_object_type",
"test_plan_procedure",
"procedure_description",
"default_people",
PublishOnly("DEFAULT_PEOPLE_LABELS")
]
| apache-2.0 | Python |
d623bec8d83f47e86228cc686470f6be84aa324d | Update to run shell | Genora51/Bitwise | bitwise.py | bitwise.py | from lib.bitparser import Parse
from lib.lexer import lex
from lib.basefuncs import Token, tokens
import os
from lib.evaluator import runStates as evaluate
import hashlib
def h11(w):
return hashlib.md5(w.encode()).hexdigest()[:9]
def interpret(text, vals = None):
lexed = lex(text, tokens)
parsed = Parse(lexed)
if vals is None:
return evaluate(parsed)
else: return evaluate(parsed,vals)
def runFile(fileName):
dirf = os.path.dirname(os.path.realpath(fileName))
cach = r"%s\__bitcache__\%sc"%(dirf,os.path.basename(fileName))
mtxt = open(fileName).read()
if os.path.isfile(cach):
hashed, ptext = open(cach).read().split('\n')
if h11(mtxt) == hashed:
parsed = eval(ptext)
else:
parsed = parsNo(mtxt,dirf,cach)
else:
parsed = parsNo(mtxt,dirf,cach)
evaluate(parsed)
def parsNo(mtxt,dirs,cach):
try:
os.mkdir(dirs + r"\__bitcache__")
except OSError:
pass
lexed = lex(mtxt, tokens)
parsed = Parse(lexed)
with open(cach,'w') as f:
pt = repr(parsed)
f.write(h11(mtxt) + '\n')
f.write(pt)
return parsed
def runCmd(args):
runFile(args.program)
if args.inputend:
input('Press enter to continue...')
def shell(args):
ext = False
sysex = False
vs = None
print("Bitwise Shell")
print("-"*20)
while not ext:
if sysex:
sysex = False
inp = input('\n-> ')
else:
inp = input('-> ')
ext = inp == "exit"
try:
if not ext:
if vs is not None:
vs = interpret(inp)
else: vs = interpret(inp, vs)
except SystemExit:
sysex = True
if __name__ == '__main__':
import argparse
# ...
parser = argparse.ArgumentParser(
description="Interpreter for the Bitwise Language."
)
pars = parser.add_subparsers()
rg = pars.add_parser('run', help='Run a Bitwise (.bit) program.')
rg.add_argument(
"-i",
"--inputend",
action="store_true",
help="Whether to end the program with a pause."
)
rg.add_argument(
"program",
help="The path of the program to be run."
)
rg.set_defaults(func=runCmd)
sg = pars.add_parser('shell', help="Run a Bitwise shell.")
sg.set_defaults(func=shell)
args = parser.parse_args()
try:
args.func(args)
except AttributeError:
args = parser.parse_args(['-h']) | from lib.bitparser import Parse
from lib.lexer import lex
from lib.basefuncs import Token, tokens
import os
from lib.evaluator import runStates as evaluate
import hashlib
def h11(w):
return hashlib.md5(w.encode()).hexdigest()[:9]
def interpret(text, vals = None):
lexed = lex(text, tokens)
parsed = Parse(lexed)
if vals is None:
return evaluate(parsed)
else: return evaluate(parsed,vals)
def runFile(fileName):
dirf = os.path.dirname(os.path.realpath(fileName))
cach = r"%s\__bitcache__\%sc"%(dirf,os.path.basename(fileName))
mtxt = open(fileName).read()
if os.path.isfile(cach):
hashed, ptext = open(cach).read().split('\n')
if h11(mtxt) == hashed:
parsed = eval(ptext)
else:
parsed = parsNo(mtxt,dirf,cach)
else:
parsed = parsNo(mtxt,dirf,cach)
evaluate(parsed)
def parsNo(mtxt,dirs,cach):
try:
os.mkdir(dirs + r"\__bitcache__")
except OSError:
pass
lexed = lex(mtxt, tokens)
parsed = Parse(lexed)
with open(cach,'w') as f:
pt = repr(parsed)
f.write(h11(mtxt) + '\n')
f.write(pt)
return parsed
def runCmd(args):
runFile(args.program)
if args.inputend:
input('Press enter to continue...')
def shell(args):
ext = False
vs = None
print("Bitwise Shell")
print("-"*20)
while not ext:
inp = input('-> ')
ext = inp == "exit"
if not ext:
if vs is not None:
vs = interpret(inp)
else: vs = interpret(inp, vs)
if __name__ == '__main__':
import argparse
# ...
parser = argparse.ArgumentParser(
description="Interpreter for the Bitwise Language."
)
pars = parser.add_subparsers()
rg = pars.add_parser('run', help='Run a Bitwise (.bit) program.')
rg.add_argument(
"-i",
"--inputend",
action="store_true",
help="Whether to end the program with a pause."
)
rg.add_argument(
"program",
help="The path of the program to be run."
)
rg.set_defaults(func=runCmd)
sg = pars.add_parser('shell', help="Run a Bitwise shell.")
sg.set_defaults(func=shell)
args = parser.parse_args()
try:
args.func(args)
except AttributeError:
args = parser.parse_args(['-h']) | mit | Python |
12ac6964737b08c592fcdba02cfba66ab64bad28 | remove deprecated constant | yero13/agilego.py | extract/jira/backlog.py | extract/jira/backlog.py | import json
from .cfg import jira_cfg
from .request import Request, Field
class SprintBacklogRequest(Request):
def __init__(self, login, pswd):
with open(jira_cfg[__class__.__name__]) as cfg_file:
Request.__init__(self, json.load(cfg_file, strict=False), login, pswd, is_multipage=True)
def _parse_response(self, response, out_data):
backlog = []
Field._parse_field(response, self._response_cfg[self._content_root], backlog)
for issue in backlog:
out_data.update({issue[Field.FIELD_KEY]:issue})
self._logger.debug('issue: {}'.format(issue))
return out_data
| import json
from .cfg import jira_cfg
from .request import Request, Field
class SprintBacklogRequest(Request):
# ToDo: move to json cfg file as response key item to parse
__KEY_BACKLOG_ITEMS = 'issues'
def __init__(self, login, pswd):
with open(jira_cfg[__class__.__name__]) as cfg_file:
Request.__init__(self, json.load(cfg_file, strict=False), login, pswd, is_multipage=True)
def _parse_response(self, response, out_data):
backlog = []
Field._parse_field(response, self._response_cfg[self._content_root], backlog)
for issue in backlog:
out_data.update({issue[Field.FIELD_KEY]:issue})
self._logger.debug('issue: {}'.format(issue))
return out_data
| mit | Python |
8922a35a5c0976b9930e269b86361fc2235a6d08 | Bump version to 0.5.1. | XiaonuoGantan/pywebsocket,XiaonuoGantan/pywebsocket | src/setup.py | src/setup.py | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Set up script for mod_pywebsocket.
"""
from distutils.core import setup
import sys
_PACKAGE_NAME = 'mod_pywebsocket'
if sys.version < '2.3':
print >>sys.stderr, '%s requires Python 2.3 or later.' % _PACKAGE_NAME
sys.exit(1)
setup(author='Yuzo Fujishima',
author_email='yuzo@chromium.org',
description='Web Socket extension for Apache HTTP Server.',
long_description=(
'mod_pywebsocket is an Apache HTTP Server extension for '
'Web Socket (http://tools.ietf.org/html/'
'draft-hixie-thewebsocketprotocol). '
'See mod_pywebsocket/__init__.py for more detail.'),
license='See COPYING',
name=_PACKAGE_NAME,
packages=[_PACKAGE_NAME, _PACKAGE_NAME + '.handshake'],
url='http://code.google.com/p/pywebsocket/',
version='0.5.1',
)
# vi:sts=4 sw=4 et
| #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Set up script for mod_pywebsocket.
"""
from distutils.core import setup
import sys
_PACKAGE_NAME = 'mod_pywebsocket'
if sys.version < '2.3':
print >>sys.stderr, '%s requires Python 2.3 or later.' % _PACKAGE_NAME
sys.exit(1)
setup(author='Yuzo Fujishima',
author_email='yuzo@chromium.org',
description='Web Socket extension for Apache HTTP Server.',
long_description=(
'mod_pywebsocket is an Apache HTTP Server extension for '
'Web Socket (http://tools.ietf.org/html/'
'draft-hixie-thewebsocketprotocol). '
'See mod_pywebsocket/__init__.py for more detail.'),
license='See COPYING',
name=_PACKAGE_NAME,
packages=[_PACKAGE_NAME, _PACKAGE_NAME + '.handshake'],
url='http://code.google.com/p/pywebsocket/',
version='0.4.9.2',
)
# vi:sts=4 sw=4 et
| bsd-3-clause | Python |
0b89abf9cc0e0bbeb50dce7df6d680360e3152c3 | disable account and password in default config | eads/tarbell,eads/tarbell,tarbell-project/tarbell,NUKnightLab/tarbell,NUKnightLab/tarbell,eyeseast/tarbell,tarbell-project/tarbell,eyeseast/tarbell,NUKnightLab/tarbell | tarbell/project_template/config.py | tarbell/project_template/config.py | """
Google doc configuration. If not provided, no Google doc will be used.
"""
{% if spreadsheet_key %}
GOOGLE_DOC = {
'key': '{{ spreadsheet_key }}',
#'account': '<gmail address>',
#'password': '<password>',
}
{% else %}
# GOOGLE_DOC = {
# 'key': '<spreadsheet key>',
# 'account': '<gmail address>',
# 'password': '<password>',
# }
{% endif %}
"""
Set default context. These variables will be globally available to the template.
"""
DEFAULT_CONTEXT = {
'title': '{{ long_name }}',
'ad_path': '',
'analytics_path': '',
}
"""
Root URL project will appear at (e.g. http://mydomain.tld/{{ url_root }})
"""
# URL_ROOT = '{{ project_name }}'
"""
Don't render to static HTML.
"""
# DONT_PUBLISH = False
"""
Uncomment the following lines to provide this configuration file as a Flask
blueprint.
"""
# from flask import Blueprint
# blueprint = Blueprint('{{ project_name }}', __name__)
"""
Example use of flask blueprint to add a template filter.
"""
# @blueprint.app_template_filter('example_filter')
# def example_filter(text):
# return text + ' ...suffix.'
| """
Google doc configuration. If not provided, no Google doc will be used.
"""
{% if spreadsheet_key %}
GOOGLE_DOC = {
'key': '{{ spreadsheet_key }}',
'account': '<gmail address>',
'password': '<password>',
}
{% else %}
# GOOGLE_DOC = {
# 'key': '<spreadsheet key>',
# 'account': '<gmail address>',
# 'password': '<password>',
# }
{% endif %}
"""
Set default context. These variables will be globally available to the template.
"""
DEFAULT_CONTEXT = {
'title': '{{ long_name }}',
'ad_path': '',
'analytics_path': '',
}
"""
Root URL project will appear at (e.g. http://mydomain.tld/{{ url_root }})
"""
# URL_ROOT = '{{ project_name }}'
"""
Don't render to static HTML.
"""
# DONT_PUBLISH = False
"""
Uncomment the following lines to provide this configuration file as a Flask
blueprint.
"""
# from flask import Blueprint
# blueprint = Blueprint('{{ project_name }}', __name__)
"""
Example use of flask blueprint to add a template filter.
"""
# @blueprint.app_template_filter('example_filter')
# def example_filter(text):
# return text + ' ...suffix.'
| bsd-3-clause | Python |
5396109075f0b0c56dc02968256457eed9236722 | Fix MemMatrix script's output. | HimariO/VideoSum,HimariO/VideoSum | tasks/video/Visualize/MemMatrix.py | tasks/video/Visualize/MemMatrix.py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.animation as animation
import getopt
import sys
options, _ = getopt.getopt(sys.argv[1:], '', ['file='])
filename = 'Ugb_uH72d0I_8_17_memMatrix_step-81382.npy'
dpi = 150
fig_size = (2560 / dpi, 1440 / dpi)
for opt in options:
if opt[0] == '--file':
filename = opt[1]
mem_dict = np.load(filename).tolist()
# already get those from MemView.py
mem_dict.pop('usage_vector')
mem_dict.pop('read_weightings')
mem_dict.pop('write_weighting')
for k in mem_dict.keys():
mem_dict[k] = mem_dict[k][0]
fig = plt.figure()
fig2 = plt.figure()
# fig3 = plt.figure()
link_mat = mem_dict['link_matrix']
mem_mat = mem_dict['memory_matrix']
read_vecs = mem_dict['read_vectors']
ims = []
ax1 = fig.add_subplot(1, 2, 1)
for i in range(link_mat.shape[0]):
im = ax1.imshow(link_mat[i], animated=True)
ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=75, blit=False, repeat_delay=0)
# plt.colorbar(orientation='vertical')
# ani.save('dynamic_images.mp4', extra_args=['-vcodec', 'libx264', '-pix_fmt', 'yuv420p'])
ims = []
ax2 = fig.add_subplot(1, 2, 2)
for i in range(mem_mat.shape[0]):
im = ax2.imshow(mem_mat[i], animated=True)
ims.append([im])
ani2 = animation.ArtistAnimation(fig, ims, interval=75, blit=False, repeat_delay=0)
# plt.colorbar(orientation='vertical')
# ani2.save('dynamic_images.mp4')
# plt.show()
for i in range(4):
a = fig2.add_subplot(2, 2, i + 1)
imgplot = plt.imshow(read_vecs[:, :, i])
# imgplot.set_clim(0.0, 0.7)
a.set_title('readhead[%d] read vector(values)' % i)
a.set_ylabel('step')
plt.colorbar(orientation='vertical')
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.animation as animation
import getopt
import sys
options, _ = getopt.getopt(sys.argv[1:], '', ['file='])
filename = 'Ugb_uH72d0I_8_17_memMatrix_step-81382.npy'
dpi = 150
fig_size = (2560 / dpi, 1440 / dpi)
for opt in options:
if opt[0] == '--file':
filename = opt[1]
mem_dict = np.load(filename).tolist()
# already get those from MemView.py
mem_dict.pop('usage_vector')
mem_dict.pop('read_weightings')
mem_dict.pop('write_weighting')
for k in mem_dict.keys():
mem_dict[k] = mem_dict[k][0]
fig = plt.figure()
fig2 = plt.figure()
fig3 = plt.figure()
link_mat = mem_dict['link_matrix']
mem_mat = mem_dict['memory_matrix']
read_vecs = mem_dict['read_vectors']
ims = []
for i in range(link_mat.shape[0]):
im = plt.imshow(link_mat[i], animated=True)
ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=75, blit=True, repeat_delay=0)
plt.colorbar(orientation='vertical')
ani.save('dynamic_images.mp4', extra_args=['-vcodec', 'libx264', '-pix_fmt', 'yuv420p'])
ims = []
for i in range(mem_mat.shape[0]):
im = plt.imshow(mem_mat[i], animated=True)
ims.append([im])
ani2 = animation.ArtistAnimation(fig2, ims, interval=75, blit=True, repeat_delay=0)
plt.colorbar(orientation='vertical')
# ani2.save('dynamic_images.mp4')
# plt.show()
# for i in range(4):
# a = fig.add_subplot(2, 2, i + 1)
# imgplot = plt.imshow(read_vecs[:, :, i])
# # imgplot.set_clim(0.0, 0.7)
# a.set_title('readhead[%d] read vector(values)' % i)
# a.set_ylabel('step')
# plt.colorbar(orientation='vertical')
#
# plt.show()
| mit | Python |
745b98aa465bc05500ffd893c15844cb454011f9 | Improve tests | okuta/chainer,ktnyt/chainer,anaruse/chainer,kikusu/chainer,keisuke-umezawa/chainer,keisuke-umezawa/chainer,aonotas/chainer,niboshi/chainer,benob/chainer,kikusu/chainer,tkerola/chainer,cupy/cupy,wkentaro/chainer,ronekko/chainer,okuta/chainer,benob/chainer,okuta/chainer,ktnyt/chainer,AlpacaDB/chainer,AlpacaDB/chainer,chainer/chainer,kiyukuta/chainer,niboshi/chainer,jnishi/chainer,jnishi/chainer,cupy/cupy,chainer/chainer,kashif/chainer,hvy/chainer,okuta/chainer,chainer/chainer,chainer/chainer,ysekky/chainer,keisuke-umezawa/chainer,cupy/cupy,hvy/chainer,ktnyt/chainer,niboshi/chainer,cemoody/chainer,rezoo/chainer,delta2323/chainer,jnishi/chainer,hvy/chainer,wkentaro/chainer,niboshi/chainer,ktnyt/chainer,wkentaro/chainer,keisuke-umezawa/chainer,cupy/cupy,wkentaro/chainer,pfnet/chainer,hvy/chainer,jnishi/chainer | tests/cupy_tests/manipulation_tests/test_kind.py | tests/cupy_tests/manipulation_tests/test_kind.py | import unittest
from cupy import testing
@testing.gpu
class TestKind(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal(type_check=False)
def test_asfortranarray1(self, xp, dtype):
x = xp.zeros((2, 3), dtype)
ret = xp.asfortranarray(x)
self.assertTrue(x.flags.c_contiguous)
self.assertTrue(ret.flags.f_contiguous)
return ret.strides
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal(type_check=False)
def test_asfortranarray2(self, xp, dtype):
x = xp.zeros((2, 3, 4), dtype)
ret = xp.asfortranarray(x)
self.assertTrue(x.flags.c_contiguous)
self.assertTrue(ret.flags.f_contiguous)
return ret.strides
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal(type_check=False)
def test_asfortranarray3(self, xp, dtype):
x = xp.zeros((2, 3, 4), dtype)
ret = xp.asfortranarray(xp.asfortranarray(x))
self.assertTrue(x.flags.c_contiguous)
self.assertTrue(ret.flags.f_contiguous)
return ret.strides
| import unittest
from cupy import testing
@testing.gpu
class TestKind(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal(type_check=False)
def test_asfortranarray1(self, xp, dtype):
x = xp.zeros((2, 3))
return xp.asfortranarray(x).strides
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal(type_check=False)
def test_asfortranarray2(self, xp, dtype):
x = xp.zeros((2, 3, 4))
return xp.asfortranarray(x).strides
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal(type_check=False)
def test_asfortranarray3(self, xp, dtype):
x = xp.zeros((2, 3, 4))
return xp.asfortranarray(xp.asfortranarray(x)).strides
| mit | Python |
958bb725cce490ecf5d9f2052e739d2b1fe84b3d | Make centroid factory locations a little more plausible | vessemer/concept-to-clinic,vessemer/concept-to-clinic,vessemer/concept-to-clinic,vessemer/concept-to-clinic | interface/backend/images/factories.py | interface/backend/images/factories.py | import factory
import factory.fuzzy
from backend.images import models
class ImageSeriesFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.ImageSeries
patient_id = factory.Sequence(lambda n: "TEST-SERIES-%04d" % n)
series_instance_uid = factory.Sequence(lambda n: "1.3.6.1.4.1.14519.5.2.1.6279.6001.%030d" % n)
uri = factory.LazyAttribute(lambda f: 'file:///tmp/%s/' % f.series_instance_uid)
class ImageLocationFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.ImageLocation
series = factory.LazyAttribute(lambda f: f.factory_parent.case.series)
x = factory.fuzzy.FuzzyInteger(0, 256)
y = factory.fuzzy.FuzzyInteger(0, 256)
z = factory.fuzzy.FuzzyInteger(0, 16)
| import factory
import factory.fuzzy
from backend.images import models
class ImageSeriesFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.ImageSeries
patient_id = factory.Sequence(lambda n: "TEST-SERIES-%04d" % n)
series_instance_uid = factory.Sequence(lambda n: "1.3.6.1.4.1.14519.5.2.1.6279.6001.%030d" % n)
uri = factory.LazyAttribute(lambda f: 'file:///tmp/%s/' % f.series_instance_uid)
class ImageLocationFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.ImageLocation
series = factory.LazyAttribute(lambda f: f.factory_parent.case.series)
x = factory.fuzzy.FuzzyInteger(0, 511)
y = factory.fuzzy.FuzzyInteger(0, 511)
z = factory.fuzzy.FuzzyInteger(0, 63)
| mit | Python |
9195943db5efcf9c847422d521ef1b0ae4124526 | Update __init__.py | hammerlab/fancyimpute,iskandr/fancyimpute | fancyimpute/__init__.py | fancyimpute/__init__.py | from __future__ import absolute_import, print_function, division
from .solver import Solver
from .nuclear_norm_minimization import NuclearNormMinimization
from .matrix_factorization import MatrixFactorization
from .iterative_svd import IterativeSVD
from .simple_fill import SimpleFill
from .soft_impute import SoftImpute
from .scaler import BiScaler
from .knn import KNN
from .similarity_weighted_averaging import SimilarityWeightedAveraging
# while iterative imputer is experimental in sklearn, we need this
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
__version__ = "0.5.4"
__all__ = [
"Solver",
"NuclearNormMinimization",
"MatrixFactorization",
"IterativeSVD",
"SimpleFill",
"SoftImpute",
"BiScaler",
"KNN",
"SimilarityWeightedAveraging",
"IterativeImputer"
]
| from __future__ import absolute_import, print_function, division
from .solver import Solver
from .nuclear_norm_minimization import NuclearNormMinimization
from .matrix_factorization import MatrixFactorization
from .iterative_svd import IterativeSVD
from .simple_fill import SimpleFill
from .soft_impute import SoftImpute
from .scaler import BiScaler
from .knn import KNN
from .similarity_weighted_averaging import SimilarityWeightedAveraging
# while iterative imputer is experimental in sklearn, we need this
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
__version__ = "0.5.3"
__all__ = [
"Solver",
"NuclearNormMinimization",
"MatrixFactorization",
"IterativeSVD",
"SimpleFill",
"SoftImpute",
"BiScaler",
"KNN",
"SimilarityWeightedAveraging",
"IterativeImputer"
]
| apache-2.0 | Python |
b12e96f6365746b21b929bbd827c2070f183b01e | Fix lint issues | watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder | feder/domains/models.py | feder/domains/models.py | from feder.organisations.models import Organisation
from django.db import models
from model_utils.models import TimeStampedModel
from django.utils.translation import ugettext_lazy as _
class DomainQuerySet(models.QuerySet):
def for_user(self, user):
return self
class Domain(TimeStampedModel):
name = models.CharField(verbose_name=_("Name"), max_length=50)
active = models.BooleanField(default=True, help_text=_("Activity status"))
organisation = models.ForeignKey(
to=Organisation,
verbose_name=_("Organisation"),
on_delete=models.CASCADE,
null=True, # TODO(ad-m): make field required after data migration
)
objects = DomainQuerySet.as_manager()
class Meta:
verbose_name = _("Domain")
verbose_name_plural = _("Domains")
ordering = ["created"]
def __str__(self):
return self.name
| from feder.organisations.models import Organisation
from django.db import models
from model_utils.models import TimeStampedModel
from django.utils.translation import ugettext_lazy as _
class DomainQuerySet(models.QuerySet):
def for_user(self, user):
return self
class Domain(TimeStampedModel):
name = models.CharField(verbose_name=_("Name"), max_length=50)
active = models.BooleanField(default=True, help_text=_("Activity status"))
organisation = models.ForeignKey(
to=Organisation,
verbose_name=_("Organisation"),
on_delete=models.CASCADE,
null=True, # TODO(ad-m): make field required after data migration
)
objects = DomainQuerySet.as_manager()
class Meta:
verbose_name = _("Domain")
verbose_name_plural = _("Domains")
ordering = ["created"]
def __str__(self):
return self.name
| mit | Python |
0e8ebdff245239de28f3d43897038a7bb37c0b1d | Change row offset | ebegoli/SynthNotes | loadNotes.py | loadNotes.py | from synthnotes.generators import NoteGenerator
import psycopg2 as psy
def main():
generator = NoteGenerator()
params = {'database': 'synthea', 'user': 'postgres', 'host': '172.22.10.147'}
conn = psy.connect(**params)
cur = conn.cursor()
cur.execute("""select id, person_id, start, stop from encounter
where display like 'Psychiatric procedure or service' limit 10;
""")
results = cur.fetchall()
for row in results:
encounter_id = row[0]
person_id = row[1]
start = row[2]
stop = row[3]
note = generator.generate()
cur.execute("""
INSERT into notes(encounter_id, person_id, note, start, stop)
Values (%s, %s, %s, %s)
""", encounter_id, person_id, note, start, stop)
if __name__ == '__main__':
main()
| from synthnotes.generators import NoteGenerator
import psycopg2 as psy
def main():
generator = NoteGenerator()
params = {'database': 'synthea', 'user': 'postgres', 'host': '172.22.10.147'}
conn = psy.connect(**params)
cur = conn.cursor()
cur.execute("""select id, person_id, start, stop from encounter
where display like 'Psychiatric procedure or service' limit 10;
""")
results = cur.fetchall()
for row in results:
encounter_id = row[0]
person_id = row[1]
start = row[5]
stop = row[6]
note = generator.generate()
cur.execute("""
INSERT into notes(encounter_id, person_id, note, start, stop)
Values (%s, %s, %s, %s)
""", encounter_id, person_id, note, start, stop)
if __name__ == '__main__':
main()
| mit | Python |
cd44f9b09f6a961b621091155f418a11adb55dd3 | Document components/ansible | xii/xii,xii/xii | src/xii/builtin/components/ansible/component.py | src/xii/builtin/components/ansible/component.py |
from xii.component import Component
from xii.need import NeedIO, NeedSSH
from xii import error
from ansible import NeedAnsible
class AnsibleComponent(Component, NeedAnsible, NeedIO, NeedSSH):
"""
Easily provision created images using standard ansible.
.. note::
Make sure the ansible commandline tool is installed and working
if you want to use this feature.
Example definition:
::
# vim: set ts=2 sw=2 tw=0 ft=yaml:
---
my-vms:
type: node
pool: default
image: {{ image }}
count: 4
user:
root:
password: linux
ssh:
copy-key:
users:
- root
privision-vms:
type: ansible
hosts:
all: my-vms
group-A: [my-vms-1, my-vms-3]
group-B: [my-vms-2, my-vms-4]
run: privison-vms.yml
env:
extra_env: additional_variables.conf
Checkout the :doc:`/components/ansible/hosts` for more information about
how to populate the inventor list
"""
short_description="Provision virtual hosts using ansible"
ctype = "ansible"
required_attributes = ["hosts", "run"]
default_attributes = ["hosts", "env"]
requires = ["network", "node"]
def ansible_executable(self):
return "ansible-playbook"
def validate(self):
if not self.is_ansible_installed():
raise error.NotFound("Need ansible installed to provision nodes")
Component.validate(self)
def start(self):
tmp = self.io().mktempdir("xii-ansible")
inventory = self.get_attribute("hosts").generate_inventory(tmp)
envvars = self.get_attribute("env").get_vars()
playbook = self.get_attribute("run").get_playbook(tmp)
try:
self.say("run playbook...")
# FIXME: Make sure all hosts are up now!
status = self.run_playbook(inventory, playbook, env=envvars)
finally:
self.io().rm(tmp)
if status:
self.success("provisioned!")
else:
self.warn("provisioning failed!")
|
from xii.component import Component
from xii.need import NeedIO, NeedSSH
from xii import error
from ansible import NeedAnsible
class AnsibleComponent(Component, NeedAnsible, NeedIO, NeedSSH):
ctype = "ansible"
required_attributes = ["hosts", "run"]
default_attributes = ["hosts", "env"]
requires = ["network", "node"]
def ansible_executable(self):
return "ansible-playbook"
def validate(self):
if not self.is_ansible_installed():
raise error.NotFound("Need ansible installed to provision nodes")
Component.validate(self)
def start(self):
tmp = self.io().mktempdir("xii-ansible")
inventory = self.get_attribute("hosts").generate_inventory(tmp)
envvars = self.get_attribute("env").get_vars()
playbook = self.get_attribute("run").get_playbook(tmp)
try:
self.say("run playbook...")
# FIXME: Make sure all hosts are up now!
status = self.run_playbook(inventory, playbook, env=envvars)
finally:
self.io().rm(tmp)
if status:
self.success("provisioned!")
else:
self.warn("provisioning failed!")
| apache-2.0 | Python |
65531eda0f1584cb3c8552cef8593cbf69536139 | Enable strict warnings in test runs. | hzruandd/tornado,hzruandd/tornado,insflow/tornado,0xkag/tornado,bdarnell/tornado,anandology/tornado,johan--/tornado,erichuang1994/tornado,jonashagstedt/tornado,BencoLee/tornado,ifduyue/tornado,ListFranz/tornado,mivade/tornado,kevinge314gh/tornado,0x73/tornado,dsseter/tornado,wechasing/tornado,frtmelody/tornado,bufferx/tornado,djt5019/tornado,ZhuPeng/tornado,cyrilMargaria/tornado,ajdavis/tornado,lujinda/tornado,0x73/tornado,cyrusin/tornado,mr-ping/tornado,yuezhonghua/tornado,jarrahwu/tornado,kevinge314gh/tornado,Lancher/tornado,ZhuPeng/tornado,lujinda/tornado,akalipetis/tornado,kaushik94/tornado,Geoion/tornado,Callwoola/tornado,frtmelody/tornado,Acidburn0zzz/tornado,lujinda/tornado,sunjeammy/tornado,anjan-srivastava/tornado,Aaron1992/tornado,MjAbuz/tornado,lsanotes/tornado,sxfmol/tornado,mlyundin/tornado,InverseLina/tornado,xinyu7/tornado,futurechallenger/tornado,gitchs/tornado,tornadoweb/tornado,nordaux/tornado,ymero/tornado,LTD-Beget/tornado,BencoLee/tornado,kaushik94/tornado,Acidburn0zzz/tornado,anandology/tornado,LTD-Beget/tornado,mlyundin/tornado,Acidburn0zzz/tornado,Callwoola/tornado,wujuguang/tornado,leekchan/tornado_test,cyrilMargaria/tornado,kangbiao/tornado,pombredanne/tornado,elijah513/tornado,jparise/tornado,mlyundin/tornado,0x73/tornado,shaohung001/tornado,ListFranz/tornado,cyrilMargaria/tornado,gitchs/tornado,frtmelody/tornado,erichuang1994/tornado,jehiah/tornado,eXcomm/tornado,lujinda/tornado,ColorFuzzy/tornado,304471720/tornado,sxfmol/tornado,lilydjwg/tornado,codeb2cc/tornado,yuyangit/tornado,jparise/tornado,ZhuPeng/tornado,BencoLee/tornado,arthurdarcet/tornado,codecov/tornado,NoyaInRain/tornado,wujuguang/tornado,nbargnesi/tornado,jampp/tornado,304471720/tornado,wechasing/tornado,yuyangit/tornado,mivade/tornado,drewmiller/tornado,nephics/tornado,cyrilMargaria/tornado,jarrahwu/tornado,cyrusin/tornado,jparise/tornado,dongpinglai/my_tornado,nordaux/tornado,bywbilly/tornado,dongpinglai/my_tornado,whip112/tornado,hzruandd/tornado,djt5019/tornado,z-fork/tornado,andyaguiar/tornado,Windsooon/tornado,z-fork/tornado,zguangyu/tornado,ListFranz/tornado,bywbilly/tornado,sunjeammy/tornado,importcjj/tornado,erichuang1994/tornado,wxhzk/tornado-1,futurechallenger/tornado,kangbiao/tornado,mehmetkose/tornado,nbargnesi/tornado,sunjeammy/tornado,Geoion/tornado,coderhaoxin/tornado,sevenguin/tornado,Snamint/tornado,wsyzxcn/tornado,zguangyu/tornado,wxhzk/tornado-1,gwillem/tornado,andyaguiar/tornado,0xkag/tornado,dsseter/tornado,sevenguin/tornado,tianyk/tornado-research,noxiouz/tornado,mehmetkose/tornado,jsjohnst/tornado,djt5019/tornado,Aaron1992/tornado,kevinge314gh/tornado,ubear/tornado,yuezhonghua/tornado,frtmelody/tornado,ajdavis/tornado,InverseLina/tornado,kaushik94/tornado,fengsp/tornado,SuminAndrew/tornado,Polyconseil/tornado,anjan-srivastava/tornado,Fydot/tornado,ColorFuzzy/tornado,Drooids/tornado,ZhuPeng/tornado,andyaguiar/tornado,jehiah/tornado,wechasing/tornado,eklitzke/tornado,arthurdarcet/tornado,pombredanne/tornado,eXcomm/tornado,chenxiaba/tornado,Batterfii/tornado,fengsp/tornado,eXcomm/tornado,wxhzk/tornado-1,allenl203/tornado,lsanotes/tornado,cyrilMargaria/tornado,jparise/tornado,kippandrew/tornado,akalipetis/tornado,mr-ping/tornado,Acidburn0zzz/tornado,NoyaInRain/tornado,hhru/tornado,Drooids/tornado,kippandrew/tornado,Aaron1992/tornado,allenl203/tornado,anandology/tornado,304471720/tornado,pombredanne/tornado,sunjeammy/tornado,codeb2cc/tornado,ydaniv/tornado,nephics/tornado,Snamint/tornado,VShangxiao/tornado,whip112/tornado,zhuochenKIDD/tornado,Batterfii/tornado,hhru/tornado,ubear/tornado,z-fork/tornado,mehmetkose/tornado,bywbilly/tornado,ColorFuzzy/tornado,VShangxiao/tornado,shaohung001/tornado,codeb2cc/tornado,chenxiaba/tornado,Batterfii/tornado,ifduyue/tornado,Polyconseil/tornado,fengshao0907/tornado,QuanZag/tornado,pombredanne/tornado,icejoywoo/tornado,drewmiller/tornado,pombredanne/tornado,ZhuPeng/tornado,304471720/tornado,tornadoweb/tornado,gitchs/tornado,gwillem/tornado,MjAbuz/tornado,yangkf1985/tornado,arthurdarcet/tornado,VShangxiao/tornado,ymero/tornado,dongpinglai/my_tornado,mr-ping/tornado,jonashagstedt/tornado,djt5019/tornado,obsh/tornado,Windsooon/tornado,SuminAndrew/tornado,VShangxiao/tornado,anandology/tornado,zhuochenKIDD/tornado,shashankbassi92/tornado,dsseter/tornado,Windsooon/tornado,hzruandd/tornado,wechasing/tornado,ajdavis/tornado,LTD-Beget/tornado,zguangyu/tornado,elelianghh/tornado,felixonmars/tornado,Batterfii/tornado,jsjohnst/tornado,nordaux/tornado,importcjj/tornado,wxhzk/tornado-1,lsanotes/tornado,sevenguin/tornado,mehmetkose/tornado,mr-ping/tornado,nephics/tornado,arthurdarcet/tornado,jsjohnst/tornado,zhuochenKIDD/tornado,felixonmars/tornado,whip112/tornado,jarrahwu/tornado,takeshineshiro/tornado,leekchan/tornado_test,fengshao0907/tornado,AlphaStaxLLC/tornado,Acidburn0zzz/tornado,hzruandd/tornado,Fydot/tornado,mr-ping/tornado,djt5019/tornado,lsanotes/tornado,drewmiller/tornado,kevinge314gh/tornado,zhuochenKIDD/tornado,shashankbassi92/tornado,Snamint/tornado,mlyundin/tornado,chenxiaba/tornado,frtmelody/tornado,tianyk/tornado-research,SuminAndrew/tornado,kangbiao/tornado,elelianghh/tornado,legnaleurc/tornado,lujinda/tornado,0x73/tornado,bdarnell/tornado,felixonmars/tornado,obsh/tornado,Windsooon/tornado,kippandrew/tornado,mr-ping/tornado,codecov/tornado,jonashagstedt/tornado,Polyconseil/tornado,elijah513/tornado,insflow/tornado,wsyzxcn/tornado,wsyzxcn/tornado,MjAbuz/tornado,ms7s/tornado,ms7s/tornado,icejoywoo/tornado,VShangxiao/tornado,shashankbassi92/tornado,LTD-Beget/tornado,frtmelody/tornado,InverseLina/tornado,InverseLina/tornado,z-fork/tornado,ms7s/tornado,ydaniv/tornado,jehiah/tornado,sunjeammy/tornado,Snamint/tornado,fengshao0907/tornado,eXcomm/tornado,felixonmars/tornado,nephics/tornado,lsanotes/tornado,wsyzxcn/tornado,yangkf1985/tornado,legnaleurc/tornado,Snamint/tornado,anjan-srivastava/tornado,kaushik94/tornado,johan--/tornado,AlphaStaxLLC/tornado,ubear/tornado,johan--/tornado,elijah513/tornado,tornadoweb/tornado,gitchs/tornado,Fydot/tornado,yuezhonghua/tornado,jehiah/tornado,MjAbuz/tornado,jehiah/tornado,wxhzk/tornado-1,QuanZag/tornado,ydaniv/tornado,jarrahwu/tornado,Lancher/tornado,ListFranz/tornado,304471720/tornado,noxiouz/tornado,Drooids/tornado,chenxiaba/tornado,legnaleurc/tornado,felixonmars/tornado,drewmiller/tornado,insflow/tornado,yuezhonghua/tornado,akalipetis/tornado,tianyk/tornado-research,cyrusin/tornado,liqueur/tornado,Snamint/tornado,ZhuPeng/tornado,tianyk/tornado-research,yangkf1985/tornado,dongpinglai/my_tornado,shaohung001/tornado,leekchan/tornado_test,jonashagstedt/tornado,coderhaoxin/tornado,eklitzke/tornado,gwillem/tornado,mlyundin/tornado,ifduyue/tornado,kangbiao/tornado,ovidiucp/tornado,lsanotes/tornado,QuanZag/tornado,ajdavis/tornado,0xkag/tornado,liqueur/tornado,elelianghh/tornado,MjAbuz/tornado,sevenguin/tornado,obsh/tornado,icejoywoo/tornado,shaohung001/tornado,bdarnell/tornado,nordaux/tornado,eklitzke/tornado,dsseter/tornado,andyaguiar/tornado,jsjohnst/tornado,Drooids/tornado,xinyu7/tornado,elelianghh/tornado,wsyzxcn/tornado,elijah513/tornado,liqueur/tornado,cyrusin/tornado,yangkf1985/tornado,xinyu7/tornado,allenl203/tornado,obsh/tornado,eXcomm/tornado,ajdavis/tornado,bdarnell/tornado,Callwoola/tornado,liqueur/tornado,chenxiaba/tornado,liqueur/tornado,jparise/tornado,gwillem/tornado,fengsp/tornado,codecov/tornado,LTD-Beget/tornado,ovidiucp/tornado,QuanZag/tornado,jampp/tornado,shashankbassi92/tornado,icejoywoo/tornado,Fydot/tornado,akalipetis/tornado,whip112/tornado,cyrilMargaria/tornado,Lancher/tornado,lilydjwg/tornado,yuyangit/tornado,ovidiucp/tornado,mehmetkose/tornado,sxfmol/tornado,codeb2cc/tornado,johan--/tornado,Windsooon/tornado,ListFranz/tornado,BencoLee/tornado,akalipetis/tornado,ms7s/tornado,nbargnesi/tornado,ms7s/tornado,0xkag/tornado,yuezhonghua/tornado,Acidburn0zzz/tornado,sevenguin/tornado,elelianghh/tornado,cyrusin/tornado,shaohung001/tornado,ovidiucp/tornado,jonashagstedt/tornado,kangbiao/tornado,yuyangit/tornado,bywbilly/tornado,hhru/tornado,wsyzxcn/tornado,arthurdarcet/tornado,jsjohnst/tornado,eklitzke/tornado,noxiouz/tornado,anandology/tornado,takeshineshiro/tornado,ifduyue/tornado,shashankbassi92/tornado,SuminAndrew/tornado,yuezhonghua/tornado,elelianghh/tornado,kaushik94/tornado,codeb2cc/tornado,erichuang1994/tornado,importcjj/tornado,eklitzke/tornado,ubear/tornado,nbargnesi/tornado,dsseter/tornado,ColorFuzzy/tornado,erichuang1994/tornado,drewmiller/tornado,ovidiucp/tornado,wsyzxcn/tornado,coderhaoxin/tornado,importcjj/tornado,elijah513/tornado,QuanZag/tornado,fengshao0907/tornado,bywbilly/tornado,zguangyu/tornado,gitchs/tornado,Lancher/tornado,Callwoola/tornado,noxiouz/tornado,jsjohnst/tornado,fengsp/tornado,hzruandd/tornado,NoyaInRain/tornado,Polyconseil/tornado,obsh/tornado,jampp/tornado,ydaniv/tornado,bufferx/tornado,takeshineshiro/tornado,futurechallenger/tornado,fengsp/tornado,yuyangit/tornado,allenl203/tornado,ymero/tornado,jampp/tornado,yangkf1985/tornado,mehmetkose/tornado,ymero/tornado,coderhaoxin/tornado,johan--/tornado,zguangyu/tornado,LTD-Beget/tornado,NoyaInRain/tornado,codeb2cc/tornado,Callwoola/tornado,AlphaStaxLLC/tornado,andyaguiar/tornado,chenxiaba/tornado,akalipetis/tornado,dongpinglai/my_tornado,kevinge314gh/tornado,leekchan/tornado_test,fengsp/tornado,insflow/tornado,shashankbassi92/tornado,Polyconseil/tornado,coderhaoxin/tornado,SuminAndrew/tornado,drewmiller/tornado,Aaron1992/tornado,importcjj/tornado,allenl203/tornado,kevinge314gh/tornado,Geoion/tornado,legnaleurc/tornado,dsseter/tornado,MjAbuz/tornado,jarrahwu/tornado,Aaron1992/tornado,Polyconseil/tornado,Fydot/tornado,wujuguang/tornado,Geoion/tornado,nbargnesi/tornado,kippandrew/tornado,ymero/tornado,icejoywoo/tornado,jarrahwu/tornado,InverseLina/tornado,Drooids/tornado,ListFranz/tornado,noxiouz/tornado,Batterfii/tornado,jampp/tornado,sxfmol/tornado,jampp/tornado,QuanZag/tornado,anjan-srivastava/tornado,pombredanne/tornado,bywbilly/tornado,ColorFuzzy/tornado,lilydjwg/tornado,anjan-srivastava/tornado,mivade/tornado,AlphaStaxLLC/tornado,Callwoola/tornado,kangbiao/tornado,dongpinglai/my_tornado,nephics/tornado,Fydot/tornado,wechasing/tornado,obsh/tornado,z-fork/tornado,tianyk/tornado-research,AlphaStaxLLC/tornado,ms7s/tornado,kippandrew/tornado,sxfmol/tornado,nordaux/tornado,liqueur/tornado,anandology/tornado,zhuochenKIDD/tornado,eXcomm/tornado,shaohung001/tornado,z-fork/tornado,insflow/tornado,erichuang1994/tornado,whip112/tornado,Batterfii/tornado,gwillem/tornado,andyaguiar/tornado,leekchan/tornado_test,tornadoweb/tornado,wujuguang/tornado,kippandrew/tornado,sevenguin/tornado,djt5019/tornado,fengshao0907/tornado,ubear/tornado,ymero/tornado,insflow/tornado,lujinda/tornado,AlphaStaxLLC/tornado,takeshineshiro/tornado,fengshao0907/tornado,legnaleurc/tornado,whip112/tornado,ColorFuzzy/tornado,ydaniv/tornado,BencoLee/tornado,BencoLee/tornado,ovidiucp/tornado,zhuochenKIDD/tornado,hhru/tornado,mivade/tornado,Drooids/tornado,codecov/tornado,elijah513/tornado,nbargnesi/tornado,Lancher/tornado,gitchs/tornado,futurechallenger/tornado,zguangyu/tornado,Geoion/tornado,VShangxiao/tornado,arthurdarcet/tornado,0x73/tornado,mlyundin/tornado,jparise/tornado,coderhaoxin/tornado,johan--/tornado,Geoion/tornado,xinyu7/tornado,wechasing/tornado,wujuguang/tornado,futurechallenger/tornado,wxhzk/tornado-1,ifduyue/tornado,bdarnell/tornado,gwillem/tornado,bufferx/tornado,yangkf1985/tornado,bufferx/tornado,takeshineshiro/tornado,NoyaInRain/tornado,lilydjwg/tornado,anjan-srivastava/tornado,futurechallenger/tornado,noxiouz/tornado,xinyu7/tornado,304471720/tornado,0xkag/tornado,ydaniv/tornado,NoyaInRain/tornado,icejoywoo/tornado,sxfmol/tornado,cyrusin/tornado,InverseLina/tornado,xinyu7/tornado,mivade/tornado,Windsooon/tornado,bufferx/tornado,hhru/tornado,takeshineshiro/tornado,ubear/tornado,importcjj/tornado | tornado/test/runtests.py | tornado/test/runtests.py | #!/usr/bin/env python
import unittest
TEST_MODULES = [
'tornado.httputil.doctests',
'tornado.iostream.doctests',
'tornado.util.doctests',
'tornado.test.auth_test',
'tornado.test.curl_httpclient_test',
'tornado.test.escape_test',
'tornado.test.gen_test',
'tornado.test.httpclient_test',
'tornado.test.httpserver_test',
'tornado.test.httputil_test',
'tornado.test.import_test',
'tornado.test.ioloop_test',
'tornado.test.iostream_test',
'tornado.test.process_test',
'tornado.test.simple_httpclient_test',
'tornado.test.stack_context_test',
'tornado.test.template_test',
'tornado.test.testing_test',
'tornado.test.twisted_test',
'tornado.test.web_test',
'tornado.test.wsgi_test',
]
def all():
return unittest.defaultTestLoader.loadTestsFromNames(TEST_MODULES)
if __name__ == '__main__':
# The -W command-line option does not work in a virtualenv with
# python 3 (as of virtualenv 1.7), so configure warnings
# programmatically instead.
import warnings
# Be strict about most warnings. This also turns on warnings that are
# ignored by default, including DeprecationWarnings and
# python 3.2's ResourceWarnings.
warnings.filterwarnings("error")
# Tornado shouldn't use anything deprecated, but some of our
# dependencies do (last match wins).
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("error", category=DeprecationWarning,
module=r"tornado\..*")
import tornado.testing
tornado.testing.main()
| #!/usr/bin/env python
import unittest
TEST_MODULES = [
'tornado.httputil.doctests',
'tornado.iostream.doctests',
'tornado.util.doctests',
'tornado.test.auth_test',
'tornado.test.curl_httpclient_test',
'tornado.test.escape_test',
'tornado.test.gen_test',
'tornado.test.httpclient_test',
'tornado.test.httpserver_test',
'tornado.test.httputil_test',
'tornado.test.import_test',
'tornado.test.ioloop_test',
'tornado.test.iostream_test',
'tornado.test.process_test',
'tornado.test.simple_httpclient_test',
'tornado.test.stack_context_test',
'tornado.test.template_test',
'tornado.test.testing_test',
'tornado.test.twisted_test',
'tornado.test.web_test',
'tornado.test.wsgi_test',
]
def all():
return unittest.defaultTestLoader.loadTestsFromNames(TEST_MODULES)
if __name__ == '__main__':
import tornado.testing
tornado.testing.main()
| apache-2.0 | Python |
898ca320fe2c5d70e050d55866d8fcf6b2a8d532 | Add register_opts function to sslutils | openstack/oslo.service | oslo_service/sslutils.py | oslo_service/sslutils.py | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import ssl
from oslo_service._i18n import _
from oslo_service import _options
config_section = 'ssl'
def list_opts():
"""Entry point for oslo-config-generator."""
return [(config_section, copy.deepcopy(_options.ssl_opts))]
def register_opts(conf):
"""Registers sslutils config options."""
return conf.register_opts(_options.ssl_opts, config_section)
def is_enabled(conf):
conf.register_opts(_options.ssl_opts, config_section)
cert_file = conf.ssl.cert_file
key_file = conf.ssl.key_file
ca_file = conf.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(conf, sock):
conf.register_opts(_options.ssl_opts, config_section)
ssl_kwargs = {
'server_side': True,
'certfile': conf.ssl.cert_file,
'keyfile': conf.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if conf.ssl.ca_file:
ssl_kwargs['ca_certs'] = conf.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
| # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import ssl
from oslo_service._i18n import _
from oslo_service import _options
config_section = 'ssl'
def list_opts():
"""Entry point for oslo-config-generator."""
return [(config_section, copy.deepcopy(_options.ssl_opts))]
def is_enabled(conf):
conf.register_opts(_options.ssl_opts, config_section)
cert_file = conf.ssl.cert_file
key_file = conf.ssl.key_file
ca_file = conf.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(conf, sock):
conf.register_opts(_options.ssl_opts, config_section)
ssl_kwargs = {
'server_side': True,
'certfile': conf.ssl.cert_file,
'keyfile': conf.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if conf.ssl.ca_file:
ssl_kwargs['ca_certs'] = conf.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
| apache-2.0 | Python |
ad89d2c87e8f835b2ae2cc204393143b783f3480 | Add time decorator | LIAMF-USP/word2vec-TF,LIAMF-USP/word2vec-TF,LIAMF-USP/word2vec-TF | src/utils.py | src/utils.py | import pickle
import time
import unittest
timing = {}
def get_time(f, args=[]):
"""
After using timeit we can get the duration of the function f
when it was applied in parameters args. Normally it is expected
that args is a list of parameters, but it can be also a single parameter.
:type f: function
:type args: list
:rtype: float
"""
if type(args) != list:
args = [args]
key = f.__name__
if args != []:
key += "-" + "-".join([str(arg) for arg in args])
return timing[key]
def timeit(index_args=[]):
def dec(method):
"""
Decorator for time information
"""
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
timed.__name__ = method.__name__
te = time.time()
fkey = method.__name__
for i, arg in enumerate(args):
if i in index_args:
fkey += "-" + str(arg)
timing[fkey] = te - ts
return result
return timed
return dec
def get_date_and_time():
"""
Function to create an unique label
using the date and time.
:rtype: str
"""
return time.strftime('%d-%m-%Y_%H-%M-%S')
def run_test(testClass, header):
"""
Function to run all the tests from a class of tests.
:type testClass: unittest.TesCase
:type header: str
"""
print(header)
suite = unittest.TestLoader().loadTestsFromTestCase(testClass)
unittest.TextTestRunner(verbosity=2).run(suite)
def get_revert_dict(some_dict):
"""
Reverting a dict
:type some_dict: dict
:rtype: dict
"""
reverse_dict = {v: k for k, v in some_dict.items()}
return reverse_dict
def load_embeddings(pickle_path):
"""
Function that receives a path to a pickle file. We assume that
in this file we have two objects:
-- embeddings : the matrix of word embeddings
-- word2index : a dict of the form word : index.
:type pickle_path: str
:rtype: np.array, dict
"""
with open(pickle_path, "rb") as file:
d = pickle.load(file)
pass
embeddings = d['embeddings']
word2index = d['word2index']
del d
return embeddings, word2index
| import pickle
import time
import unittest
def get_date_and_time():
"""
Function to create an unique label
using the date and time.
:rtype: str
"""
return time.strftime('%d-%m-%Y_%H-%M-%S')
def run_test(testClass, header):
"""
Function to run all the tests from a class of tests.
:type testClass: unittest.TesCase
:type header: str
"""
print(header)
suite = unittest.TestLoader().loadTestsFromTestCase(testClass)
unittest.TextTestRunner(verbosity=2).run(suite)
def get_revert_dict(some_dict):
"""
Reverting a dict
:type some_dict: dict
:rtype: dict
"""
reverse_dict = {v: k for k, v in some_dict.items()}
return reverse_dict
def load_embeddings(pickle_path):
"""
Function that receives a path to a pickle file. We assume that
in this file we have two objects:
-- embeddings : the matrix of word embeddings
-- word2index : a dict of the form word : index.
:type pickle_path: str
:rtype: np.array, dict
"""
with open(pickle_path, "rb") as file:
d = pickle.load(file)
pass
embeddings = d['embeddings']
word2index = d['word2index']
del d
return embeddings, word2index
| mit | Python |
a58408c28bffb29fea72d2ec045ffcf23ef3f29a | bump version number to 0.0.1 | ckcnik/django-forums,byteweaver/django-forums,ckcnik/django-forums,byteweaver/django-forums | forums/__init__.py | forums/__init__.py | __version__ = '0.0.1'
| __version__ = '0.0.0'
| bsd-3-clause | Python |
82bb0ba1f85f2277589eb8309903f38e22e342b7 | Fix bug where arrays are sent as strings. | AustinStoneProjects/Founderati-Server,AustinStoneProjects/Founderati-Server | project/utils.py | project/utils.py | from flask import jsonify
def mergeFrom(fromData, toData, keysToMerge, require=True):
for key in keysToMerge:
if key in fromData:
toData[key] = fromData[key]
elif require == True:
raise Exception('Missing required parameter %s' % key)
# gracefully handles errors with incomplete model data.
def jsonFields(modelInstance, fields, response=True, extra=None):
entries = {'error' : None}
for key in fields:
val = modelInstance.get(key)
entries[key] = val if (type(val) in [str, int, float, list, dict] or val is None) else str(val)
if not extra is None:
entries.update(extra)
## TODO: reporting of incomplete models
if response:
return jsonify(**entries)
return entries
#This is for helping construct the proper query to send off to the search function
#elastic search term queries can only take single words without spaces
#this function breaks any multi word filters into a group of single word filters
def fix_term_filter(term_filter):
"""
Makes sure that the term_filter has only a single entry and the term filter doesn't have spaces. If the
term_filter does have spaces, it returns a list of single word term_filters for the same field. Otherwise
returns a list with a single term filter
"""
output = []
for entry in term_filter:
if " " in (term_filter[entry]):
terms = term_filter[entry].split(" ")
for term in terms:
output.append({entry:term.lower()})
else:
output.append({entry:term_filter[entry].lower()})
return output
def arr_diff(a,b):
# a = [1,2,3]
# b = [2,3,4,5,6]
#diff(a,b) = 1
b = set(b)
return [aa for aa in a if aa not in b]
| from flask import jsonify
def mergeFrom(fromData, toData, keysToMerge, require=True):
for key in keysToMerge:
if key in fromData:
toData[key] = fromData[key]
elif require == True:
raise Exception('Missing required parameter %s' % key)
# gracefully handles errors with incomplete model data.
def jsonFields(modelInstance, fields, response=True, extra=None):
entries = {'error' : None}
for key in fields:
val = modelInstance.get(key)
entries[key] = val if (type(val) in [str, int, float] or val is None) else str(val)
if not extra is None:
entries.update(extra)
## TODO: reporting of incomplete models
if response:
return jsonify(**entries)
return entries
#This is for helping construct the proper query to send off to the search function
#elastic search term queries can only take single words without spaces
#this function breaks any multi word filters into a group of single word filters
def fix_term_filter(term_filter):
"""
Makes sure that the term_filter has only a single entry and the term filter doesn't have spaces. If the
term_filter does have spaces, it returns a list of single word term_filters for the same field. Otherwise
returns a list with a single term filter
"""
output = []
for entry in term_filter:
if " " in (term_filter[entry]):
terms = term_filter[entry].split(" ")
for term in terms:
output.append({entry:term.lower()})
else:
output.append({entry:term_filter[entry].lower()})
return output
def arr_diff(a,b):
# a = [1,2,3]
# b = [2,3,4,5,6]
#diff(a,b) = 1
b = set(b)
return [aa for aa in a if aa not in b] | apache-2.0 | Python |
3fe0f73d9c9ca177cefd61636f10be77aa1261d0 | Remove bank information from form. | fgaudin/aemanager,fgaudin/aemanager,fgaudin/aemanager | autoentrepreneur/forms.py | autoentrepreneur/forms.py | from django.forms import ModelForm
from django import forms
from django.utils.translation import ugettext_lazy as _
from autoentrepreneur.models import UserProfile, AUTOENTREPRENEUR_ACTIVITY, \
AUTOENTREPRENEUR_PAYMENT_OPTION
class UserProfileForm(ModelForm):
company_name = forms.CharField(required=False, max_length=255, label=_('Company name'))
company_id = forms.CharField(max_length=50, label=_('Company id')) # SIRET for France
activity = forms.ChoiceField(choices=AUTOENTREPRENEUR_ACTIVITY, label=_('Activity'))
creation_date = forms.DateField(label=_('Creation date'))
creation_help = forms.BooleanField(required=False, label=_('Creation help')) # accre
freeing_tax_payment = forms.BooleanField(required=False, label=_('Freeing tax payment')) # versement liberatoire
payment_option = forms.ChoiceField(choices=AUTOENTREPRENEUR_PAYMENT_OPTION, label=_('Payment option'))
class Meta:
model = UserProfile
exclude = ['user', 'address']
| from django.forms import ModelForm
from django import forms
from django.utils.translation import ugettext_lazy as _
from autoentrepreneur.models import UserProfile, AUTOENTREPRENEUR_ACTIVITY, \
AUTOENTREPRENEUR_PAYMENT_OPTION
class UserProfileForm(ModelForm):
company_name = forms.CharField(required=False, max_length=255, label=_('Company name'))
company_id = forms.CharField(max_length=50, label=_('Company id')) # SIRET for France
bank_information = forms.CharField(required=False, max_length=255, label=_('Bank information'))
activity = forms.ChoiceField(choices=AUTOENTREPRENEUR_ACTIVITY, label=_('Activity'))
creation_date = forms.DateField(label=_('Creation date'))
creation_help = forms.BooleanField(required=False, label=_('Creation help')) # accre
freeing_tax_payment = forms.BooleanField(required=False, label=_('Freeing tax payment')) # versement liberatoire
payment_option = forms.ChoiceField(choices=AUTOENTREPRENEUR_PAYMENT_OPTION, label=_('Payment option'))
class Meta:
model = UserProfile
exclude = ['user', 'address']
| agpl-3.0 | Python |
0caf7c1ac8ea2a0fc9c2d7c46121fc48d830d855 | update pymhex for new pycaffe interface | ronghanghu/mhex_graph,ronghanghu/mhex_graph | pymhex/load_mhex_into_caffe.py | pymhex/load_mhex_into_caffe.py | #! /usr/bin/python
def load_mhex(caffe_prototxt, caffe_model, mhex_mat_file, save_file, load_mat1=True, load_mat2=True):
"""
load matrices dumped from matlab into Caffe network
MHEX implemenatation in Caffe consists of two InnerProductLayer at bottom and
top, and one SoftmaxLayer between them.
The inner product layer below softmax should be named "mhex_mat1", and
The inner product layer above softmax should be named "mhex_mat2".
"""
import numpy as np, scipy.io
import caffe
# load architecture for pure Caffe net and the fine-tuned model
caffe.set_mode_cpu()
if len(caffe_model) > 0:
net = caffe.Net(caffe_prototxt, caffe_model, caffe.TRAIN)
else:
net = caffe.Net(caffe_prototxt, caffe.TRAIN)
# load R-CNN model weights and scalings
mat = scipy.io.loadmat(mhex_mat_file)
M1, M2 = mat['M1'], mat['M2']
# Caffe requires 4 dimensions
M1 = M1[np.newaxis, np.newaxis, :, :]
M2 = M2[np.newaxis, np.newaxis, :, :]
# coerce to C-contiguous memory for Caffe
# (numpy makes it seem to be so already, but its illusory: check .flags)
M1 = np.ascontiguousarray(M1)
M2 = np.ascontiguousarray(M2)
# transplant SVMs into fc-rcnn. [0] is weight matrix, [1] is bias
if load_mat1:
net.params['mhex_mat1'][0].data[...] = M1
net.params['mhex_mat1'][1].data[...] = 0
if load_mat2:
net.params['mhex_mat2'][0].data[...] = M2
net.params['mhex_mat2'][1].data[...] = 0
# save
net.save(save_file)
| #! /usr/bin/python
def load_mhex(caffe_prototxt, caffe_model, mhex_mat_file, save_file, load_mat1=True, load_mat2=True):
"""
load matrices dumped from matlab into Caffe network
MHEX implemenatation in Caffe consists of two InnerProductLayer at bottom and
top, and one SoftmaxLayer between them.
The inner product layer below softmax should be named "mhex_mat1", and
The inner product layer above softmax should be named "mhex_mat2".
"""
import numpy as np, scipy.io
import caffe
# load architecture for pure Caffe net and the fine-tuned model
caffe.set_mode_cpu()
if len(caffe_model) > 0:
net = caffe.Net(caffe_prototxt, caffe_model)
else:
net = caffe.Net(caffe_prototxt)
# load R-CNN model weights and scalings
mat = scipy.io.loadmat(mhex_mat_file)
M1, M2 = mat['M1'], mat['M2']
# Caffe requires 4 dimensions
M1 = M1[np.newaxis, np.newaxis, :, :]
M2 = M2[np.newaxis, np.newaxis, :, :]
# coerce to C-contiguous memory for Caffe
# (numpy makes it seem to be so already, but its illusory: check .flags)
M1 = np.ascontiguousarray(M1)
M2 = np.ascontiguousarray(M2)
# transplant SVMs into fc-rcnn. [0] is weight matrix, [1] is bias
if load_mat1:
net.params['mhex_mat1'][0].data[...] = M1
net.params['mhex_mat1'][1].data[...] = 0
if load_mat2:
net.params['mhex_mat2'][0].data[...] = M2
net.params['mhex_mat2'][1].data[...] = 0
# save
net.save(save_file)
| bsd-2-clause | Python |
170ffc100f36f3676a32f4ea5a464dcb131b7f2f | Add docstrings for TestUtils class. | jaidevd/pysemantic,jaidevd/pysemantic,dataculture/pysemantic,dataculture/pysemantic | pysemantic/tests/test_utils.py | pysemantic/tests/test_utils.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 jaidev <jaidev@newton>
#
# Distributed under terms of the BSD 3-clause license.
"""
Tests for a the pysemantic.utils module.
"""
import unittest
import os.path as op
from pysemantic.utils import colnames, get_md5_checksum
class TestUtils(unittest.TestCase):
def setUp(self):
self.filepath = op.join(op.abspath(op.dirname(__file__)), "testdata",
"iris.csv")
def test_colnames(self):
"""Test if the column names are read correctly from a file."""
ideal = ['Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width',
'Species']
actual = colnames(self.filepath)
self.assertItemsEqual(actual, ideal)
def test_colnames_infer_parser_from_extension(self):
"""Test if the colnames function can infer the correct parser from the
file extension."""
filepath = op.join(op.abspath(op.dirname(__file__)), "testdata",
"person_activity.tsv")
ideal = "sequence_name tag date x y z activity".split()
actual = colnames(filepath)
self.assertItemsEqual(actual, ideal)
def test_colnames_parser_arg(self):
"""Test if the colnames are read if the parser is specified."""
filepath = op.join(op.abspath(op.dirname(__file__)), "testdata",
"person_activity.tsv")
ideal = "sequence_name tag date x y z activity".split()
from pandas import read_table
actual = colnames(filepath, parser=read_table)
self.assertItemsEqual(actual, ideal)
def test_colnames_infer_parser_from_sep(self):
"""Test if the colnames are read if the separator is specified."""
filepath = op.join(op.abspath(op.dirname(__file__)), "testdata",
"person_activity.tsv")
ideal = "sequence_name tag date x y z activity".split()
actual = colnames(filepath, sep='\\t')
self.assertItemsEqual(actual, ideal)
def test_md5(self):
"""Test the md5 checksum calculator."""
ideal = "9b3ecf3031979169c0ecc5e03cfe20a6"
actual = get_md5_checksum(self.filepath)
self.assertEqual(ideal, actual)
if __name__ == '__main__':
unittest.main()
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 jaidev <jaidev@newton>
#
# Distributed under terms of the BSD 3-clause license.
"""
Tests for a the pysemantic.utils module.
"""
import unittest
import os.path as op
from pysemantic.utils import colnames, get_md5_checksum
class TestUtils(unittest.TestCase):
def setUp(self):
self.filepath = op.join(op.abspath(op.dirname(__file__)), "testdata",
"iris.csv")
def test_colnames(self):
ideal = ['Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width',
'Species']
actual = colnames(self.filepath)
self.assertItemsEqual(actual, ideal)
def test_colnames_infer_parser_from_extension(self):
filepath = op.join(op.abspath(op.dirname(__file__)), "testdata",
"person_activity.tsv")
ideal = "sequence_name tag date x y z activity".split()
actual = colnames(filepath)
self.assertItemsEqual(actual, ideal)
def test_colnames_parser_arg(self):
filepath = op.join(op.abspath(op.dirname(__file__)), "testdata",
"person_activity.tsv")
ideal = "sequence_name tag date x y z activity".split()
from pandas import read_table
actual = colnames(filepath, parser=read_table)
self.assertItemsEqual(actual, ideal)
def test_colnames_infer_parser_from_sep(self):
filepath = op.join(op.abspath(op.dirname(__file__)), "testdata",
"person_activity.tsv")
ideal = "sequence_name tag date x y z activity".split()
actual = colnames(filepath, sep='\\t')
self.assertItemsEqual(actual, ideal)
def test_md5(self):
ideal = "9b3ecf3031979169c0ecc5e03cfe20a6"
actual = get_md5_checksum(self.filepath)
self.assertEqual(ideal, actual)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | Python |
535497f5374d68a148443ba54ee74f66c44a6947 | fix pep8 errors | kderynski/acos-client,dougwig/acos-client,mdurrant-b3/acos-client,dkiser/acos-client,a10networks/acos-client,sasukeh/acos-client | acos_client/v21/admin.py | acos_client/v21/admin.py | import base
class Admin(base.BaseV21):
@property
def administrator(self):
return self.Administrator(self.client)
class Administrator(base.BaseV21):
def all(self, **kwargs):
return self._get('system.admin.administrator.getAll', **kwargs)
def get(self, name, **kwargs):
params = {"admin_name": name}
return self._post('system.admin.administrator.search', params, **kwargs)
def create(self, name, **kwargs):
params = {
"administrator": {
"admin_name": name
}
}
return self._post('system.admin.administrator.create', params, **kwargs)
def update(self, name, **kwargs):
params = {
"administrator": {
"admin_name": name
}
}
return self._post('system.admin.administrator.update', params, **kwargs)
def delete(self, name, **kwargs):
params = {"admin_name": name}
return self._post('system.admin.administrator.delete', params, **kwargs)
def all_delete(self, **kwargs):
return self._post('system.admin.administrator.deleteAll', **kwargs)
| import base
class Admin(base.BaseV21):
@property
def administrator(self):
return self.Administrator(self.client)
class Administrator(base.BaseV21):
def all(self, **kwargs):
return self._get('system.admin.administrator.getAll', **kwargs)
def get(self, name, **kwargs):
params = {"admin_name": name}
return self._post('system.admin.administrator.search', params, **kwargs)
def create(self, name, **kwargs):
params = {
"administrator": {
"admin_name": name
}
}
return self._post('system.admin.administrator.create', params, **kwargs)
def update(self, name, **kwargs):
params = {
"administrator": {
"admin_name": name
}
}
return self._post('system.admin.administrator.update', params, **kwargs)
def delete(self, name, **kwargs):
params = {"admin_name": name }
return self._post('system.admin.administrator.delete', params, **kwargs)
def all_delete(self, **kwargs):
return self._post('system.admin.administrator.deleteAll', **kwargs)
| apache-2.0 | Python |
6d0a281c3056c03aa482aac5ec1dbc7c4a85fa23 | Update version number. | tensorflow/gan,tensorflow/gan | tensorflow_gan/python/version.py | tensorflow_gan/python/version.py | # coding=utf-8
# Copyright 2019 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define TensorFlow GAN version information."""
# We follow Semantic Versioning (https://semver.org/)
_MAJOR_VERSION = '1'
_MINOR_VERSION = '0'
_PATCH_VERSION = '0'
# When building releases, we can update this value on the release branch to
# reflect the current release candidate ('rc0', 'rc1') or, finally, the official
# stable release (indicated by `_VERSION_SUFFIX = ''`). Outside the context of a
# release branch, the current version is by default assumed to be a
# 'development' version, labeled 'dev'.
_VERSION_SUFFIX = 'dev'
# Example, '0.4.0-dev'
__version__ = '.'.join([
_MAJOR_VERSION,
_MINOR_VERSION,
_PATCH_VERSION,
])
if _VERSION_SUFFIX:
__version__ = '{}-{}'.format(__version__, _VERSION_SUFFIX)
| # coding=utf-8
# Copyright 2019 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define TensorFlow GAN version information."""
# We follow Semantic Versioning (https://semver.org/)
_MAJOR_VERSION = '0'
_MINOR_VERSION = '0'
_PATCH_VERSION = '0'
# When building releases, we can update this value on the release branch to
# reflect the current release candidate ('rc0', 'rc1') or, finally, the official
# stable release (indicated by `_VERSION_SUFFIX = ''`). Outside the context of a
# release branch, the current version is by default assumed to be a
# 'development' version, labeled 'dev'.
_VERSION_SUFFIX = 'dev'
# Example, '0.4.0-dev'
__version__ = '.'.join([
_MAJOR_VERSION,
_MINOR_VERSION,
_PATCH_VERSION,
])
if _VERSION_SUFFIX:
__version__ = '{}-{}'.format(__version__, _VERSION_SUFFIX)
| apache-2.0 | Python |
41d366140d07e6b7723bacb6e61469c111b6ca61 | Update inception model path Change: 171910161 | penguin138/serving,sreddybr3/serving,penguin138/serving,tensorflow/serving,tensorflow/serving,penguin138/serving,tensorflow/serving,sreddybr3/serving,sreddybr3/serving,penguin138/serving,sreddybr3/serving,tensorflow/serving | tensorflow_serving/workspace.bzl | tensorflow_serving/workspace.bzl | # TensorFlow Serving external dependencies that can be loaded in WORKSPACE
# files.
load('@org_tensorflow//tensorflow:workspace.bzl', 'tf_workspace')
# All TensorFlow Serving external dependencies.
# workspace_dir is the absolute path to the TensorFlow Serving repo. If linked
# as a submodule, it'll likely be '__workspace_dir__ + "/serving"'
def tf_serving_workspace():
native.new_local_repository(
name = "inception_model",
path = "tf_models/research/inception",
build_file = "tf_models/research/inception/inception/BUILD",
)
tf_workspace(path_prefix = "", tf_repo_name = "org_tensorflow")
# ===== gRPC dependencies =====
native.bind(
name = "libssl",
actual = "@boringssl//:ssl",
)
native.bind(
name = "zlib",
actual = "@zlib_archive//:zlib",
)
| # TensorFlow Serving external dependencies that can be loaded in WORKSPACE
# files.
load('@org_tensorflow//tensorflow:workspace.bzl', 'tf_workspace')
# All TensorFlow Serving external dependencies.
# workspace_dir is the absolute path to the TensorFlow Serving repo. If linked
# as a submodule, it'll likely be '__workspace_dir__ + "/serving"'
def tf_serving_workspace():
native.new_local_repository(
name = "inception_model",
path = "tf_models/inception",
build_file = "tf_models/inception/inception/BUILD",
)
tf_workspace(path_prefix = "", tf_repo_name = "org_tensorflow")
# ===== gRPC dependencies =====
native.bind(
name = "libssl",
actual = "@boringssl//:ssl",
)
native.bind(
name = "zlib",
actual = "@zlib_archive//:zlib",
)
| apache-2.0 | Python |
9120501aa20d2cb545e7ea3373a71d155a25d90a | 置換関数は配列を受け取り、splitは呼び出し側にさせる | Roadagain/EasiAAR | easiaar.py | easiaar.py | import sys
def translate_words(words, dictionary, after_sep = ' '):
translated = []
for word in words:
if word in dictionary:
translated.append(dictionary[word])
else:
translated.append(word)
return after_sep.join(translated)
dictionary = {}
for line in open(sys.argv[1]):
before, after = line.split()
dictionary[before] = after
for line in sys.stdin:
print(translate_words(line.split(), dictionary))
| import sys
def translate_words(sentence, dictionary, after_sep = ' '):
translated = []
for word in sentence.split():
if word in dictionary:
translated.append(dictionary[word])
else:
translated.append(word)
return after_sep.join(translated)
dictionary = {}
for line in open(sys.argv[1]):
before, after = line.split()
dictionary[before] = after
for line in sys.stdin:
print(translate_words(line, dictionary))
| mit | Python |
b72722c0873d7b0aaeee995ee62576af16c1cf40 | Update to reflect changes to ddesc | ChinaQuants/blaze,caseyclements/blaze,maxalbert/blaze,jcrist/blaze,aterrel/blaze,cowlicks/blaze,dwillmer/blaze,alexmojaki/blaze,cowlicks/blaze,maxalbert/blaze,LiaoPan/blaze,jdmcbr/blaze,aterrel/blaze,scls19fr/blaze,jcrist/blaze,jdmcbr/blaze,ChinaQuants/blaze,xlhtc007/blaze,ContinuumIO/blaze,alexmojaki/blaze,mrocklin/blaze,FrancescAlted/blaze,cpcloud/blaze,ContinuumIO/blaze,nkhuyu/blaze,LiaoPan/blaze,nkhuyu/blaze,aterrel/blaze,caseyclements/blaze,xlhtc007/blaze,mrocklin/blaze,dwillmer/blaze,scls19fr/blaze,FrancescAlted/blaze,FrancescAlted/blaze,cpcloud/blaze,FrancescAlted/blaze | blaze/tests/test_datetime.py | blaze/tests/test_datetime.py | from __future__ import absolute_import, division, print_function
import unittest
from datetime import date, datetime
import blaze
from datashape import dshape
from blaze.datadescriptor import ddesc_as_py
class TestDate(unittest.TestCase):
def test_create(self):
a = blaze.array(date(2000, 1, 1))
self.assertEqual(a.dshape, dshape('date'))
self.assertEqual(ddesc_as_py(a.ddesc), date(2000, 1, 1))
a = blaze.array([date(1490, 3, 12), date(2020, 7, 15)])
self.assertEqual(a.dshape, dshape('2 * date'))
self.assertEqual(ddesc_as_py(a.ddesc),
[date(1490, 3, 12), date(2020, 7, 15)])
a = blaze.array(['1490-03-12', '2020-07-15'], dshape='date')
self.assertEqual(a.dshape, dshape('2 * date'))
self.assertEqual(ddesc_as_py(a.ddesc),
[date(1490, 3, 12), date(2020, 7, 15)])
def test_properties(self):
a = blaze.array(['1490-03-12', '2020-07-15'], dshape='date')
y = a.year
self.assertEqual(ddesc_as_py(blaze.eval(y).ddesc), [1490, 2020])
m = a.month
self.assertEqual(ddesc_as_py(blaze.eval(m).ddesc), [3, 7])
d = a.day
self.assertEqual(ddesc_as_py(blaze.eval(d).ddesc), [12, 15])
| from __future__ import absolute_import, division, print_function
import unittest
from datetime import date, datetime
import blaze
from datashape import dshape
from blaze.datadescriptor import dd_as_py
class TestDate(unittest.TestCase):
def test_create(self):
a = blaze.array(date(2000, 1, 1))
self.assertEqual(a.dshape, dshape('date'))
self.assertEqual(dd_as_py(a._data), date(2000, 1, 1))
a = blaze.array([date(1490, 3, 12), date(2020, 7, 15)])
self.assertEqual(a.dshape, dshape('2 * date'))
self.assertEqual(dd_as_py(a._data),
[date(1490, 3, 12), date(2020, 7, 15)])
a = blaze.array(['1490-03-12', '2020-07-15'], dshape='date')
self.assertEqual(a.dshape, dshape('2 * date'))
self.assertEqual(dd_as_py(a._data),
[date(1490, 3, 12), date(2020, 7, 15)])
def test_properties(self):
a = blaze.array(['1490-03-12', '2020-07-15'], dshape='date')
y = a.year
self.assertEqual(dd_as_py(blaze.eval(y)._data), [1490, 2020])
m = a.month
self.assertEqual(dd_as_py(blaze.eval(m)._data), [3, 7])
d = a.day
self.assertEqual(dd_as_py(blaze.eval(d)._data), [12, 15])
| bsd-3-clause | Python |
eb33e6393cb85217aeea5c6c3cc0c24ddc8ad7f3 | Remove unnecessary lines | IshitaTakeshi/PCANet | example.py | example.py | from os.path import exists
import pickle
import gzip
from urllib.request import urlretrieve
from sklearn.datasets import load_digits
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
from pcanet import PCANet
def load_mnist():
url = "http://deeplearning.net/data/mnist/mnist.pkl.gz"
mnist_compressed = "mnist.pkl.gz"
if not exists(mnist_compressed):
print("Downloading MNIST")
urlretrieve(url, mnist_compressed)
# Load the dataset
with gzip.open(mnist_compressed, "rb") as f:
u = pickle._Unpickler(f)
u.encoding = "latin1"
data = u.load()
data = [(X.reshape(-1, 28, 28), y) for X, y in data]
return data
n_train = 1000
n_test = 1000
train_set, valid_set, test_set = load_mnist()
images_train, y_train = train_set
images_test, y_test = test_set
images_train, y_train = shuffle(images_train, y_train, random_state=0)
images_train, y_train = images_train[:n_train], y_train[:n_train]
images_test, y_test = shuffle(images_test, y_test, random_state=0)
images_test, y_test = images_test[:n_test], y_test[:n_test]
pcanet = PCANet(
image_shape=28,
filter_shape_l1=2, step_shape_l1=1, n_l1_output=4,
filter_shape_l2=2, step_shape_l2=1, n_l2_output=4,
block_shape=2
)
pcanet.validate_structure()
pcanet.fit(images_train)
X_train = pcanet.transform(images_train)
X_test = pcanet.transform(images_test)
model = RandomForestClassifier(n_estimators=100, random_state=1234, n_jobs=-1)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("accuracy: " + str(accuracy))
| from os.path import exists
import pickle
import gzip
from urllib.request import urlretrieve
from sklearn.datasets import load_digits
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
from pcanet import PCANet
def load_mnist():
url = "http://deeplearning.net/data/mnist/mnist.pkl.gz"
mnist_compressed = "mnist.pkl.gz"
if not exists(mnist_compressed):
print("Downloading MNIST")
urlretrieve(url, mnist_compressed)
# Load the dataset
with gzip.open(mnist_compressed, "rb") as f:
u = pickle._Unpickler(f)
u.encoding = "latin1"
data = u.load()
data = [(X.reshape(-1, 28, 28), y) for X, y in data]
return data
n_train = 1000
n_test = 1000
train_set, valid_set, test_set = load_mnist()
images_train, y_train = train_set
images_test, y_test = test_set
images_train, y_train = shuffle(images_train, y_train, random_state=0)
images_train, y_train = images_train[:n_train], y_train[:n_train]
images_test, y_test = shuffle(images_test, y_test, random_state=0)
images_test, y_test = images_test[:n_test], y_test[:n_test]
# digits = load_digits()
# images = digits.images
# y = digits.target
pcanet = PCANet(
image_shape=28,
filter_shape_l1=2, step_shape_l1=1, n_l1_output=4,
filter_shape_l2=2, step_shape_l2=1, n_l2_output=4,
block_shape=2
)
pcanet.validate_structure()
pcanet.fit(images_train)
X_train = pcanet.transform(images_train)
X_test = pcanet.transform(images_test)
model = RandomForestClassifier(n_estimators=100, random_state=1234, n_jobs=-1)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("accuracy: " + str(accuracy))
| mit | Python |
612f458a0873d669092df8bb259a3c5787470747 | Fix the example. | pfaion/MPLAnimator | example.py | example.py | import numpy as np
import matplotlib
matplotlib.use("Qt5Agg")
import matplotlib.pyplot as plt
from MPLAnimator import Animator
def naive_estimator(x, data, h):
n = sum(1 for d in data if x-h/2 < d <= x+h/2)
N = len(data)
return n/(N*h)
data = [0.5, 0.7, 0.8, 1.9, 2.4, 6.1, 6.2, 7.3]
xs = np.arange(0, 8, 0.01)
h = 2
hist = [naive_estimator(x, data, h) for x in xs]
def setup():
plt.gcf().set_size_inches(8,6)
plt.suptitle("Naive Estimator for h = {}".format(h))
def frame(i):
plt.cla()
# plot original data
plt.plot(xs, hist)
plt.plot(data, [0]*len(data), 'xk')
plt.axhline(0, color='k', linewidth=0.5)
# calculate current interval
x = i / 10
x1, x2 = x-h/2, x+h/2
# calculate relative width for visualization
axis_to_data = plt.gca().transAxes + plt.gca().transData.inverted()
bottom = axis_to_data.transform((0, 0))[1]
top = -bottom
# plot visualization lines
plt.plot([x, x], [0, hist[i * 10]], '--C3')
plt.plot([x1, x1], [bottom, top], 'C3', linewidth=0.5)
plt.plot([x2, x2], [bottom, top], 'C3', linewidth=0.5)
plt.plot([x1, x2], [0, 0], 'C3', linewidth=0.5)
plt.fill_between([x1, x2], bottom, top, color='C3', alpha=0.3)
plt.plot(x, hist[i * 10], '.C3')
# highlight data in interval
highlight_data = [d for d in data if x1 < d <= x2]
plt.plot(highlight_data, [0]*len(highlight_data), 'oC3')
plt.xlim(-0.5, 8.5)
a = Animator(name='NaiveEstimator', setup_cb=setup)
a.setFrameCallback(frame_cb=frame, max_frame=80)
a.run(clear=False, prerendered=True)
| import numpy as np
import matplotlib
matplotlib.use("Qt5Agg")
import matplotlib.pyplot as plt
from MPLAnimator import Animator
def naive_estimator(x, data, h):
n = sum(1 for d in data if x-h/2 < d <= x+h/2)
N = len(data)
return n/(N*h)
data = [0.5, 0.7, 0.8, 1.9, 2.4, 6.1, 6.2, 7.3]
xs = np.arange(0, 8, 0.01)
h = 2
hist = [naive_estimator(x, data, h) for x in xs]
def setup():
plt.gcf().set_size_inches(8,6)
plt.suptitle("Naive Estimator for h = {}".format(h))
def frame(i):
plt.cla()
# plot original data
plt.plot(xs, hist)
plt.plot(data, [0]*len(data), 'xk')
plt.axhline(0, color='k', linewidth=0.5)
# calculate current interval
x = i / 10
x1, x2 = x-h/2, x+h/2
# calculate relative width for visualization
axis_to_data = plt.gca().transAxes + plt.gca().transData.inverted()
bottom = axis_to_data.transform((0, 0))[1]
top = -bottom
# plot visualization lines
plt.plot([x, x], [0, hist[i * 10]], '--C3')
plt.plot([x1, x1], [bottom, top], 'C3', linewidth=0.5)
plt.plot([x2, x2], [bottom, top], 'C3', linewidth=0.5)
plt.plot([x1, x2], [0, 0], 'C3', linewidth=0.5)
plt.fill_between([x1, x2], bottom, top, color='C3', alpha=0.3)
plt.plot(x, hist[i * 10], '.C3')
# highlight data in interval
highlight_data = [d for d in data if x1 < d <= x2]
plt.plot(highlight_data, [0]*len(highlight_data), 'oC3')
plt.xlim(-0.5, 8.5)
a = Animator(name='NaiveEstimator', setup_handle=setup)
a.setFrameCallback(frame_handle=frame, max_frame=80)
a.run(clear=False, prerendered=True)
| mit | Python |
56e647a6e3bac7185b645f75ece4059b56a4ec15 | optimize code for import | ResolveWang/WeiboSpider,ResolveWang/WeiboSpider | config/conf.py | config/conf.py | import os
import random
from yaml import load
config_path = os.path.join(os.path.dirname(__file__), 'spider.yaml')
with open(config_path, encoding='utf-8') as f:
cont = f.read()
cf = load(cont)
def get_db_args():
return cf.get('db')
def get_redis_args():
return cf.get('redis')
def get_timeout():
return cf.get('time_out')
def get_crawl_interal():
interal = random.randint(cf.get('min_crawl_interal'), cf.get('max_crawl_interal'))
return interal
def get_excp_interal():
return cf.get('excp_interal')
def get_max_repost_page():
return cf.get('max_repost_page')
def get_max_search_page():
return cf.get('max_search_page')
def get_max_home_page():
return cf.get('max_home_page')
def get_max_comment_page():
return cf.get('max_comment_page')
def get_max_retries():
return cf.get('max_retries')
def get_broker_and_backend():
redis_info = cf.get('redis')
password = redis_info.get('password')
sentinel_args = redis_info.get('sentinel', '')
db = redis_info.get('broker', 5)
if sentinel_args:
broker_url = ";".join('sentinel://:{}@{}:{}/{}'.format(password, sentinel['host'], sentinel['port'], db) for
sentinel in sentinel_args)
return broker_url
else:
host = redis_info.get('host')
port = redis_info.get('port')
backend_db = redis_info.get('backend', 6)
broker_url = 'redis://:{}@{}:{}/{}'.format(password, host, port, db)
backend_url = 'redis://:{}@{}:{}/{}'.format(password, host, port, backend_db)
return broker_url, backend_url
def get_redis_master():
return cf.get('redis').get('master', '')
def get_code_username():
return cf.get('yundama_username')
def get_code_password():
return cf.get('yundama_passwd')
def get_running_mode():
return cf.get('running_mode')
def get_crawling_mode():
return cf.get('crawling_mode')
def get_share_host_count():
return cf.get('share_host_count')
def get_cookie_expire_time():
return cf.get('cookie_expire_time')
def get_email_args():
return cf.get('email')
| import os
import random
from yaml import load
config_path = os.path.join(os.path.dirname(__file__), 'spider.yaml')
with open(config_path, encoding='utf-8') as f:
cont = f.read()
cf = load(cont)
def get_db_args():
return cf.get('db')
def get_redis_args():
return cf.get('redis')
def get_timeout():
return cf.get('time_out')
def get_crawl_interal():
interal = random.randint(cf.get('min_crawl_interal'), cf.get('max_crawl_interal'))
return interal
def get_excp_interal():
return cf.get('excp_interal')
def get_max_repost_page():
return cf.get('max_repost_page')
def get_max_search_page():
return cf.get('max_search_page')
def get_max_home_page():
return cf.get('max_home_page')
def get_max_comment_page():
return cf.get('max_comment_page')
def get_max_retries():
return cf.get('max_retries')
def get_broker_and_backend():
redis_info = cf.get('redis')
password = redis_info.get('password')
sentinel_args = redis_info.get('sentinel', '')
db = redis_info.get('broker', 5)
if sentinel_args:
broker_url = ";".join('sentinel://:{}@{}:{}/{}'.format(password, sentinel['host'], sentinel['port'], db) for
sentinel in sentinel_args)
return broker_url
else:
host = redis_info.get('host')
port = redis_info.get('port')
backend_db = redis_info.get('backend', 6)
broker_url = 'redis://:{}@{}:{}/{}'.format(password, host, port, db)
backend_url = 'redis://:{}@{}:{}/{}'.format(password, host, port, backend_db)
return broker_url, backend_url
def get_redis_master():
return cf.get('redis').get('master', '')
def get_code_username():
return cf.get('yundama_username')
def get_code_password():
return cf.get('yundama_passwd')
def get_running_mode():
return cf.get('running_mode')
def get_crawling_mode():
return cf.get('crawling_mode')
def get_share_host_count():
return cf.get('share_host_count')
def get_cookie_expire_time():
return cf.get('cookie_expire_time')
def get_email_args():
return cf.get('email')
| mit | Python |
5c7036d164e53ced72c85656ef56f89c6a4b4de5 | Fix fabfile paths | Code4SA/mma-dexter,Code4SA/mma-dexter,Code4SA/mma-dexter | fabfile.py | fabfile.py | import os
import pwd
from fabric.api import cd, env, task, require, run, sudo, prefix, shell_env
from fabric.contrib.files import exists, upload_template
VIRTUALENV_DIR = 'env'
CODE_DIR = 'mma-dexter'
PROD_HOSTS = ['wazimap.co.za']
@task
def prod():
env.deploy_type = 'prod'
env.deploy_dir = '/home/mma/'
env.branch = 'big-server'
env.hosts = PROD_HOSTS
env.user = 'mma'
@task
def deploy():
require('deploy_type', 'deploy_dir', 'branch', provided_by=[prod])
repo_dir = os.path.join(env.deploy_dir, CODE_DIR)
ve_dir = os.path.join(env.deploy_dir, CODE_DIR, VIRTUALENV_DIR)
if not exists(repo_dir):
with cd(env.deploy_dir):
run('git clone https://github.com/Code4SA/mma-dexter.git')
if not exists(ve_dir):
run('virtualenv -p python2.7 --no-site-packages %s' % ve_dir)
with cd(repo_dir):
run('git checkout -B %s' % env.branch)
run('git pull origin %s' % env.branch)
with cd(repo_dir), prefix('. %s/bin/activate' % ve_dir):
run('pip install -r requirements.txt')
# make sure logging dir exists and update processes
log_dir = os.path.join(repo_dir, 'log')
run('mkdir -p %s' % log_dir)
# log rotation
sudo('ln -fs %s/resources/logrotate/dexter /etc/logrotate.d/' % repo_dir)
# link in nginx config
sudo('ln -fs %s/resources/nginx/dexter.conf /etc/nginx/sites-enabled/' % repo_dir)
sudo('service nginx reload')
# link in upstart config
sudo('ln -fs %s/resources/upstart/dexter.conf /etc/init/' % repo_dir)
sudo('initctl reload-configuration')
# restart dexter
sudo('initctl restart dexter')
| import os
import pwd
from fabric.api import cd, env, task, require, run, sudo, prefix, shell_env
from fabric.contrib.files import exists, upload_template
VIRTUALENV_DIR = 'env'
CODE_DIR = 'mma-dexter'
PROD_HOSTS = ['wazimap.co.za']
@task
def prod():
env.deploy_type = 'prod'
env.deploy_dir = '/home/mma/mma-dexter/'
env.branch = 'master'
env.hosts = PROD_HOSTS
env.user = 'mma'
@task
def deploy():
require('deploy_type', 'deploy_dir', 'branch', provided_by=[prod])
repo_dir = os.path.join(env.deploy_dir, CODE_DIR)
ve_dir = os.path.join(env.deploy_dir, VIRTUALENV_DIR)
if not exists(repo_dir):
with cd(env.deploy_dir):
run('git clone https://github.com/Code4SA/mma-dexter.git')
if not exists(ve_dir):
run('virtualenv -p python2.7 --no-site-packages %s' % ve_dir)
with cd(repo_dir):
run('git checkout -B %s' % env.branch)
run('git pull origin %s' % env.branch)
with cd(repo_dir), prefix('. %s/bin/activate' % ve_dir):
run('pip install -r requirements.txt')
# make sure logging dir exists and update processes
log_dir = os.path.join(env.deploy_dir, 'log')
run('mkdir -p %s' % log_dir)
# log rotation
sudo('ln -fs %s/resources/logrotate/dexter /etc/logrotate.d/' % env.deploy_dir)
# link in nginx config
sudo('ln -fs %s/resources/nginx/dexter.conf /etc/nginx/sites-enabled/' % env.deploy_dir)
sudo('service nginx reload')
# link in upstart config
sudo('ln -fs %s/resources/upstart/dexter.conf /etc/init/' % env.deploy_dir)
sudo('initctl reload-configuration')
# restart dexter
sudo('service dexter restart')
| apache-2.0 | Python |
2df5b967bf395932390960e248dffddc48834337 | use rw:* supervisor group in fabfile | Lancey6/redwind,Lancey6/redwind,Lancey6/redwind | fabfile.py | fabfile.py | from fabric.api import local, prefix, cd, run, env, lcd
import datetime
env.hosts = ['orin.kylewm.com']
REMOTE_PATH = '/srv/www/kylewm.com/redwind'
def backup():
backup_dir = '~/Backups/kylewm.com/{}/'.format(
datetime.date.isoformat(datetime.date.today()))
local('mkdir -p ' + backup_dir)
local('scp orin.kylewm.com:kylewm.com.db ' + backup_dir)
def commit():
local("git add -p")
local("git diff-index --quiet HEAD || git commit")
def push():
local("git push origin master")
def pull():
with cd(REMOTE_PATH):
run("git pull origin master")
run("git submodule update")
def restart():
with cd(REMOTE_PATH):
with prefix("source venv/bin/activate"):
run("pip install -r requirements.txt")
# run("uwsgi --reload /tmp/redwind.pid")
run("supervisorctl restart rw:*")
def deploy():
commit()
push()
pull()
restart()
| from fabric.api import local, prefix, cd, run, env, lcd
import datetime
env.hosts = ['orin.kylewm.com']
REMOTE_PATH = '/srv/www/kylewm.com/redwind'
def backup():
backup_dir = '~/Backups/kylewm.com/{}/'.format(
datetime.date.isoformat(datetime.date.today()))
local('mkdir -p ' + backup_dir)
local('scp orin.kylewm.com:kylewm.com.db ' + backup_dir)
def commit():
local("git add -p")
local("git diff-index --quiet HEAD || git commit")
def push():
local("git push origin master")
def pull():
with cd(REMOTE_PATH):
run("git pull origin master")
run("git submodule update")
def restart():
with cd(REMOTE_PATH):
with prefix("source venv/bin/activate"):
run("pip install -r requirements.txt")
# run("uwsgi --reload /tmp/redwind.pid")
run("supervisorctl restart redwind redwind-qworker")
def deploy():
commit()
push()
pull()
restart()
| bsd-2-clause | Python |
862c50b2617d1ac5aeda844d64e7d987181dfd98 | Update parser.py | pkug/intelmq,aaronkaplan/intelmq,robcza/intelmq,sch3m4/intelmq,aaronkaplan/intelmq,robcza/intelmq,robcza/intelmq,aaronkaplan/intelmq,pkug/intelmq,sch3m4/intelmq,pkug/intelmq,robcza/intelmq,sch3m4/intelmq,certtools/intelmq,certtools/intelmq,certtools/intelmq,sch3m4/intelmq,pkug/intelmq | intelmq/bots/inputs/phishtank/parser.py | intelmq/bots/inputs/phishtank/parser.py | import StringIO, csv
from intelmq.lib.bot import Bot, sys
from intelmq.lib.event import Event
class PhishTankParserBot(Bot):
def process(self):
report = self.receive_message()
report = report.strip()
if report:
columns = ["__IGNORE__", "url", "description_url", "source_time", "__IGNORE__", "__IGNORE__", "__IGNORE__", "target"] # ignore (__IGNORE__) fields specific to the source and other irrelevant fields
rows = csv.DictReader(StringIO.StringIO(report), fieldnames = columns)
for row in rows:
event = Event()
for key, value in row.items():
if key is "__IGNORE__":
continue
event.add(key, value)
self.send_message(event)
self.acknowledge_message()
if __name__ == "__main__":
bot = PhishTankParserBot(sys.argv[1])
bot.start()
| import StringIO, csv
from intelmq.lib.bot import Bot, sys
from intelmq.lib.event import Event
class PhishTankParserBot(Bot):
def process(self):
report = self.receive_message()
report = report.strip()
if report:
columns = ["__IGNORE__", "url", "description_url", "source_time", "__IGNORE__", "__IGNORE__", "__IGNORE__", "target"]
rows = csv.DictReader(StringIO.StringIO(report), fieldnames = columns)
for row in rows:
event = Event()
for key, value in row.items():
if key is "__IGNORE__":
continue
event.add(key, value)
self.send_message(event)
self.acknowledge_message()
if __name__ == "__main__":
bot = PhishTankParserBot(sys.argv[1])
bot.start()
| agpl-3.0 | Python |
29c422ea482b022bae07fcd389313152ef3d1c3c | Document Exception attributes, #15 | numberoverzero/bloop,numberoverzero/bloop | bloop/exceptions.py | bloop/exceptions.py | CONSTRAINT_FAILURE = "Failed to meet expected condition during {}"
NOT_MODIFIED = "Failed to modify some obects during {}"
TABLE_MISMATCH = "Existing table for model {} does not match expected"
UNBOUND = "Failed to {} unbound model. Did you forget to call engine.bind()?"
class ConstraintViolation(Exception):
"""Raised when a condition is not met during put/update/delete.
Attributes:
obj: The dict that was sent to dynamodb and failed some conditional
operation
"""
def __init__(self, operation, obj):
super().__init__(CONSTRAINT_FAILURE.format(operation))
self.obj = obj
class NotModified(Exception):
"""Raised when some objects are not loaded, saved, or deleted.
Attributes:
objects (list): the objects not modified
"""
def __init__(self, operation, objects):
super().__init__(NOT_MODIFIED.format(operation))
self.objects = objects
class TableMismatch(Exception):
"""Raised when binding a model to an existing table with the wrong schema.
Attributes:
model (:attr:`bloop.Engine.model`): The model that was trying to bind
expected (dict): The expected schema for the table
actual (dict): The actual schema of the table
"""
def __init__(self, model, expected, actual):
super().__init__(TABLE_MISMATCH.format(model))
self.model = model
self.expected = expected
self.actual = actual
class UnboundModel(Exception):
"""Raised when loading or dumping on a model before binding it to an engine
Attributes:
model (:attr:`bloop.Engine.model`): The model of the object being
loaded, or dumped
obj (object or None): The instance of the model that was being dumped,
or loaded into. If a new instance of the model was being created,
this will be None
"""
def __init__(self, operation, model, obj):
super().__init__(UNBOUND.format(operation))
self.model = model
self.obj = obj
| CONSTRAINT_FAILURE = "Failed to meet expected condition during {}"
NOT_MODIFIED = "Failed to modify some obects during {}"
TABLE_MISMATCH = "Existing table for model {} does not match expected"
UNBOUND = "Failed to {} unbound model. Did you forget to call engine.bind()?"
class ConstraintViolation(Exception):
""" Raised when a condition is not met during put/update/delete """
def __init__(self, operation, obj):
super().__init__(CONSTRAINT_FAILURE.format(operation))
self.obj = obj
class NotModified(Exception):
"""
Raised when some objects are not loaded, saved, or deleted.
self.objects contains the objects not modified
"""
def __init__(self, operation, objects):
super().__init__(NOT_MODIFIED.format(operation))
self.objects = objects
class TableMismatch(Exception):
"""
Raised when trying to bind a model against an existing table that
doesn't match the required attributes for the model's definition
"""
def __init__(self, model, expected, actual):
super().__init__(TABLE_MISMATCH.format(model))
self.model = model
self.expected = expected
self.actual = actual
class UnboundModel(Exception):
"""
Raised when attempting to load/dump a model before binding it to an engine
"""
def __init__(self, operation, model, obj):
super().__init__(UNBOUND.format(operation))
self.model = model
self.obj = obj
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.