commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
954e8b979fcb5a0a276c6dc56cce96904cb0198f | fix imports in token_sender | genome/flow-core,genome/flow-core,genome/flow-core | flow/commands/token_sender.py | flow/commands/token_sender.py | import logging
from flow.commands.base import CommandBase
from flow.petri.netbase import Token, SetTokenMessage
LOG = logging.getLogger(__name__)
class TokenSenderCommand(CommandBase):
def __init__(self, broker=None, storage=None, routing_key=None, exchange=None):
self.broker = broker
self.storage = storage
self.routing_key = routing_key
self.exchange = exchange
def send_token(self, net_key=None, place_idx=None, data=None):
self.broker.connect()
token = Token.create(self.storage, data=data, data_type="output")
LOG.info("Sending command response token %s to net %s, place %r",
token.key, net_key, place_idx)
message = SetTokenMessage(net_key=net_key, place_idx=place_idx,
token_key=token.key)
self.broker.publish(exchange_name=self.exchange, routing_key=self.routing_key,
message=message)
self.broker.disconnect()
| import logging
from flow.commands.base import CommandBase
from flow.petri.safenet import Token, SetTokenMessage
LOG = logging.getLogger(__name__)
class TokenSenderCommand(CommandBase):
def __init__(self, broker=None, storage=None, routing_key=None, exchange=None):
self.broker = broker
self.storage = storage
self.routing_key = routing_key
self.exchange = exchange
def send_token(self, net_key=None, place_idx=None, data=None):
self.broker.connect()
token = Token.create(self.storage, data=data, data_type="output")
LOG.info("Sending command response token %s to net %s, place %r",
token.key, net_key, place_idx)
message = SetTokenMessage(net_key=net_key, place_idx=place_idx,
token_key=token.key)
self.broker.publish(exchange_name=self.exchange, routing_key=self.routing_key,
message=message)
self.broker.disconnect()
| agpl-3.0 | Python |
bec5402ed810bb2a0c8e432096e26f4a17405b8c | refactor to use RequestFactory | sounay/flaminggo-test,ehealthafrica-ci/onadata,SEL-Columbia/formhub,wesley1001/formhub,qlands/onadata,mainakibui/kobocat,wesley1001/formhub,kobotoolbox/kobocat,GeoODK/onadata,awemulya/fieldsight-kobocat,eHealthAfrica/onadata,spatialdev/onadata,eHealthAfrica/formhub,ultimateprogramer/formhub,makinacorpus/formhub,kobotoolbox/kobocat,awemulya/fieldsight-kobocat,piqoni/onadata,GeoODK/onadata,jomolinare/kobocat,ultimateprogramer/formhub,GeoODK/onadata,jomolinare/kobocat,SEL-Columbia/formhub,eHealthAfrica/onadata,GeoODK/formhub,hnjamba/onaclone,ehealthafrica-ci/formhub,SEL-Columbia/formhub,eHealthAfrica/formhub,kobotoolbox/kobocat,qlands/onadata,ehealthafrica-ci/formhub,jomolinare/kobocat,awemulya/fieldsight-kobocat,makinacorpus/formhub,spatialdev/onadata,smn/onadata,piqoni/onadata,GeoODK/formhub,wesley1001/formhub,ehealthafrica-ci/onadata,sounay/flaminggo-test,eHealthAfrica/formhub,spatialdev/onadata,piqoni/onadata,sounay/flaminggo-test,GeoODK/formhub,ehealthafrica-ci/formhub,qlands/onadata,makinacorpus/formhub,smn/onadata,jomolinare/kobocat,SEL-Columbia/formhub,hnjamba/onaclone,hnjamba/onaclone,wesley1001/formhub,mainakibui/kobocat,smn/onadata,eHealthAfrica/onadata,sounay/flaminggo-test,ultimateprogramer/formhub,GeoODK/onadata,spatialdev/onadata,mainakibui/kobocat,eHealthAfrica/formhub,kobotoolbox/kobocat,piqoni/onadata,smn/onadata,makinacorpus/formhub,ehealthafrica-ci/formhub,hnjamba/onaclone,ehealthafrica-ci/onadata,ehealthafrica-ci/onadata,mainakibui/kobocat,qlands/onadata,awemulya/fieldsight-kobocat,ultimateprogramer/formhub | api/tests/test_users_api.py | api/tests/test_users_api.py | import json
from api.tests.test_api import TestAPICase
from api.views import UserViewSet
class TestUsersAPI(TestAPICase):
def setUp(self):
super(TestUsersAPI, self).setUp()
def test_user_list(self):
view = UserViewSet.as_view({'get': 'list'})
request = self.factory.get('/', **self.extra)
response = view(request)
data = [{'username': u'bob', 'first_name': u'Bob', 'last_name': u''}]
self.assertContains(response, json.dumps(data))
def test_user_get(self):
view = UserViewSet.as_view({'get': 'retrieve'})
request = self.factory.get('/', **self.extra)
response = view(request, username='bob')
data = {'username': u'bob', 'first_name': u'Bob', 'last_name': u''}
self.assertContains(response, json.dumps(data))
| import json
from api.tests.test_api import IntegrationTestAPICase
class IntegrationTestUserAPI(IntegrationTestAPICase):
def test_user_list(self):
self._login_user_and_profile()
response = self.client.get('/api/v1/users')
data = [{'username': u'bob', 'first_name': u'Bob', 'last_name': u''}]
self.assertContains(response, json.dumps(data))
def test_user_get(self):
self._login_user_and_profile()
response = self.client.get('/api/v1/users/bob')
data = {'username': u'bob', 'first_name': u'Bob', 'last_name': u''}
self.assertContains(response, json.dumps(data))
| bsd-2-clause | Python |
991dc8c75433f2c94950b55fbafc284d31c9b6c0 | Change section name | openchordcharts/web-api,openchordcharts/openchordcharts-api | mongoshell.py | mongoshell.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import bson
from paste.deploy import loadapp
import pymongo # noqa
from openchordcharts_api import environment
from openchordcharts_api.model import Account, Chart # noqa
base_dir = os.path.abspath(os.path.dirname(__file__))
conf_file_name = 'production.ini' if os.path.exists('production.ini') else 'development.ini'
conf_file_path = os.path.join(base_dir, conf_file_name)
app = loadapp(u'config:{}#main'.format(conf_file_path))
db = environment.db
ObjectId = bson.ObjectId
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import bson
from paste.deploy import loadapp
import pymongo # noqa
from openchordcharts_api import environment
from openchordcharts_api.model import Account, Chart # noqa
base_dir = os.path.abspath(os.path.dirname(__file__))
conf_file_name = 'production.ini' if os.path.exists('production.ini') else 'development.ini'
conf_file_path = os.path.join(base_dir, conf_file_name)
app = loadapp(u'config:{}#api'.format(conf_file_path))
db = environment.db
ObjectId = bson.ObjectId
| agpl-3.0 | Python |
7df49846779392fef9bbb9f5387cfe5bc53b73ec | Update individual.py | atallah-lab/simple_simulation,atallah-lab/simple_simulation | src/individual.py | src/individual.py | # -*- coding: utf-8 -*-
import src.constants as c
from src.allele import Allele
class Individual(object):
def __init__(self, loci):
self.alleles = loci.alleles
def get_genotype(self):
result = ''
for a in self.alleles:
result = result + a.name
return result
def get_fitness(self):
full_fitness = 1
for a in self.alleles:
full_fitness = full_fitness * a.fitness
return full_fitness
| import src.constants as c
from src.allele import Allele
class Individual(object):
def __init__(self, loci):
self.alleles = loci.alleles
def get_genotype(self):
result = ''
for a in self.alleles:
result = result + a.name
return result
def get_fitness(self):
full_fitness = 1
for a in self.alleles:
full_fitness = full_fitness * a.fitness
return full_fitness
| mit | Python |
069ff878186ef20767dc580a34c16ab54eeda147 | Adjust reinv_bad_ack test case now that we send ACK before we call final response handler. | sippy/voiptests,sippy/voiptests | test_cases/reinv_bad_ack.py | test_cases/reinv_bad_ack.py | # Copyright (c) 2018 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from test_cases.t1 import a_test1, b_test1
from test_cases.reinvite import test_reinvite
class a_test_reinv_bad_ack(test_reinvite, a_test1):
cld = 'bob_reinv_bad_ack'
cli = 'alice_reinv_bad_ack'
compact_sip = False
disconnect_ival = 50
def __init__(self, *args):
self.reinvite_ival = 1
a_test1.__init__(self, *args)
def reinvite(self, ua, alter_port = True):
test_reinvite.reinvite(self, ua, alter_port)
if not self.connect_done or self.disconnect_done:
return
ua.tr.uack = True
def on_reinvite_connected(self, ua):
ua.tr.ack.getHFBody('cseq').cseq = 0
ua.global_config['_sip_tm'].sendACK(ua.tr)
def disconnect(self, ua):
if not self.disconnect_done:
self.nerrs += 1
# The ACK in the re-INVITE transaction is sent with wrong CSeq so
# the call should have been interrupted early.
raise ValueError('%s: The call was not interrupted early.' % self.my_name())
a_test1.disconnect(self, ua)
class b_test_reinv_bad_ack(test_reinvite, b_test1):
cli = a_test_reinv_bad_ack.cld
compact_sip = True
ring_ival = 1.0
answer_ival = 5.0
disconnect_ival = 60
| # Copyright (c) 2018 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from test_cases.t1 import a_test1, b_test1
from test_cases.reinvite import test_reinvite
class a_test_reinv_bad_ack(test_reinvite, a_test1):
cld = 'bob_reinv_bad_ack'
cli = 'alice_reinv_bad_ack'
compact_sip = False
disconnect_ival = 50
def __init__(self, *args):
self.reinvite_ival = 1
a_test1.__init__(self, *args)
def on_reinvite_connected(self, ua):
ua.tr.ack.getHFBody('cseq').cseq = 0
def disconnect(self, ua):
if not self.disconnect_done:
self.nerrs += 1
# The ACK in the re-INVITE transaction is sent with wrong CSeq so
# the call should have been interrupted early.
raise ValueError('%s: The call was not interrupted early.' % self.my_name())
a_test1.disconnect(self, ua)
class b_test_reinv_bad_ack(test_reinvite, b_test1):
cli = a_test_reinv_bad_ack.cld
compact_sip = True
ring_ival = 1.0
answer_ival = 5.0
disconnect_ival = 60
| bsd-2-clause | Python |
78a03f0b0cbc948a6c9fb215e9051d099c528a82 | Change page title and heading | devinit/grand-bargain-monitoring,devinit/grand-bargain-monitoring,devinit/grand-bargain-monitoring | src/app.py | src/app.py | from flask import Flask
from flask import render_template, url_for
import parse_data
app = Flask(__name__)
@app.route("/dashboard")
def dashboard():
data = parse_data.load_and_format_data()
title = 'Grand Bargain Transparency Dashboard'
return render_template('dashboard.html', data=data, heading=title, page_title=title)
if __name__ == "__main__":
app.run()
| from flask import Flask
from flask import render_template, url_for
import parse_data
app = Flask(__name__)
@app.route("/dashboard")
def dashboard():
data = parse_data.load_and_format_data()
title = 'Grand Bargain Monitoring'
return render_template('dashboard.html', data=data, heading=title, page_title=title)
if __name__ == "__main__":
app.run()
| mit | Python |
fa3626c0dab4befc217ef4c5558ff0b37390a8bc | Update contrib/linux/actions/service.py to work for CentOS Stream | nzlosh/st2,StackStorm/st2,nzlosh/st2,StackStorm/st2,StackStorm/st2,Plexxi/st2,Plexxi/st2,nzlosh/st2,nzlosh/st2,Plexxi/st2,Plexxi/st2,StackStorm/st2 | contrib/linux/actions/service.py | contrib/linux/actions/service.py | #!/usr/bin/env python
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NOTE: This script file utilizes remote-shell-script runner which means it copied as-is to the
remote host and executed using Python binary available on that systems.
This means it doesn't use pack or StackStorm specific virtual environment which means we can't
rely on any 3rd party dependencies.
"""
import re
import sys
import os
import platform
import subprocess
from st2common.util.shell import quote_unix
def get_linux_distribution():
# platform.linux_distribution() is not available in Python >= 3.8
if hasattr(platform, "linux_distribution"):
distro = platform.linux_distribution()[0] # pylint: disable=no-member
else:
# Fall back to shelling out to lsb_release
result = subprocess.run(
"lsb_release -i -s", shell=True, check=True, stdout=subprocess.PIPE
)
distro = result.stdout.decode("utf-8").strip()
if not distro:
raise ValueError("Fail to detect distribution we are running on")
return distro
if len(sys.argv) < 3:
raise ValueError("Usage: service.py <action> <service>")
distro = get_linux_distribution()
args = {"act": quote_unix(sys.argv[1]), "service": quote_unix(sys.argv[2])}
print("Detected distro: %s" % (distro))
if re.search(distro, "Ubuntu"):
if os.path.isfile("/etc/init/%s.conf" % args["service"]):
cmd_args = ["service", args["service"], args["act"]]
elif os.path.isfile("/etc/init.d/%s" % args["service"]):
cmd_args = ["/etc/init.d/%s" % (args["service"]), args["act"]]
else:
print("Unknown service")
sys.exit(2)
elif (
re.search(distro, "Redhat")
or re.search(distro, "Fedora")
or re.search(distro, "CentOS")
or re.search(distro, "Rocky Linux")
):
cmd_args = ["systemctl", args["act"], args["service"]]
subprocess.call(cmd_args, shell=False)
| #!/usr/bin/env python
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NOTE: This script file utilizes remote-shell-script runner which means it copied as-is to the
remote host and executed using Python binary available on that systems.
This means it doesn't use pack or StackStorm specific virtual environment which means we can't
rely on any 3rd party dependencies.
"""
import re
import sys
import os
import platform
import subprocess
from st2common.util.shell import quote_unix
def get_linux_distribution():
# platform.linux_distribution() is not available in Python >= 3.8
if hasattr(platform, "linux_distribution"):
distro = platform.linux_distribution()[0] # pylint: disable=no-member
else:
# Fall back to shelling out to lsb_release
result = subprocess.run(
"lsb_release -i -s", shell=True, check=True, stdout=subprocess.PIPE
)
distro = result.stdout.decode("utf-8").strip()
if not distro:
raise ValueError("Fail to detect distribution we are running on")
return distro
if len(sys.argv) < 3:
raise ValueError("Usage: service.py <action> <service>")
distro = get_linux_distribution()
args = {"act": quote_unix(sys.argv[1]), "service": quote_unix(sys.argv[2])}
print("Detected distro: %s" % (distro))
if re.search(distro, "Ubuntu"):
if os.path.isfile("/etc/init/%s.conf" % args["service"]):
cmd_args = ["service", args["service"], args["act"]]
elif os.path.isfile("/etc/init.d/%s" % args["service"]):
cmd_args = ["/etc/init.d/%s" % (args["service"]), args["act"]]
else:
print("Unknown service")
sys.exit(2)
elif (
re.search(distro, "Redhat")
or re.search(distro, "Fedora")
or re.search(distro, "CentOS Linux")
or re.search(distro, "Rocky Linux")
):
cmd_args = ["systemctl", args["act"], args["service"]]
subprocess.call(cmd_args, shell=False)
| apache-2.0 | Python |
a3feca50082d190eb39969b6494f651d9c0e58a8 | remove link to private documentation | quora/asynq,manannayak/asynq | asynq/__init__.py | asynq/__init__.py | # Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__doc__ = """
Asynq is a framework for asynchronous programming in Python.
It supports futures, batching, and asynchronous contexts.
"""
__team__ = 'platform'
__reviewer__ = 'jelle, manan'
from . import debug
from . import futures
from . import batching
from . import async_task
from . import scheduler
from . import decorators
from . import utils
from . import contexts
from . import scoped_value
from . import tools
from .futures import FutureBase, Future, FutureIsAlreadyComputed, none_future, ConstFuture, \
ErrorFuture
from .batching import BatchBase, BatchItemBase, BatchingError, BatchCancelledError
from .async_task import AsyncTask, AsyncTaskCancelledError, AsyncTaskResult
from .scheduler import TaskScheduler, get_scheduler, get_active_task, set_scheduler, \
AsyncTaskError
from .decorators import async, async_proxy, cached, has_async_fn, \
is_pure_async_fn, is_async_fn, get_async_fn, get_async_or_sync_fn, async_call, \
make_async_decorator
from .utils import await, result
from .contexts import NonAsyncContext, AsyncContext
from .scoped_value import AsyncScopedValue, async_override
from . import mock_ as mock
from .generator import END_OF_GENERATOR, async_generator, list_of_generator, take_first, Value
debug.sync = batching.sync
debug.attach_exception_hook()
| # Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__doc__ = """
Asynq is a framework for asynchronous programming in Python.
It supports futures, batching, and asynchronous contexts.
For usage documentation, see:
https://quora.quip.com/5tbJAZlr5fGH
"""
__team__ = 'platform'
__reviewer__ = 'jelle, manan'
from . import debug
from . import futures
from . import batching
from . import async_task
from . import scheduler
from . import decorators
from . import utils
from . import contexts
from . import scoped_value
from . import tools
from .futures import FutureBase, Future, FutureIsAlreadyComputed, none_future, ConstFuture, \
ErrorFuture
from .batching import BatchBase, BatchItemBase, BatchingError, BatchCancelledError
from .async_task import AsyncTask, AsyncTaskCancelledError, AsyncTaskResult
from .scheduler import TaskScheduler, get_scheduler, get_active_task, set_scheduler, \
AsyncTaskError
from .decorators import async, async_proxy, cached, has_async_fn, \
is_pure_async_fn, is_async_fn, get_async_fn, get_async_or_sync_fn, async_call, \
make_async_decorator
from .utils import await, result
from .contexts import NonAsyncContext, AsyncContext
from .scoped_value import AsyncScopedValue, async_override
from . import mock_ as mock
from .generator import END_OF_GENERATOR, async_generator, list_of_generator, take_first, Value
debug.sync = batching.sync
debug.attach_exception_hook()
| apache-2.0 | Python |
e97dc71511edc9f835a183ad3d674ca7e15d55b6 | Add initial argparser entry point | Phelimb/atlas,Phelimb/atlas | atlas/__init__.py | atlas/__init__.py | from version import __version__
| mit | Python | |
b100474ff7cf229f43e1bec314dc6ad548553841 | decrease verbosity | altai/focus,altai/focus,altai/focus | C4GD_web/flask_memcache_session.py | C4GD_web/flask_memcache_session.py | # credits to https://github.com/unk2k/
import os
import flask
class SessionData(dict, flask.sessions.SessionMixin):
pass
class Session(flask.sessions.SessionInterface):
session_class = SessionData
def open_session(self, app, request):
self.cookie_session_id = request.cookies.get(
app.session_cookie_name, None)
self.session_new = False
if self.cookie_session_id is None:
self.cookie_session_id = os.urandom(40).encode('hex')
self.session_new = True
self.memcache_session_id = '@'.join(
[
request.remote_addr,
self.cookie_session_id
]
)
session = app.cache.get(self.memcache_session_id) or {}
app.cache.set(self.memcache_session_id, session)
return self.session_class(session)
def save_session(self, app, session, response):
expires = self.get_expiration_time(app, session)
domain = self.get_cookie_domain(app)
path = self.get_cookie_path(app)
httponly = self.get_cookie_httponly(app)
secure = self.get_cookie_secure(app)
app.cache.set(self.memcache_session_id, session)
if self.session_new:
response.set_cookie(
app.session_cookie_name, self.cookie_session_id, path=path,
expires=expires, httponly=httponly,
secure=secure, domain=domain)
| # credits to https://github.com/unk2k/
import os
import flask
class SessionData(dict, flask.sessions.SessionMixin):
pass
class Session(flask.sessions.SessionInterface):
session_class = SessionData
def open_session(self, app, request):
self.cookie_session_id = request.cookies.get(
app.session_cookie_name, None)
self.session_new = False
if self.cookie_session_id is None:
self.cookie_session_id = os.urandom(40).encode('hex')
self.session_new = True
self.memcache_session_id = '@'.join(
[
request.remote_addr,
self.cookie_session_id
]
)
app.logger.debug('Open session %s', self.memcache_session_id)
session = app.cache.get(self.memcache_session_id) or {}
app.cache.set(self.memcache_session_id, session)
return self.session_class(session)
def save_session(self, app, session, response):
expires = self.get_expiration_time(app, session)
domain = self.get_cookie_domain(app)
path = self.get_cookie_path(app)
httponly = self.get_cookie_httponly(app)
secure = self.get_cookie_secure(app)
app.cache.set(self.memcache_session_id, session)
if self.session_new:
response.set_cookie(
app.session_cookie_name, self.cookie_session_id, path=path,
expires=expires, httponly=httponly,
secure=secure, domain=domain)
app.logger.debug(
'Set session %s with %s', self.memcache_session_id, session)
| lgpl-2.1 | Python |
c9ebc3556572e1fa905949c48ac74e58d3a71521 | fix quickstart | ArneBab/gamification-engine,ActiDoo/gamification-engine,ActiDoo/gamification-engine,ArneBab/gamification-engine,ActiDoo/gamification-engine,ActiDoo/gamification-engine,ArneBab/gamification-engine | gengine/scripts/quickstart.py | gengine/scripts/quickstart.py | # -*- coding: utf-8 -*-
import os
import sys
import gengine
import shutil
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <directory>\n' % (cmd,))
sys.exit(1)
def copyDirectory(src, dest):
try:
shutil.copytree(src, dest)
except shutil.Error as e:
print('Error: %s' % e)
except OSError as e:
print('Error: %s' % e)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
directory = argv[1]
if not os.path.exists(directory):
#copy files
quickstart_template_path = os.path.join(os.path.dirname(gengine.__path__[0]), "gengine_quickstart_template")
copyDirectory(quickstart_template_path, directory)
else:
print "directory already exists"
| # -*- coding: utf-8 -*-
import os
import sys
import gengine
import shutil
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <directory>\n' % (cmd,))
sys.exit(1)
def copyDirectory(src, dest):
try:
shutil.copytree(src, dest)
except shutil.Error as e:
print('Error: %s' % e)
except OSError as e:
print('Error: %s' % e)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
directory = argv[1]
if not os.path.exists(directory):
#copy files
quickstart_template_path = os.path.join(os.path.dirname(gengine.__path__[0]), "quickstart_template")
copyDirectory(quickstart_template_path, directory)
else:
print "directory already exists" | mit | Python |
15f51604f82b88ba5a25f04e9c610971f55d0b5a | Fix use of deprecated pandas.core.common | has2k1/plotnine,has2k1/plotnine | ggplotx/stats/stat_boxplot.py | ggplotx/stats/stat_boxplot.py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from matplotlib.cbook import boxplot_stats
from ..utils import resolution
from ..utils.doctools import document
from .stat import stat
@document
class stat_boxplot(stat):
"""
Compute boxplot statistics
{documentation}
See Also
--------
:class:`~ggplotx.geoms.geom_boxplot`
"""
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'geom': 'boxplot', 'position': 'dodge',
'coef': 1.5}
def setup_params(self, data):
if 'width' not in self.params:
self.params['width'] = resolution(data['x'], False) * 0.75
return self.params
@classmethod
def compute_group(cls, data, scales, **params):
labels = ['x', 'y']
X = np.array(data[labels])
res = boxplot_stats(X, whis=params['coef'], labels=labels)[1]
try:
n = data['weight'].sum()
except KeyError:
n = len(data['y'])
if len(np.unique(data['x'])) > 1:
width = np.ptp(data['x']) * 0.9
else:
width = params['width']
if pdtypes.is_categorical(data['x']):
x = data['x'].iloc[0]
else:
x = np.mean([data['x'].min(), data['x'].max()])
d = {'ymin': res['whislo'],
'lower': res['q1'],
'middle': [res['med']],
'upper': res['q3'],
'ymax': res['whishi'],
'outliers': [res['fliers']],
'notchupper': res['med']+1.58*res['iqr']/np.sqrt(n),
'notchlower': res['med']-1.58*res['iqr']/np.sqrt(n),
'x': x,
'width': width,
'relvarwidth': np.sqrt(n)}
return pd.DataFrame(d)
| from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import pandas.core.common as com
from matplotlib.cbook import boxplot_stats
from ..utils import resolution
from ..utils.doctools import document
from .stat import stat
@document
class stat_boxplot(stat):
"""
Compute boxplot statistics
{documentation}
See Also
--------
:class:`~ggplotx.geoms.geom_boxplot`
"""
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'geom': 'boxplot', 'position': 'dodge',
'coef': 1.5}
def setup_params(self, data):
if 'width' not in self.params:
self.params['width'] = resolution(data['x'], False) * 0.75
return self.params
@classmethod
def compute_group(cls, data, scales, **params):
labels = ['x', 'y']
X = np.array(data[labels])
res = boxplot_stats(X, whis=params['coef'], labels=labels)[1]
try:
n = data['weight'].sum()
except KeyError:
n = len(data['y'])
if len(np.unique(data['x'])) > 1:
width = np.ptp(data['x']) * 0.9
else:
width = params['width']
if com.is_categorical(data['x']):
x = data['x'].iloc[0]
else:
x = np.mean([data['x'].min(), data['x'].max()])
d = {'ymin': res['whislo'],
'lower': res['q1'],
'middle': [res['med']],
'upper': res['q3'],
'ymax': res['whishi'],
'outliers': [res['fliers']],
'notchupper': res['med']+1.58*res['iqr']/np.sqrt(n),
'notchlower': res['med']-1.58*res['iqr']/np.sqrt(n),
'x': x,
'width': width,
'relvarwidth': np.sqrt(n)}
return pd.DataFrame(d)
| mit | Python |
a7f79a984f632bd39dd7bc820f6e51943da42f3f | remove pdb | fatrix/django-golive,fatrix/django-golive | golive/management/__init__.py | golive/management/__init__.py | from django.core.management import BaseCommand
from fabric.state import output
import sys
from golive.stacks.stack import StackFactory
import yaml
class CoreCommand(BaseCommand):
env_id = '<env_id>'
help = 'Manage the given environment'
output['stdout'] = False
def get_config_value(self, key):
return self.config[key]
def handle(self, *args, **options):
job = sys.argv[1]
if len(args) < 1:
self.stderr.write('Missing env_id\n')
sys.exit(1)
# load user config
environment_configfile = open("golive.yml", 'r')
stackname = self.environment_config_temp = yaml.load(environment_configfile)['CONFIG']['STACK']
self.stack = StackFactory.get(stackname)
self.stack.setup_environment(args[0])
# task decision
task = None
if 'task' in options:
task = options['task']
# role decision
role = None
if 'role' in options.keys():
role = options['role']
# execute
self.stack.do(job, task=task, role=role)
def end(self):
self.stdout.write('Done\n')
| from django.core.management import BaseCommand
from fabric.state import output
import sys
from golive.stacks.stack import StackFactory
import yaml
class CoreCommand(BaseCommand):
env_id = '<env_id>'
help = 'Manage the given environment'
output['stdout'] = False
def get_config_value(self, key):
return self.config[key]
def handle(self, *args, **options):
job = sys.argv[1]
if len(args) < 1:
self.stderr.write('Missing env_id\n')
sys.exit(1)
# load user config
environment_configfile = open("golive.yml", 'r')
stackname = self.environment_config_temp = yaml.load(environment_configfile)['CONFIG']['STACK']
import pdb; pdb.set_trace()
self.stack = StackFactory.get(stackname)
self.stack.setup_environment(args[0])
# task decision
task = None
if 'task' in options:
task = options['task']
# role decision
role = None
if 'role' in options.keys():
role = options['role']
# execute
self.stack.do(job, task=task, role=role)
def end(self):
self.stdout.write('Done\n')
| bsd-2-clause | Python |
ab273079a494827e085fed2f6fdf548992df3183 | Update population.py | atallah-lab/simple_simulation,atallah-lab/simple_simulation | src/population.py | src/population.py | # -*- coding: utf-8 -*-
import src.constants as c
from src.allele import Allele
from src.locus import Locus
from src.individual import Individual
class Population(object):
def __init__(self, size=10):
self.size = size
def create_population(self):
individuals = []
for i in range(0, self.size):
loci = Locus(str(i), randomAlleles=True)
individuals.append(Individual(loci))
return individuals
def get_alleles_as_list(self, population): #TODO case without population.
allele_list = []
for individual in population:
for allele in individual.alleles:
allele_list.append(allele)
return allele_list
def get_allele_frequency(self, target_allele, individuals=None):
allele_count = 0
numerator = 1
if individuals is None:
individuals = self.create_population()
population_list = self.get_alleles_as_list(individuals)
for allele in population_list:
name, fitness = allele
if name == target_allele:
allele_count += 1
return (allele_count / len(population_list))
| # -*- coding: utf-8 -*-
import src.constants as c
from src.allele import Allele
from src.locus import Locus
from src.individual import Individual
class Population(object):
def __init__(self, size=10):
self.size = size
def create_population(self):
individuals = []
for i in range(0, self.size):
loci = Locus(str(i), randomAlleles=True)
individuals.append(Individual(loci))
return individuals
def get_alleles_as_list(self, population): #TODO case without population.
allele_list = []
for individual in population:
for allele in individual.alleles:
allele_list.append(allele)
return allele_list
def get_allele_frequency(self, target_allele, individuals=None):
allele_count = 0
numerator = 1
if individuals is None:
individuals = self.create_population()
print ('individuals is none, setting that up...')
population_list = self.get_alleles_as_list(individuals)
for allele in population_list:
name, fitness = allele
if name == target_allele:
allele_count += 1
return (allele_count / len(population_list))
| mit | Python |
82eacf3c8ebfbe4185768e54fc443f088336747b | Use commit time, not author time. | qwhelan/asv,ericdill/asv,edisongustavo/asv,ericdill/asv,pv/asv,airspeed-velocity/asv,edisongustavo/asv,cpcloud/asv,mdboom/asv,spacetelescope/asv,mdboom/asv,waylonflinn/asv,edisongustavo/asv,giltis/asv,spacetelescope/asv,airspeed-velocity/asv,waylonflinn/asv,airspeed-velocity/asv,qwhelan/asv,pv/asv,cpcloud/asv,cpcloud/asv,qwhelan/asv,ericdill/asv,ericdill/asv,mdboom/asv,spacetelescope/asv,waylonflinn/asv,pv/asv,spacetelescope/asv,mdboom/asv,airspeed-velocity/asv,giltis/asv,qwhelan/asv,giltis/asv,pv/asv | asv/repo.py | asv/repo.py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime
import os
from .console import console
from . import util
class Repo(object):
def __init__(self, url, path):
self.git = util.which("git")[0]
self.path = path
if not os.path.exists(self.path):
console.message("Cloning project", "green")
self._run_git(['clone', url, self.path], chdir=False)
console.message("Fetching recent changes", "green")
self._run_git(['fetch', 'origin'])
self.checkout('origin/master')
def _run_git(self, args, chdir=True):
if chdir:
orig_dir = os.getcwd()
os.chdir(self.path)
try:
return util.check_output(
[self.git] + args)
finally:
if chdir:
os.chdir(orig_dir)
def checkout(self, branch):
self._run_git(['checkout', branch])
def clean(self):
self._run_git(['clean', '-fxd'])
def get_branches(self, spec, steps=None):
if '..' in spec:
start, end = spec.split('..')
def get_date(self, hash):
return int(self._run_git(
['show', hash, '--quiet', '--format=format:%ct']).strip())
def get_hashes_from_range(self, range):
return self._run_git(
['log', '--quiet', '--format=format:%H', range]
).strip().split()
| # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime
import os
from .console import console
from . import util
class Repo(object):
def __init__(self, url, path):
self.git = util.which("git")[0]
self.path = path
if not os.path.exists(self.path):
console.message("Cloning project", "green")
self._run_git(['clone', url, self.path], chdir=False)
console.message("Fetching recent changes", "green")
self._run_git(['fetch', 'origin'])
self.checkout('origin/master')
def _run_git(self, args, chdir=True):
if chdir:
orig_dir = os.getcwd()
os.chdir(self.path)
try:
return util.check_output(
[self.git] + args)
finally:
if chdir:
os.chdir(orig_dir)
def checkout(self, branch):
self._run_git(['checkout', branch])
def clean(self):
self._run_git(['clean', '-fxd'])
def get_branches(self, spec, steps=None):
if '..' in spec:
start, end = spec.split('..')
def get_date(self, hash):
return int(self._run_git(
['show', hash, '--quiet', '--format=format:%at']).strip())
def get_hashes_from_range(self, range):
return self._run_git(
['log', '--quiet', '--format=format:%H', range]
).strip().split()
| bsd-3-clause | Python |
9172bea5e6698e7c1848111b703656e12ee475f0 | fix coding style : add newlines | buildtimetrend/python-lib | buildtimetrend/settings.py | buildtimetrend/settings.py | # vim: set expandtab sw=4 ts=4:
'''
Manages settings of buildtime trend
Copyright (C) 2014 Dieter Adriaenssens <ruleant@users.sourceforge.net>
This file is part of buildtime-trend
<https://github.com/ruleant/buildtime-trend/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import buildtimetrend
VERBOSITY = 1
def get_project_name():
'''
Get project name
'''
# use Travis repo slug as project name
if 'TRAVIS_REPO_SLUG' in os.environ:
return os.getenv('TRAVIS_REPO_SLUG')
return "None"
def get_project_info():
'''
Get project info as a dictonary
'''
return {
"version": buildtimetrend.VERSION,
"schema_version": buildtimetrend.SCHEMA_VERSION,
"project_name": str(get_project_name())
}
def print_verbose(string, level=1):
'''
Print wrapper, taking verbosity level into account.
Param string string to be printed
Param level verbosity level at which a string should be printed, default = 1
'''
# print string if level is equal or lower to verbosity level
if level <= VERBOSITY:
print string
| # vim: set expandtab sw=4 ts=4:
'''
Manages settings of buildtime trend
Copyright (C) 2014 Dieter Adriaenssens <ruleant@users.sourceforge.net>
This file is part of buildtime-trend
<https://github.com/ruleant/buildtime-trend/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import buildtimetrend
VERBOSITY = 1
def get_project_name():
'''
Get project name
'''
# use Travis repo slug as project name
if 'TRAVIS_REPO_SLUG' in os.environ:
return os.getenv('TRAVIS_REPO_SLUG')
return "None"
def get_project_info():
'''
Get project info as a dictonary
'''
return {
"version": buildtimetrend.VERSION,
"schema_version": buildtimetrend.SCHEMA_VERSION,
"project_name": str(get_project_name())
}
def print_verbose(string, level=1):
'''
Print wrapper, taking verbosity level into account.
Param string string to be printed
Param level verbosity level at which a string should be printed, default = 1
'''
# print string if level is equal or lower to verbosity level
if level <= VERBOSITY:
print string
| agpl-3.0 | Python |
242b0cdcdfa3ebb5a5691b964cbb256de09534ee | Add CLI function for illumination correction | jni/microscopium,microscopium/microscopium,Don86/microscopium,jni/microscopium,Don86/microscopium,microscopium/microscopium,starcalibre/microscopium | husc/main.py | husc/main.py | #!/bin/env python
# standard library
import os
import sys
import argparse
# dependencies
from skimage import io as skimio
# local imports
from . import preprocess as pre
from . import io
parser = argparse.ArgumentParser(description="Run the HUSC functions.")
subpar = parser.add_subparsers()
stitch = subpar.add_parser('stitch',
help="Stitch four quadrants into one image.")
stitch.add_argument('quadrant_images', nargs=4, metavar='IM',
help="The images for each quadrant in order: NW, NE, " +
"SW, SE.")
stitch.add_argument('output_image',
help="The filename for the stitched image.")
illum = subpar.add_parser('illum',
help="Estimate and correct illumination.")
illum.add_argument('images', nargs='+',
help="The input images.")
illum.add_argument('-o', '--output-suffix',
default='.illum.tif', metavar='SUFFIX',
help="What suffix to attach to the corrected images.")
illum.add_argument('-q', '--quantile', metavar='[0.0-1.0]', default=0.05,
help='Use this quantile to determine illumination.')
illum.add_argument('-r', '--radius', metavar='INT', default=51,
help='Radius in which to find quantile.')
def get_command(argv):
"""Get the command name used from the command line.
Parameters
----------
argv : [string]
The argument vector.
Returns
-------
cmd : string
The command name.
"""
return argv[1]
def main():
"""Run the command-line interface."""
args = parser.parse_args()
cmd = get_command(sys.argv)
if cmd == 'illum':
run_illum(args)
elif cmd == 'stitch':
raise NotImplementedError('stitch not yet implemented.')
def run_illum(args):
"""Run illumination correction.
Parameters
----------
args : argparse.Namespace
The arguments parsed by the argparse library.
Returns
-------
None
"""
ims = (skimio.imread(fn) for fn in args.images)
il = pre.find_background_illumination(ims, args.radius, args.quantile)
if args.save_illumination is not None:
io.imsave(il)
base_fns = (os.path.splitext(fn)[0] for fn in args.images)
ims_out = (fn + args.output_suffix for fn in base_fns)
ims = (skimio.imread(fn) for fn in args.images)
for im, fout in zip(ims, ims_out):
pre.correct_image_illumination(im, il, True)
io.imsave(im, fout)
if __name__ == '__main__':
main()
| import argparse
parser = argparse.ArgumentParser(description="Run the HUSC functions.")
subpar = parser.add_subparsers()
stitch = subpar.add_parser('stitch',
help="Stitch four quadrants into one image.")
stitch.add_argument('quadrant_image', nargs=4, metavar='IM',
help="The images for each quadrant in order: NW, NE, " +
"SW, SE.")
stitch.add_argument('output_image',
help="The filename for the stitched image.")
illum = subpar.add_parser('illum',
help="Estimate and correct illumination.")
illum.add_argument('images', nargs='+',
help="The input images.")
illum.add_argument('-o', '--output-suffix',
default='.illum.tif', metavar='SUFFIX',
help="What suffix to attach to the corrected images.")
def main():
"""Fetch commands from the command line."""
args = parser.parse_args()
print args
if __name__ == '__main__':
main()
| bsd-3-clause | Python |
56a653806f72d701132a930d7583397aba152120 | Add hosts to the admin. | bryanveloso/avalonstar-tv,bryanveloso/avalonstar-tv,bryanveloso/avalonstar-tv | avalonstar/apps/broadcasts/admin.py | avalonstar/apps/broadcasts/admin.py | # -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Broadcast, Highlight, Host, Raid, Series
class HighlightInline(admin.StackedInline):
extra = 1
model = Highlight
class HostInline(admin.TabularInline):
extra = 1
model = Host
class RaidInline(admin.TabularInline):
extra = 1
model = Raid
class BroadcastAdmin(admin.ModelAdmin):
inlines = [RaidInline, HighlightInline]
list_display = ['number', 'airdate', 'status', 'series', 'game_list']
list_editable = ['airdate', 'status', 'series']
list_display_links = ['number']
raw_id_fields = ['games', 'series']
autocomplete_lookup_fields = {
'fk': ['series'],
'm2m': ['games']
}
def game_list(self, obj):
return ", ".join([g.name for g in obj.games.all()])
admin.site.register(Broadcast, BroadcastAdmin)
class HighlightAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': (('broadcast', 'twid'),)}),
('Details', {'fields': ('title', 'description', 'game', 'url')})
)
list_display = ['title', 'broadcast', 'game', 'twid', 'url']
list_display_links = ['title', 'broadcast']
raw_id_fields = ['broadcast', 'game']
autocomplete_lookup_fields = {'fk': ['game']}
admin.site.register(Highlight, HighlightAdmin)
class HostAdmin(admin.ModelAdmin):
list_display = ['timestamp', 'hoster', 'broadcast']
admin.site.register(Host, HostAdmin)
class RaidAdmin(admin.ModelAdmin):
list_display = ['timestamp', 'broadcast', 'raider', 'game']
admin.site.register(Raid, RaidAdmin)
class SeriesAdmin(admin.ModelAdmin):
pass
admin.site.register(Series, SeriesAdmin)
| # -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Broadcast, Highlight, Raid, Series
class HighlightInline(admin.StackedInline):
extra = 1
model = Highlight
class RaidInline(admin.TabularInline):
extra = 1
model = Raid
class BroadcastAdmin(admin.ModelAdmin):
inlines = [RaidInline, HighlightInline]
list_display = ['number', 'airdate', 'status', 'series', 'game_list']
list_editable = ['airdate', 'status', 'series']
list_display_links = ['number']
raw_id_fields = ['games', 'series']
autocomplete_lookup_fields = {
'fk': ['series'],
'm2m': ['games']
}
def game_list(self, obj):
return ", ".join([g.name for g in obj.games.all()])
admin.site.register(Broadcast, BroadcastAdmin)
class HighlightAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': (('broadcast', 'twid'),)}),
('Details', {'fields': ('title', 'description', 'game', 'url')})
)
list_display = ['title', 'broadcast', 'game', 'twid', 'url']
list_display_links = ['title', 'broadcast']
raw_id_fields = ['broadcast', 'game']
autocomplete_lookup_fields = {'fk': ['game']}
admin.site.register(Highlight, HighlightAdmin)
class RaidAdmin(admin.ModelAdmin):
list_display = ['timestamp', 'broadcast', 'raider', 'game']
admin.site.register(Raid, RaidAdmin)
class SeriesAdmin(admin.ModelAdmin):
pass
admin.site.register(Series, SeriesAdmin)
| apache-2.0 | Python |
6b7e32c98fa8a11dcd7bbbadaa2a057e4ff0ce90 | Add function to create name strings with random characters | F5Networks/f5-openstack-test,pjbreaux/f5-openstack-test | f5_os_test/__init__.py | f5_os_test/__init__.py | # Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import string
__version__ = '0.2.0'
def random_name(prefix, N):
"""Creates a name with random characters.
Returns a new string created from an input prefix appended with a set of
random characters. The number of random characters appended to
the prefix string is defined by the N parameter. For example,
random_name('test_', 6) might return "test_FR3N5Y"
:param string prefix: String to append randoms characters.
:param int N: Number of random characters to append.
"""
return prefix + ''.join(
random.SystemRandom().choice(
string.ascii_uppercase + string.digits) for _ in range(N))
| # Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = '0.2.0'
| apache-2.0 | Python |
5136de58efd0a8cef7802926d2076e00d9372b99 | change python interpreter to linux | Jailman/RaspberryPiRobot,Jailman/RaspberryPiRobot,Jailman/RaspberryPiRobot,Jailman/RaspberryPiRobot | Modules/mailman.py | Modules/mailman.py | #!/usr/bin/python
# coding:utf-8
__author__ = 'Jailman'
import smtplib
from email.mime.text import MIMEText
class MailToWarn():
def __init__(self, mailto_list, mail_host, mail_user, mail_pass):
self.mailto_list = mailto_list
self.mail_host = mail_host
self.mail_user = mail_user
self.mail_pass = mail_pass
def send_mail(self, content):
msg = MIMEText(content, _subtype='html', _charset='gb2312')
msg['Subject'] = "RaspberryPi Robot Mailer" # Email subject
msg['From'] = self.mail_user
# msg['To'] = self.mailto_list
strTo = self.mailto_list
msg['To'] = ','.join(strTo)
try:
s = smtplib.SMTP()
s.connect(self.mail_host) # SMTP server
s.login(self.mail_user, self.mail_pass) # login
s.sendmail(self.mail_user, self.mailto_list,
msg.as_string()) # sendmail
s.close()
return True
except Exception, e:
print str(e)
return False
def Mailer(list, content):
mail_host = 'smtp.sina.com'
mail_user = 'xxx@sina.com'
mail_pass = 'xxx'
mailer = MailToWarn(list, mail_host, mail_user, mail_pass)
if mailer.send_mail(content):
print 'Success'
else:
print 'Fail'
if __name__ == '__main__':
list = ['541197941@qq.com', 'jailman@sina.com']
content = 'Test Mail'
Mailer(list, content)
| #!C:\Python27\python.exe
# coding:utf-8
__author__ = 'Jailman'
import smtplib
from email.mime.text import MIMEText
class MailToWarn():
def __init__(self, mailto_list, mail_host, mail_user, mail_pass):
self.mailto_list = mailto_list
self.mail_host = mail_host
self.mail_user = mail_user
self.mail_pass = mail_pass
def send_mail(self, content):
msg = MIMEText(content, _subtype='html', _charset='gb2312')
msg['Subject'] = "RaspberryPi Robot Mailer" # Email subject
msg['From'] = self.mail_user
# msg['To'] = self.mailto_list
strTo = self.mailto_list
msg['To'] = ','.join(strTo)
try:
s = smtplib.SMTP()
s.connect(self.mail_host) # SMTP server
s.login(self.mail_user, self.mail_pass) # login
s.sendmail(self.mail_user, self.mailto_list,
msg.as_string()) # sendmail
s.close()
return True
except Exception, e:
print str(e)
return False
def Mailer(list, content):
mail_host = 'smtp.sina.com'
mail_user = 'xxx@sina.com'
mail_pass = 'xxx'
mailer = MailToWarn(list, mail_host, mail_user, mail_pass)
if mailer.send_mail(content):
print 'Success'
else:
print 'Fail'
if __name__ == '__main__':
list = ['541197941@qq.com', 'jailman@sina.com']
content = 'Test Mail'
Mailer(list, content)
| apache-2.0 | Python |
28df33d6383c680673351ea8fc8136bfef75c3f5 | Add a test for `identity_wrapper`. | nanshe-org/nanshe,nanshe-org/nanshe,jakirkham/nanshe,DudLab/nanshe,DudLab/nanshe,jakirkham/nanshe | tests/test_generic_decorators.py | tests/test_generic_decorators.py | __author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Mar 25, 2015 13:30:52 EDT$"
import functools
import nanshe.nanshe.generic_decorators
class TestGenericDecorators(object):
def test_update_wrapper(self):
def wrapper(a_callable):
def wrapped(*args, **kwargs):
return(a_callable(*args, **kwargs))
return(wrapped)
def func(a, b=2):
return(a + b)
func_wrapped_1 = functools.update_wrapper(wrapper, func)
if not hasattr(func_wrapped_1, "__wrapped__"):
setattr(func_wrapped_1, "__wrapped__", func)
func_wrapped_2 = nanshe.nanshe.generic_decorators.update_wrapper(
wrapper, func
)
assert func_wrapped_1 == func_wrapped_2
def test_wraps(self):
def wrapper(a_callable):
def wrapped(*args, **kwargs):
return(a_callable(*args, **kwargs))
return(wrapped)
def func(a, b=2):
return(a + b)
func_wrapped_1 = functools.wraps(wrapper)(func)
if not hasattr(func_wrapped_1, "__wrapped__"):
setattr(func_wrapped_1, "__wrapped__", func)
func_wrapped_2 = nanshe.nanshe.generic_decorators.wraps(wrapper)(
func
)
assert func_wrapped_1 == func_wrapped_2
def test_identity_wrapper(self):
def func(a, b=2):
return(a + b)
func_wrapped = nanshe.nanshe.generic_decorators.identity_wrapper(
func
)
assert func_wrapped != func
assert not hasattr(func, "__wrapped__")
assert hasattr(func_wrapped, "__wrapped__")
assert func_wrapped.__wrapped__ == func
def test_static_variables(self):
def func(a, b=2):
return(a + b)
func = nanshe.nanshe.generic_decorators.static_variables(
c = 7
)(
func
)
assert hasattr(func, "c")
assert func.c == 7
| __author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Mar 25, 2015 13:30:52 EDT$"
import functools
import nanshe.nanshe.generic_decorators
class TestGenericDecorators(object):
def test_update_wrapper(self):
def wrapper(a_callable):
def wrapped(*args, **kwargs):
return(a_callable(*args, **kwargs))
return(wrapped)
def func(a, b=2):
return(a + b)
func_wrapped_1 = functools.update_wrapper(wrapper, func)
if not hasattr(func_wrapped_1, "__wrapped__"):
setattr(func_wrapped_1, "__wrapped__", func)
func_wrapped_2 = nanshe.nanshe.generic_decorators.update_wrapper(
wrapper, func
)
assert func_wrapped_1 == func_wrapped_2
def test_wraps(self):
def wrapper(a_callable):
def wrapped(*args, **kwargs):
return(a_callable(*args, **kwargs))
return(wrapped)
def func(a, b=2):
return(a + b)
func_wrapped_1 = functools.wraps(wrapper)(func)
if not hasattr(func_wrapped_1, "__wrapped__"):
setattr(func_wrapped_1, "__wrapped__", func)
func_wrapped_2 = nanshe.nanshe.generic_decorators.wraps(wrapper)(
func
)
assert func_wrapped_1 == func_wrapped_2
def test_static_variables(self):
def func(a, b=2):
return(a + b)
func = nanshe.nanshe.generic_decorators.static_variables(
c = 7
)(
func
)
assert hasattr(func, "c")
assert func.c == 7
| bsd-3-clause | Python |
37a3ddcb4366e94e0ca78675e2545f889138c11f | Update fastai/_pytorch_doc.py | fastai/fastai | fastai/_pytorch_doc.py | fastai/_pytorch_doc.py | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/99_pytorch_doc.ipynb (unless otherwise specified).
__all__ = ['PYTORCH_URL', 'pytorch_doc_link']
# Cell
from types import ModuleType
# Cell
PYTORCH_URL = 'https://pytorch.org/docs/stable/'
# Cell
def _mod2page(
mod:ModuleType, # A PyTorch module
) -> str:
"Get the webpage name for a PyTorch module"
if mod == Tensor: return 'tensors.html'
name = mod.__name__
name = name.replace('torch.', '').replace('utils.', '')
if name.startswith('nn.modules'): return 'nn.html'
return f'{name}.html'
# Cell
import importlib
# Cell
def pytorch_doc_link(
name:str, # Name of a pytorch module, class or function
) -> (str, None):
"Get the URL to the documentation of a pytorch module, class or function"
if name.startswith('F'): name = 'torch.nn.functional' + name[1:]
if not name.startswith('torch.'): name = 'torch.' + name
if name == 'torch.Tensor': return f'{PYTORCH_URL}tensors.html'
try:
mod = importlib.import_module(name)
return f'{PYTORCH_URL}{_mod2page(mod)}'
except: pass
splits = name.split('.')
mod_name,fname = '.'.join(splits[:-1]),splits[-1]
if mod_name == 'torch.Tensor': return f'{PYTORCH_URL}tensors.html#{name}'
try:
mod = importlib.import_module(mod_name)
page = _mod2page(mod)
return f'{PYTORCH_URL}{page}#{name}'
except: return None | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/99_pytorch_doc.ipynb (unless otherwise specified).
__all__ = ['PYTORCH_URL', 'pytorch_doc_link']
# Cell
from types import ModuleType
# Cell
PYTORCH_URL = 'https://pytorch.org/docs/stable/'
# Cell
def _mod2page(
mod:ModuleType, # A PyTorch module
) -> str:
"Get the web page name for a pytorch module"
if mod == Tensor: return 'tensors.html'
name = mod.__name__
name = name.replace('torch.', '').replace('utils.', '')
if name.startswith('nn.modules'): return 'nn.html'
return f'{name}.html'
# Cell
import importlib
# Cell
def pytorch_doc_link(
name:str, # Name of a pytorch module, class or function
) -> (str, None):
"Get the URL to the documentation of a pytorch module, class or function"
if name.startswith('F'): name = 'torch.nn.functional' + name[1:]
if not name.startswith('torch.'): name = 'torch.' + name
if name == 'torch.Tensor': return f'{PYTORCH_URL}tensors.html'
try:
mod = importlib.import_module(name)
return f'{PYTORCH_URL}{_mod2page(mod)}'
except: pass
splits = name.split('.')
mod_name,fname = '.'.join(splits[:-1]),splits[-1]
if mod_name == 'torch.Tensor': return f'{PYTORCH_URL}tensors.html#{name}'
try:
mod = importlib.import_module(mod_name)
page = _mod2page(mod)
return f'{PYTORCH_URL}{page}#{name}'
except: return None | apache-2.0 | Python |
cde3547d7492049a40858a02b85b054accc6da61 | save image to temp file | NerdHerd91/Spacewalls,NerdHerd91/Spacewalls,NerdHerd91/Spacewalls | background_changer/setbackground.py | background_changer/setbackground.py | #! /usr/bin/python
import ctypes
import os
from os.path import expanduser
import platform
import subprocess
import sys
import tempfile
import time
from threading import Thread
import urllib
image_url = "http://lorempixel.com/1920/1080/";
PLATFORM_LINUX = "Linux"
PLATFORM_OSX = "Darwin"
PLATFORM_WINDOWS = "Windows"
platform = platform.system()
if not(platform == PLATFORM_LINUX or platform == PLATFORM_OSX or platform == PLATFORM_WINDOWS):
print "Your platform (" + platform + ") is not supported."
sys.exit(-1)
def download(image_url, dest_path):
print "starting download of: " + image_url
f = urllib.urlopen(image_url)
with open(dest_path, "wb") as imageFile:
imageFile.write(f.read())
print "wrote download to: " + dest_path
# windows call to set wallpaper
SPI_SETDESKWALLPAPER = 20
# applescript to set wallpaper
OSX_SET_WALLPAPER_APPLESCRIPT = """/usr/bin/osascript<<END
tell application "Finder"
set desktop picture to POSIX file "%s"
end tell
END"""
def setBackground(image_path):
if platform == PLATFORM_LINUX:
os.system("gsettings set org.gnome.desktop.background picture-uri file://" + image_path)
if platform == PLATFORM_OSX:
subprocess.Popen(OSX_SET_WALLPAPER_APPLESCRIPT%image_path, shell=True)
if platform == PLATFORM_WINDOWS:
ctypes.windll.user32.SystemParametersInfoA(SPI_SETDESKWALLPAPER, 0, image_path, 0)
print "background changed"
def updateBackground():
(fd, filename) = tempfile.mkstemp()
try:
download(image_url, filename)
setBackground(filename)
time.sleep(2) # setting the background fails if we don't wait before deleting
finally:
os.remove(filename)
print "done"
class UpdateBackgroundThread(Thread):
def __init__(self):
self.stopped = False
Thread.__init__(self) #super constructor
def run(self):
while not self.stopped:
updateBackground()
time.sleep(5)
#UpdateBackgroundThread().start()
updateBackground()
| #! /usr/bin/python
import ctypes
import os
from os.path import expanduser
import platform
import subprocess
import sys
import time
from threading import Thread
import urllib
image_url = "http://lorempixel.com/1920/1080/";
dest_path = expanduser("~") + "/image.jpg"
PLATFORM_LINUX = "Linux"
PLATFORM_OSX = "Darwin"
PLATFORM_WINDOWS = "Windows"
platform = platform.system()
if not(platform == PLATFORM_LINUX or platform == PLATFORM_OSX or platform == PLATFORM_WINDOWS):
print "Your platform (" + platform + ") is not supported."
sys.exit(-1)
def download(image_url, dest_path):
print "starting download of: " + image_url
f = urllib.urlopen(image_url)
with open(dest_path, "wb") as imageFile:
imageFile.write(f.read())
print "wrote download to: " + dest_path
# windows call to set wallpaper
SPI_SETDESKWALLPAPER = 20
# applescript to set wallpaper
OSX_SET_WALLPAPER_APPLESCRIPT = """/usr/bin/osascript<<END
tell application "Finder"
set desktop picture to POSIX file "%s"
end tell
END"""
def setBackground(image_path):
if platform == PLATFORM_LINUX:
os.system("gsettings set org.gnome.desktop.background picture-uri file://" + image_path)
if platform == PLATFORM_OSX:
subprocess.Popen(OSX_SET_WALLPAPER_APPLESCRIPT%image_path, shell=True)
if platform == PLATFORM_WINDOWS:
ctypes.windll.user32.SystemParametersInfoA(SPI_SETDESKWALLPAPER, 0, image_path, 0)
print "background changed"
def updateBackground():
download(image_url, dest_path)
setBackground(dest_path)
class UpdateBackgroundThread(Thread):
def __init__(self):
self.stopped = False
Thread.__init__(self) #super constructor
def run(self):
while not self.stopped:
updateBackground()
time.sleep(3)
#UpdateBackgroundThread().start()
updateBackground()
| mit | Python |
7bdc81f8fc1fd069c81eea622ae4f9bc31dcb8d5 | Remove long flag | Encrylize/EasyEuler | EasyEuler/commands/list.py | EasyEuler/commands/list.py | import click
from EasyEuler import data
@click.command()
@click.option('--sort', '-s', type=click.Choice(['id', 'difficulty']),
default='id', help='Sort the list by problem attribute.')
def cli(sort):
""" Lists all available problems. """
problem_list = []
for problem in sorted(data.problems, key=lambda p: p[sort.lower()]):
problem_list.append('Problem {id}: {name}'.format(**problem))
click.echo_via_pager('\n'.join(problem_list))
| import click
from EasyEuler import data
@click.command()
@click.option('--long', '-l', is_flag=True,
help='Include problem descriptions.')
@click.option('--sort', '-s', type=click.Choice(['id', 'difficulty']),
default='id', help='Sort the list by problem attribute.')
def cli(long, sort):
""" Lists all available problems. """
problem_list = []
for problem in sorted(data.problems, key=lambda p: p[sort.lower()]):
problem_string = 'Problem {id}: {name}'.format(**problem)
if long:
problem_string += '\n=========================================\n\n'
problem_string += problem['description']
problem_string += '\n\n========================================='
problem_list.append(problem_string)
click.echo_via_pager('\n'.join(problem_list))
| mit | Python |
f9ded788d74c18cec6e41b3258f83d5ecc20e49a | Fix newLine in gDrive | TomasBedrnik/Meteo-Backend,TomasBedrnik/Meteo-Backend,TomasBedrnik/Meteo-Backend | getTemperatures.py | getTemperatures.py | #!/usr/bin/python2
#should be executed each minute with cron
#*/1 * * * * /path/getTemperatures.py
#system time has tobe rigt -> start after NTP time sync
import datetime
import os
import glob
import time
#Adafruit_DHT has to be installed -> https://github.com/adafruit/Adafruit_Python_DHT.git
import Adafruit_DHT
#Adafruit_BMP has to be installed -> https://github.com/adafruit/Adafruit_Python_BMP.git
import Adafruit_BMP.BMP085 as BMP085
import inspect
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
pin = 27
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.AM2302, pin)
d = datetime.datetime.now()
base_dir = '/sys/bus/w1/devices/'
device_folder_1 = glob.glob(base_dir + '28*')[0]
device_folder_2 = glob.glob(base_dir + '28*')[1]
device_file_1 = device_folder_1 + '/w1_slave'
device_file_2 = device_folder_2 + '/w1_slave'
def read_temp_raw(device_file):
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp(device_file):
lines = read_temp_raw(device_file)
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw(device_file)
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
return temp_c
sensor = BMP085.BMP085()
temp = sensor.read_temperature()
pressure = sensor.read_pressure()
#print 'Temp = {0:0.2f} *C'.format(temp)
#print 'Pressure = {0:0.2f} Pa'.format(pressure)
filename = d.strftime("%Y-%m-%d")+".csv"
dataString = d.strftime("%Y-%m-%d %H:%M:%S,"+str(temperature)+","+str(humidity)+","+str(read_temp(device_file_1))+","+str(read_temp(device_file_2))+","+str(temp)+","+str(pressure))
with open("/home/john/meteor-Data/"+filename, "a") as f:
f.write(dataString+"\n")
path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
os.chdir(path)
os.system(os.path.join(path, 'uploadToDrive.py') + " -add "+filename+" data \""+dataString+"\"") | #!/usr/bin/python2
#should be executed each minute with cron
#*/1 * * * * /path/getTemperatures.py
#system time has tobe rigt -> start after NTP time sync
import datetime
import os
import glob
import time
#Adafruit_DHT has to be installed -> https://github.com/adafruit/Adafruit_Python_DHT.git
import Adafruit_DHT
#Adafruit_BMP has to be installed -> https://github.com/adafruit/Adafruit_Python_BMP.git
import Adafruit_BMP.BMP085 as BMP085
import inspect
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
pin = 27
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.AM2302, pin)
d = datetime.datetime.now()
base_dir = '/sys/bus/w1/devices/'
device_folder_1 = glob.glob(base_dir + '28*')[0]
device_folder_2 = glob.glob(base_dir + '28*')[1]
device_file_1 = device_folder_1 + '/w1_slave'
device_file_2 = device_folder_2 + '/w1_slave'
def read_temp_raw(device_file):
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp(device_file):
lines = read_temp_raw(device_file)
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw(device_file)
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
return temp_c
sensor = BMP085.BMP085()
temp = sensor.read_temperature()
pressure = sensor.read_pressure()
#print 'Temp = {0:0.2f} *C'.format(temp)
#print 'Pressure = {0:0.2f} Pa'.format(pressure)
filename = d.strftime("%Y-%m-%d")+".csv"
dataString = d.strftime("%Y-%m-%d %H:%M:%S,"+str(temperature)+","+str(humidity)+","+str(read_temp(device_file_1))+","+str(read_temp(device_file_2))+","+str(temp)+","+str(pressure)+"\n")
with open("/home/john/meteor-Data/"+filename, "a") as f:
f.write(dataString)
path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
os.chdir(path)
os.system(os.path.join(path, 'uploadToDrive.py') + " -add "+filename+" data \""+dataString+"\"") | lgpl-2.1 | Python |
b761ce959f512ce391a2a073462d00e9c90ab1f3 | Migrate phase example to cartopy. | atmtools/typhon,atmtools/typhon | doc/pyplots/plot_phase.py | doc/pyplots/plot_phase.py | # -*- coding: utf-8 -*-
"""Plot to demonstrate the phase colormap. """
import matplotlib.pyplot as plt
import netCDF4
import numpy as np
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import (LONGITUDE_FORMATTER, LATITUDE_FORMATTER)
from matplotlib.ticker import FuncFormatter
from typhon.plots.maps import get_cfeatures_at_scale
@FuncFormatter
def degree_formatter(x, pos):
    """Create degree ticklabels for radian data.

    Wrapped by matplotlib's ``FuncFormatter``, so the name
    ``degree_formatter`` is bound to a formatter object rather than a plain
    function.  ``x`` is the tick value in radians; ``pos`` is the tick
    position (unused, required by the formatter signature).
    """
    return '{:.0f}\N{DEGREE SIGN}'.format(np.rad2deg(x))
# Read wind data.
with netCDF4.Dataset('_data/test_data.nc') as nc:
nth = 5
lon, lat = np.meshgrid(nc.variables['lon'][::nth],
nc.variables['lat'][::nth])
u, v = nc.variables['u'][::nth, ::nth], nc.variables['v'][::nth, ::nth]
wdir = np.arctan2(u, v) + np.pi
# Create plot with PlateCarree projection.
fig, ax = plt.subplots(figsize=(10, 8))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent([3, 16, 47, 56])
# Add map "features".
features = get_cfeatures_at_scale(scale='50m')
ax.add_feature(features.BORDERS)
ax.add_feature(features.COASTLINE)
ax.add_feature(features.OCEAN)
# Plot the actual data.
sm = ax.quiver(lon, lat, u, v, wdir,
cmap=plt.get_cmap('phase', 8),
transform=ccrs.PlateCarree(),
)
# Add custom colorbar for wind directions (e.g. tick format).
cb = fig.colorbar(sm, label='Wind direction', format=degree_formatter,
fraction=0.0328, pad=0.02)
cb.set_ticks(np.linspace(0, 2 * np.pi, 9))
# Add coordinate system without drawing gridlines.
gl = ax.gridlines(draw_labels=True, color='none')
gl.xformatter, gl.yformatter = LONGITUDE_FORMATTER, LATITUDE_FORMATTER
gl.xlabels_top = gl.ylabels_right = False
fig.tight_layout()
plt.show()
| # -*- coding: utf-8 -*-
"""Plot to demonstrate the phase colormap.
"""
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap
from matplotlib.ticker import FuncFormatter
import typhon
@FuncFormatter
def degree_formatter(x, pos):
"""Create degree ticklabels for radian data."""
return '{:.0f}\N{DEGREE SIGN}'.format(np.rad2deg(x))
nc = Dataset('_data/test_data.nc')
nth = 5
lon, lat = np.meshgrid(nc.variables['lon'][::nth], nc.variables['lat'][::nth])
u, v = nc.variables['u'][::nth, ::nth], nc.variables['v'][::nth, ::nth]
wdir = np.arctan2(u, v) + np.pi
fig, ax = plt.subplots(figsize=(10, 8))
m = Basemap(projection='cyl', resolution='i',
llcrnrlat=47, llcrnrlon=3,
urcrnrlat=56, urcrnrlon=16)
m.drawcoastlines()
m.drawcountries()
m.drawmeridians(np.arange(0, 20, 2), labels=[0, 0, 0, 1])
m.drawparallels(np.arange(45, 60, 2), labels=[1, 0, 0, 0])
m.quiver(lon, lat, u, v, wdir, cmap=plt.get_cmap('phase', 8), latlon=True)
# Use our own ticklabel formatter.
cb = m.colorbar(label='Wind direction', format=degree_formatter)
cb.set_ticks(np.linspace(0, 2 * np.pi, 9))
fig.tight_layout()
plt.show()
| mit | Python |
8cdea69e51241497f90c576ee386e9d7a2b011af | Update hyper-parameters to test the script for training | quqixun/BrainTumorClassification,quqixun/BrainTumorClassification | src/btc_parameters.py | src/btc_parameters.py | # Brain Tumor Classification
# Script for Hyper Parameters
# Author: Qixun Qu
# Create on: 2017/10/14
# Modify on: 2017/10/14
# ,,, ,,,
# ;" '; ;' ",
# ; @.ss$$$$$$s.@ ;
# `s$$$$$$$$$$$$$$$'
# $$$$$$$$$$$$$$$$$$
# $$$$P""Y$$$Y""W$$$$$
# $$$$ p"$$$"q $$$$$
# $$$$ .$$$$$. $$$$'
# $$$DaU$$O$$DaU$$$'
# '$$$$'.^.'$$$$'
# '&$$$$$&'
import os
import json
from btc_settings import *
parent_dir = os.path.dirname(os.getcwd())
tfrecords_dir = os.path.join(parent_dir, DATA_FOLDER, TFRECORDS_FOLDER)
tpath = os.path.join(tfrecords_dir, "partial_train.tfrecord")
vpath = os.path.join(tfrecords_dir, "partial_validate.tfrecord")
json_path = os.path.join(TEMP_FOLDER, TFRECORDS_FOLDER, VOLUMES_NUM_FILE)
with open(json_path) as json_file:
volumes_num = json.load(json_file)
parameters = {
# Basic settings
"train_path": tpath,
"validate_path": vpath,
"train_num": 236, # volumes_num["train"],
"validate_num": 224, # volumes_num["validate"],
"classes_num": 3,
"patch_shape": PATCH_SHAPE,
"capacity": 350,
"min_after_dequeue": 300,
# Hyper-parameters
"batch_size": 10,
"num_epoches": 2
}
| # Brain Tumor Classification
# Script for Hyper Parameters
# Author: Qixun Qu
# Create on: 2017/10/14
# Modify on: 2017/10/14
# ,,, ,,,
# ;" '; ;' ",
# ; @.ss$$$$$$s.@ ;
# `s$$$$$$$$$$$$$$$'
# $$$$$$$$$$$$$$$$$$
# $$$$P""Y$$$Y""W$$$$$
# $$$$ p"$$$"q $$$$$
# $$$$ .$$$$$. $$$$'
# $$$DaU$$O$$DaU$$$'
# '$$$$'.^.'$$$$'
# '&$$$$$&'
import os
import json
from btc_settings import *
parent_dir = os.path.dirname(os.getcwd())
tfrecords_dir = os.path.join(parent_dir, DATA_FOLDER, TFRECORDS_FOLDER)
tpath = os.path.join(tfrecords_dir, "partial_train.tfrecord")
vpath = os.path.join(tfrecords_dir, "partial_validate.tfrecord")
json_path = os.path.join(TEMP_FOLDER, TFRECORDS_FOLDER, VOLUMES_NUM_FILE)
with open(json_path) as json_file:
volumes_num = json.load(json_file)
parameters = {
# Basic settings
"train_path": tpath,
"validate_path": vpath,
"train_num": 236, # volumes_num["train"]
"validate_num": 224, # volumes_num["validate"]
"patch_shape": PATCH_SHAPE,
"capacity": 350,
"min_after_dequeue": 300,
# Hyper-parameters
"batch_size": 20,
"num_epoches": 3
}
| mit | Python |
454b04d8426803f6e1d4fffd0ad5b2bfb3d41531 | remove unneeded line | jmercouris/NeXT,jmercouris/NeXT | ports/pyqt-webengine/minibuffer.py | ports/pyqt-webengine/minibuffer.py | from PyQt5.QtWebEngineWidgets import QWebEngineView
import core_interface
class Minibuffer(QWebEngineView):
    """Web-engine view that renders a window's minibuffer and bridges
    asynchronous JavaScript evaluation back to the LISP core via
    ``core_interface``.
    """
    # Class-level defaults; note that ``self.callback_count += 1`` below
    # creates a per-instance counter the first time it runs.
    scripts = {}
    callback_count = 0
    window_identifier = 0
    def __init__(self, window_identifier, parent=None):
        super(Minibuffer, self).__init__(parent)
        self.window_identifier = window_identifier
    def evaluate_javascript(self, script):
        """
        This method returns an identifier (str) to the LISP core. Upon
        completion of the javascript script, the platform port will
        make a call to the LISP core with the results of that
        computation, and the associated identifier.
        :param script: the JavaScript script to execute
        :returns: a callback_id
        :rtype: string
        """
        self.callback_count += 1
        # runJavaScript is asynchronous: the lambda receives the result and
        # relays it, tagged with this call's id, to javascript_callback.
        self.page().runJavaScript(
            script,
            lambda x: self.javascript_callback(x, str(self.callback_count)))
        return str(self.callback_count)
    def javascript_callback(self, res, callback_id):
        # A None result means the script produced no value; nothing to send.
        if res is None:
            return
        core_interface.minibuffer_javascript_call_back(str(self.window_identifier), res, callback_id)
    def set_height(self, height):
        # The minibuffer has a fixed height that the core sets explicitly.
        self.setFixedHeight(height)
| from PyQt5.QtWebEngineWidgets import QWebEngineView
import core_interface
class Minibuffer(QWebEngineView):
"""Documentation for Minibuffer
"""
scripts = {}
callback_count = 0
window_identifier = 0
def __init__(self, window_identifier, parent=None):
super(Minibuffer, self).__init__(parent)
self.window_identifier = window_identifier
self.setHtml("") # breaks without this line
def evaluate_javascript(self, script):
"""
This method returns an identifier (str) to the LISP core. Upon
completion of the javascript script, the platform port will
make a call to the LISP core with the results of that
computation, and the associated identifier.
:param script: the JavaScript script to execute
:returns: a callback_id
:rtype: string
"""
self.callback_count += 1
self.page().runJavaScript(
script,
lambda x: self.javascript_callback(x, str(self.callback_count)))
return str(self.callback_count)
def javascript_callback(self, res, callback_id):
if res is None:
return
core_interface.minibuffer_javascript_call_back(str(self.window_identifier), res, callback_id)
def set_height(self, height):
self.setFixedHeight(height)
| bsd-3-clause | Python |
724e8770487bddaacbb8a36aca4c5299c0d64474 | clean core urls | ioO/billjobs | core/urls.py | core/urls.py | from django.conf.urls import include, url
from django.contrib import admin
admin.site.site_header = 'Coworking space administration'
urlpatterns = [
url(r'^billing/', include('billing.urls')),
url(r'^admin/', include(admin.site.urls)),
]
| from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
admin.site.site_header = 'Coworking space administration'
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'billjobs.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', 'billing.views.redirect_home'),
url(r'^admin/generate_pdf/(?P<id>\d+)/$', 'billing.views.generate_pdf',
name='generate_pdf'),
url(r'^admin/', include(admin.site.urls)),
)
| mit | Python |
ddd508257a03e7a138720a9b4b2090fd231a626d | add a filename token. | abadger/Bento,cournape/Bento,abadger/Bento,cournape/Bento,cournape/Bento,abadger/Bento,abadger/Bento,cournape/Bento | toydist/config_parser/grammar.py | toydist/config_parser/grammar.py | import os
from toydist.compat.pyparsing import \
Literal, WordStart, CharsNotIn, LineEnd, alphas, Word, \
indentedBlock, OneOrMore, ZeroOrMore, OnlyOnce, \
Group, empty, lineEnd, FollowedBy, col, alphanums, \
Forward, Optional, delimitedList, \
ParseException, ParseFatalException
#---------------------------------
# Grammar definition
#---------------------------------
# Literals
colon = Literal(':').suppress()
comma_sep = Literal(',').suppress()
string = WordStart() + CharsNotIn('\n')
word = Word(alphas)
newline = LineEnd().suppress()
module_name = Word(alphanums + '_')
full_module_name = Group(module_name + \
ZeroOrMore(Literal('.').suppress() + module_name)
)
filename = Word(alphanums + '.' + os.pathsep)
indent_stack = [1]
def checkPeerIndent(s,l,t):
    """Parse action: succeed only when the match at location ``l`` begins in
    the same column as the current indentation level (top of indent_stack).

    Raises ParseFatalException for deeper (illegally nested) columns and
    ParseException otherwise, so pyparsing can backtrack.
    """
    cur_col = col(l,s)
    if cur_col != indent_stack[-1]:
        # NOTE(review): the ``not indent_stack`` test is dead code -- an
        # empty stack would already have raised IndexError on the
        # ``indent_stack[-1]`` lookup above.
        if (not indent_stack) or cur_col > indent_stack[-1]:
            raise ParseFatalException(s, l, "illegal nesting")
        raise ParseException(s, l, "not a peer entry")
def checkSubIndent(s,l,t):
cur_col = col(l,s)
if cur_col > indent_stack[-1]:
indent_stack.append(cur_col)
else:
raise ParseException(s, l, "not a subentry")
def checkUnindent(s,l,t):
if l >= len(s):
return
cur_col = col(l,s)
if not(cur_col < indent_stack[-1] and cur_col <= indent_stack[-2]):
raise ParseException(s, l, "not an unindent")
def doUnindent():
indent_stack.pop()
INDENT = lineEnd.suppress() + empty + empty.copy().setParseAction(checkSubIndent)
UNDENT = FollowedBy(empty).setParseAction(checkUnindent)
UNDENT.setParseAction(doUnindent)
stmt = Forward()
stmt.setParseAction(checkPeerIndent)
grammar = Group(OneOrMore(empty + stmt))
# metadata fields
name = Literal('Name')
name_definition = name + colon + word
summary = Literal('Summary')
summary_definition = summary + colon + string
author = Literal('Author')
author_definition = author + colon + string
indented_string = string.copy()
indented_string.setParseAction(checkPeerIndent)
multiline_string = Group(OneOrMore(empty + indented_string))
description_definition = Group(
Literal("Description") + colon +
INDENT + multiline_string + UNDENT)
metadata_field = (description_definition | name_definition | summary_definition \
| author_definition)
# Modules section
modules = Literal("Modules")
modules_definition = modules + colon + \
full_module_name + ZeroOrMore(comma_sep + full_module_name)
stmt << (metadata_field | modules_definition)
| from toydist.compat.pyparsing import \
Literal, WordStart, CharsNotIn, LineEnd, alphas, Word, \
indentedBlock, OneOrMore, ZeroOrMore, OnlyOnce, \
Group, empty, lineEnd, FollowedBy, col, alphanums, \
Forward, Optional, delimitedList, \
ParseException, ParseFatalException
#---------------------------------
# Grammar definition
#---------------------------------
# Literals
colon = Literal(':').suppress()
comma_sep = Literal(',').suppress()
string = WordStart() + CharsNotIn('\n')
word = Word(alphas)
newline = LineEnd().suppress()
module_name = Word(alphanums + '_')
full_module_name = Group(module_name + \
ZeroOrMore(Literal('.').suppress() + module_name)
)
indent_stack = [1]
def checkPeerIndent(s,l,t):
cur_col = col(l,s)
if cur_col != indent_stack[-1]:
if (not indent_stack) or cur_col > indent_stack[-1]:
raise ParseFatalException(s, l, "illegal nesting")
raise ParseException(s, l, "not a peer entry")
def checkSubIndent(s,l,t):
cur_col = col(l,s)
if cur_col > indent_stack[-1]:
indent_stack.append(cur_col)
else:
raise ParseException(s, l, "not a subentry")
def checkUnindent(s,l,t):
if l >= len(s):
return
cur_col = col(l,s)
if not(cur_col < indent_stack[-1] and cur_col <= indent_stack[-2]):
raise ParseException(s, l, "not an unindent")
def doUnindent():
indent_stack.pop()
INDENT = lineEnd.suppress() + empty + empty.copy().setParseAction(checkSubIndent)
UNDENT = FollowedBy(empty).setParseAction(checkUnindent)
UNDENT.setParseAction(doUnindent)
stmt = Forward()
stmt.setParseAction(checkPeerIndent)
grammar = Group(OneOrMore(empty + stmt))
# metadata fields
name = Literal('Name')
name_definition = name + colon + word
summary = Literal('Summary')
summary_definition = summary + colon + string
author = Literal('Author')
author_definition = author + colon + string
indented_string = string.copy()
indented_string.setParseAction(checkPeerIndent)
multiline_string = Group(OneOrMore(empty + indented_string))
description_definition = Group(
Literal("Description") + colon +
INDENT + multiline_string + UNDENT)
metadata_field = (description_definition | name_definition | summary_definition \
| author_definition)
# Modules section
modules = Literal("Modules")
modules_definition = modules + colon + \
full_module_name + ZeroOrMore(comma_sep + full_module_name)
stmt << (metadata_field | modules_definition)
| bsd-3-clause | Python |
cd45860f68002145dd7bcd04d201b38bff4cd501 | Bump version | xtrinch/fcm-django | fcm_django/__init__.py | fcm_django/__init__.py | __author__ = "xTrinch"
__email__ = "mojca.rojko@gmail.com"
__version__ = "1.0.12"
class NotificationError(Exception):
pass
try:
from django import VERSION as DJANGO_VERSION
if DJANGO_VERSION < (3, 2):
default_app_config = "fcm_django.apps.FcmDjangoConfig"
except ImportError:
pass
| __author__ = "xTrinch"
__email__ = "mojca.rojko@gmail.com"
__version__ = "1.0.11"
class NotificationError(Exception):
pass
try:
from django import VERSION as DJANGO_VERSION
if DJANGO_VERSION < (3, 2):
default_app_config = "fcm_django.apps.FcmDjangoConfig"
except ImportError:
pass
| mit | Python |
5114973846b90a51155233b2472bfb7b25bcae14 | Update typo in GCE service | fedora-infra/fedimg,fedora-infra/fedimg | fedimg/services/gce.py | fedimg/services/gce.py | # This file is part of fedimg.
# Copyright (C) 2014 Red Hat, Inc.
#
# fedimg is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# fedimg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with fedimg; if not, see http://www.gnu.org/licenses,
# or write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: David Gay <dgay@redhat.com>
#
import os
import subprocess
from libcloud.compute.base import NodeImage
from libcloud.compute.deployment import MultiStepDeployment
from libcloud.compute.deployment import ScriptDeployment, SSHKeyDeployment
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider, DeploymentException
import fedimg
class GCEServiceException(Exception):
""" Custom exception for GCE. """
pass
class GCEService(object):
    """ A class for interacting with a GCE connection. """
    def __init__(self):
        # Zones this service may deploy to; only the first entry is used
        # when constructing the driver below.
        self.datacenters = ['us-central1-a']
    def upload(self, raw_url):
        """ Work in progress: takes a URL to a .raw.xz file and is intended
        to register it as an image with GCE.  Currently only constructs the
        libcloud GCE driver; image creation and fedmsg emission remain TODO
        (see the trailing comments). """
        cls = get_driver(Provider.GCE)
        # NOTE(review): ``driver`` is created but not yet used, and
        # ``raw_url`` is unused until the registration step is implemented.
        driver = cls(fedimg.GCE_EMAIL, fedimg.GCE_KEYPATH,
                     project=fedimg.GCE_PROJECT_ID,
                     datacenter=self.datacenters[0])
        # create image from official Fedora image on GCE
        # emit a fedmsg, etc
| # This file is part of fedimg.
# Copyright (C) 2014 Red Hat, Inc.
#
# fedimg is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# fedimg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with fedimg; if not, see http://www.gnu.org/licenses,
# or write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: David Gay <dgay@redhat.com>
#
import os
import subprocess
from libcloud.compute.base import NodeImage
from libcloud.compute.deployment import MultiStepDeployment
from libcloud.compute.deployment import ScriptDeployment, SSHKeyDeployment
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider, DeploymentException
import fedimg
class GCEServiceException(Exception):
""" Custom exception for GCE. """
pass
class GCEService(object):
""" A class for interacting with a GCE connection. """
def __init__(self):
self.datacenters = ['us-central1-a']
def upload(self, raw_url):
""" Takes a URL to a .raw.xz file and registers it as an image
in each Rackspace region. """
cls = get_driver(Provider.GCE)
driver = cls(fedimg.GCE_EMAIL, fedimg.GCE_KEYPATH,
project=fedimg.GCE_PROJECT_ID,
datacenter=self.datacenters[0])
# create image from offical Fedora image on GCE
# emit a fedmsg, etc
| agpl-3.0 | Python |
10cd33aba58d59e0e44f32b1ffaf5d50e15a33b0 | prepare release 0.9.44 | SEMAFORInformatik/femagtools,SEMAFORInformatik/femagtools | femagtools/__init__.py | femagtools/__init__.py | # -*- coding: utf-8 -*-
"""
femagtools
~~~~~~~~~~
Python bindings for FEMAG
"""
__title__ = 'femagtools'
__version__ = '0.9.44'
__author__ = 'Ronald Tanner'
__license__ = 'BSD'
__copyright__ = 'Copyright 2016-2018 SEMAFOR Informatik & Energie AG'
from .bch import Reader
from .model import MachineModel
from .fsl import Builder
from .magnet import Magnet
from .femag import Femag, ZmqFemag
def read_bchfile(filename):
    """Read BCH/BATCH results from file *filename*.

    Args:
        filename: path to a FEMAG BCH/BATCH result file.

    Returns:
        Reader: a bch.Reader populated from the file's lines.
    """
    import io
    bchresults = Reader()
    # latin1 with errors ignored: result files may contain non-UTF-8 bytes.
    with io.open(filename, encoding='latin1', errors='ignore') as f:
        bchresults.read(f.readlines())
    return bchresults
def create_fsl(machine,
               operatingconditions={},
               magnetmat=[]):
    """create FSL command list from model parameters

    Args:
      machine: dict with machine model parameters
      operatingconditions: dict with operating-condition parameters
      magnetmat: list of dicts with magnet material parameters

    Returns:
      list of FSL command strings

    NOTE(review): the mutable defaults ({} and []) are never mutated here,
    so they are harmless, though None defaults would be more idiomatic.
    """
    model = MachineModel(machine)
    builder = Builder()
    magnets = []
    if magnetmat:
        magnets = Magnet(magnetmat)
    # With operating conditions a full simulation FSL is built; otherwise
    # only the model-creation commands plus a final save are returned.
    if operatingconditions:
        return builder.create(model, operatingconditions, magnets)
    return builder.create_model(model) + ['save_model("cont")']
| # -*- coding: utf-8 -*-
"""
femagtools
~~~~~~~~~~
Python bindings for FEMAG
"""
__title__ = 'femagtools'
__version__ = '0.9.44-snapshot'
__author__ = 'Ronald Tanner'
__license__ = 'BSD'
__copyright__ = 'Copyright 2016-2018 SEMAFOR Informatik & Energie AG'
from .bch import Reader
from .model import MachineModel
from .fsl import Builder
from .magnet import Magnet
from .femag import Femag, ZmqFemag
def read_bchfile(filename):
"""Read BCH/BATCH results from file *filename*."""
import io
bchresults = Reader()
with io.open(filename, encoding='latin1', errors='ignore') as f:
bchresults.read(f.readlines())
return bchresults
def create_fsl(machine,
operatingconditions={},
magnetmat=[]):
"""create FSL command list from model parameters
Args:
machine: dict with parameters
operatuingConditions: dict with parameters
magnetmat: list fo dict with parameters
"""
model = MachineModel(machine)
builder = Builder()
magnets = []
if magnetmat:
magnets = Magnet(magnetmat)
if operatingconditions:
return builder.create(model, operatingconditions, magnets)
return builder.create_model(model) + ['save_model("cont")']
| bsd-2-clause | Python |
c7d86d7dd82421fb1f189d15718eb9a55c5574cf | Add maybe_make_pardir() | ronrest/convenience_py,ronrest/convenience_py | file/maybe_make_dir.py | file/maybe_make_dir.py | import os
# ==============================================================================
# MAYBE_MAKE_DIR
# ==============================================================================
def maybe_make_dir(path):
    """Create *path* (including any missing parent directories) unless it
    already exists on the filesystem.
    """
    # Guard clause: nothing to do when the directory is already present.
    if os.path.exists(path):
        return
    os.makedirs(path)
# ==============================================================================
# GET_PARDIR
# ==============================================================================
def get_pardir(file):
    """Return the directory component of *file*'s path."""
    parent, _tail = os.path.split(file)
    return parent
# ==============================================================================
# MAYBE_MAKE_PARDIR
# ==============================================================================
def maybe_make_pardir(file):
    """ Takes a path to a file, and creates the necessary directory structure
        on the system to ensure that the parent directory exists (if it does
        not already exist)
    """
    pardir = os.path.dirname(file)
    if pardir.strip() != "":  # ensure pardir is not an empty string
        # BUG FIX: the original referenced an undefined name ``path`` here,
        # raising NameError whenever the parent directory had to be checked.
        if not os.path.exists(pardir):
            os.makedirs(pardir)
| import os
# ==============================================================================
# MAYBE_MAKE_DIR
# ==============================================================================
def maybe_make_dir(path):
""" Checks if a directory path exists on the system, if it does not, then
it creates that directory (and any parent directories needed to
create that directory)
"""
if not os.path.exists(path):
os.makedirs(path)
# ==============================================================================
# GET_PARDIR
# ==============================================================================
def get_pardir(file):
""" Given a file path, it returns the parent directory of that file. """
return os.path.dirname(file)
| apache-2.0 | Python |
cd7d7d6843e0159b8d272b8cfc572884d061b89a | Add test for equality between tuple and namedtuple. | pfalcon/micropython,pfalcon/micropython,pfalcon/micropython,pfalcon/micropython,pfalcon/micropython | tests/basics/namedtuple1.py | tests/basics/namedtuple1.py | try:
try:
from ucollections import namedtuple
except ImportError:
from collections import namedtuple
except ImportError:
print("SKIP")
raise SystemExit
T = namedtuple("Tup", ["foo", "bar"])
# CPython prints fully qualified name, what we don't bother to do so far
#print(T)
for t in T(1, 2), T(bar=1, foo=2):
print(t)
print(t[0], t[1])
print(t.foo, t.bar)
print(len(t))
print(bool(t))
print(t + t)
print(t * 3)
print([f for f in t])
print(isinstance(t, tuple))
# Check tuple can compare equal to namedtuple with same elements
print(t == (t[0], t[1]), (t[0], t[1]) == t)
# Create using positional and keyword args
print(T(3, bar=4))
try:
t[0] = 200
except TypeError:
print("TypeError")
try:
t.bar = 200
except AttributeError:
print("AttributeError")
try:
t = T(1)
except TypeError:
print("TypeError")
try:
t = T(1, 2, 3)
except TypeError:
print("TypeError")
try:
t = T(foo=1)
except TypeError:
print("TypeError")
try:
t = T(1, foo=1)
except TypeError:
print("TypeError")
# enough args, but kw is wrong
try:
t = T(1, baz=3)
except TypeError:
print("TypeError")
# bad argument for member spec
try:
namedtuple('T', 1)
except TypeError:
print("TypeError")
# Try single string
T3 = namedtuple("TupComma", "foo bar")
t = T3(1, 2)
print(t.foo, t.bar)
# Try tuple
T4 = namedtuple("TupTuple", ("foo", "bar"))
t = T4(1, 2)
print(t.foo, t.bar)
# Try single string with comma field separator
# Not implemented so far
#T2 = namedtuple("TupComma", "foo,bar")
#t = T2(1, 2)
| try:
try:
from ucollections import namedtuple
except ImportError:
from collections import namedtuple
except ImportError:
print("SKIP")
raise SystemExit
T = namedtuple("Tup", ["foo", "bar"])
# CPython prints fully qualified name, what we don't bother to do so far
#print(T)
for t in T(1, 2), T(bar=1, foo=2):
print(t)
print(t[0], t[1])
print(t.foo, t.bar)
print(len(t))
print(bool(t))
print(t + t)
print(t * 3)
print([f for f in t])
print(isinstance(t, tuple))
# Create using positional and keyword args
print(T(3, bar=4))
try:
t[0] = 200
except TypeError:
print("TypeError")
try:
t.bar = 200
except AttributeError:
print("AttributeError")
try:
t = T(1)
except TypeError:
print("TypeError")
try:
t = T(1, 2, 3)
except TypeError:
print("TypeError")
try:
t = T(foo=1)
except TypeError:
print("TypeError")
try:
t = T(1, foo=1)
except TypeError:
print("TypeError")
# enough args, but kw is wrong
try:
t = T(1, baz=3)
except TypeError:
print("TypeError")
# bad argument for member spec
try:
namedtuple('T', 1)
except TypeError:
print("TypeError")
# Try single string
T3 = namedtuple("TupComma", "foo bar")
t = T3(1, 2)
print(t.foo, t.bar)
# Try tuple
T4 = namedtuple("TupTuple", ("foo", "bar"))
t = T4(1, 2)
print(t.foo, t.bar)
# Try single string with comma field separator
# Not implemented so far
#T2 = namedtuple("TupComma", "foo,bar")
#t = T2(1, 2)
| mit | Python |
d4effa0749734293075b28a739affde55be52727 | add some unit tests for bedpetobed12 | abelhj/svtools,ernfrid/svtools,hall-lab/svtools,ernfrid/svtools,abelhj/svtools,hall-lab/svtools,hall-lab/svtools,abelhj/svtools,abelhj/svtools | tests/bedpetobed12_tests.py | tests/bedpetobed12_tests.py | from unittest import TestCase, main
import os
import sys
import tempfile
import difflib
import svtools.bedpetobed12
class TestBedpeToBlockedBedConverter(TestCase):
def setUp(self):
self.converter = svtools.bedpetobed12.BedpetoBlockedBedConverter('TESTCONV', 20)
def test_track_name(self):
self.assertEqual(self.converter.track_name(), 'track name=TESTCONV itemRgb=On\n')
def test_get_color(self):
self.assertEqual(self.converter.get_color('DEL', 10), '153,0,0')
self.assertEqual(self.converter.get_color('DEL', 50), self.converter.distant_color)
self.assertEqual(self.converter.get_color('ITX', 10), self.converter.unknown_close_color)
def test_bed12_name(self):
self.assertEqual(self.converter.bed12_name('ITX', '22', None), 'ITX;ID=22')
self.assertEqual(self.converter.bed12_name('ITX', '22', '0.25'), 'ITX;ID=22;AF=0.25')
self.assertEqual(self.converter.bed12_name('ITX', '22', '0.25', ('+', '-')), 'ITX;ID=22;AF=0.25;STR=+-')
def test_distant_coordinates(self):
self.assertEqual(self.converter.distant_coordinates('+', 600, 600), (600, 1100))
self.assertEqual(self.converter.distant_coordinates('-', 600, 600), (100, 600))
def test_distant_block_sizes(self):
self.assertEqual(self.converter.distant_block_sizes('+', 600, 800), (200, 1))
self.assertEqual(self.converter.distant_block_sizes('-', 600, 800), (1, 200))
def test_distant_block_starts(self):
self.assertEqual(self.converter.distant_block_starts('+', 600, 800), (0, 699))
self.assertEqual(self.converter.distant_block_starts('-', 600, 800), (0, 500))
class IntegrationTest_bedpetobed12(TestCase):
def run_integration_test(self):
test_directory = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(test_directory, 'test_data', 'bedpetobed12')
input_file = os.path.join(test_data_dir, 'input.bed')
expected_result = os.path.join(test_data_dir, 'expected.bed')
temp_descriptor, temp_output_path = tempfile.mkstemp(suffix='.bed')
with open(input_file) as input_stream, os.fdopen(temp_descriptor, 'w') as output_handle:
svtools.bedpetobed12.processBEDPE(input_stream, 'BEDPE', 1000000, output_handle)
expected_lines = open(expected_result).readlines()
produced_lines = open(temp_output_path).readlines()
diff = difflib.unified_diff(produced_lines, expected_lines, fromfile=temp_output_path, tofile=expected_result)
result = ''.join(diff)
if result != '':
for line in result:
sys.stdout.write(line)
self.assertFalse(result)
os.remove(temp_output_path)
if __name__ == "__main__":
main()
| from unittest import TestCase, main
import os
import sys
import tempfile
import difflib
import svtools.bedpetobed12
class IntegrationTest_bedpetobed12(TestCase):
def run_integration_test(self):
test_directory = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(test_directory, 'test_data', 'bedpetobed12')
input_file = os.path.join(test_data_dir, 'input.bed')
expected_result = os.path.join(test_data_dir, 'expected.bed')
temp_descriptor, temp_output_path = tempfile.mkstemp(suffix='.bed')
with open(input_file) as input_stream, os.fdopen(temp_descriptor, 'w') as output_handle:
svtools.bedpetobed12.processBEDPE(input_stream, 'BEDPE', 1000000, output_handle)
expected_lines = open(expected_result).readlines()
produced_lines = open(temp_output_path).readlines()
diff = difflib.unified_diff(produced_lines, expected_lines, fromfile=temp_output_path, tofile=expected_result)
result = ''.join(diff)
if result != '':
for line in result:
sys.stdout.write(line)
self.assertFalse(result)
os.remove(temp_output_path)
if __name__ == "__main__":
main()
| mit | Python |
28d5bd2c00e5d3c3dfcb5198e45407568828a67f | Remove extension from config | bravesheep/dogmatist | docs/conf.py | docs/conf.py | # -*- coding: utf-8 -*-
#
import os
import sys
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Dogmatist'
copyright = u'2015, Bravesheep'
version = '1.0'
release = '1.0'
exclude_patterns = ['_build']
default_role = 'obj'
pygments_style = 'sphinx'
intersphinx_mapping = {
# 'python': ('http://python.readthedocs.org/en/latest/', None),
}
# This doesn't exist since we aren't shipping any static files ourselves.
#html_static_path = ['_static']
htmlhelp_basename = 'Dogmatist'
exclude_patterns = [
#'api' # needed for ``make gettext`` to not die.
]
language = 'en'
locale_dirs = [
'locale/',
]
gettext_compact = False
# enable highlighting for PHP code not between ``<?php ... ?>`` by default
lexers['php'] = PhpLexer(startinline=True)
lexers['php-annotations'] = PhpLexer(startinline=True)
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| # -*- coding: utf-8 -*-
#
import os
import sys
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx_http_domain'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Dogmatist'
copyright = u'2015, Bravesheep'
version = '1.0'
release = '1.0'
exclude_patterns = ['_build']
default_role = 'obj'
pygments_style = 'sphinx'
intersphinx_mapping = {
# 'python': ('http://python.readthedocs.org/en/latest/', None),
}
# This doesn't exist since we aren't shipping any static files ourselves.
#html_static_path = ['_static']
htmlhelp_basename = 'Dogmatist'
exclude_patterns = [
#'api' # needed for ``make gettext`` to not die.
]
language = 'en'
locale_dirs = [
'locale/',
]
gettext_compact = False
# enable highlighting for PHP code not between ``<?php ... ?>`` by default
lexers['php'] = PhpLexer(startinline=True)
lexers['php-annotations'] = PhpLexer(startinline=True)
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| mit | Python |
4bf1ee120b2a660f9599fdd7589be1181179a28d | Fix import path for docs | mdomke/python-ulid | docs/conf.py | docs/conf.py | import os
import sys
sys.path.insert(0, os.path.abspath(".."))
project = ""
copyright = "2020, Martin Domke"
author = "Martin Domke"
# The full version, including alpha/beta/rc tags
release = "1.0.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_logo = "../logo.png"
html_theme = "alabaster"
html_theme_options = {
"github_user": "mdomke",
"github_repo": "ulid",
"description": "A library for working with ULIDs",
"sidebar_collapse": False,
"logo_text_align": "center",
}
html_sidebars = {
"**": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
pygments_style = "sphinx"
| import os
import sys
sys.path.insert(0, os.path.abspath("."))
project = ""
copyright = "2020, Martin Domke"
author = "Martin Domke"
# The full version, including alpha/beta/rc tags
release = "1.0.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_logo = "../logo.png"
html_theme = "alabaster"
html_theme_options = {
"github_user": "mdomke",
"github_repo": "ulid",
"description": "A library for working with ULIDs",
"sidebar_collapse": False,
"logo_text_align": "center",
}
html_sidebars = {
"**": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
pygments_style = "sphinx"
| mit | Python |
3f6cab19825fa3bd573f325738ce113a3eb2695f | Fix sorting. | Robpol86/sphinxcontrib-disqus,Robpol86/sphinxcontrib-disqus | docs/conf.py | docs/conf.py | """Sphinx configuration file."""
import os
import sys
import time
import sphinx_rtd_theme
sys.path.append(os.path.abspath('..'))
# General configuration.
author = 'Robpol86'
copyright = '{}, {}'.format(time.strftime('%Y'), author)
exclude_patterns = ['_build']
extensions = ['sphinxcontrib.disqus']
master_doc = 'index'
nitpicky = True
project = 'sphinxcontrib-disqus'
release = '1.0'
templates_path = ['_templates']
version = release
# Options for HTML output.
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_title = project
# Options for extensions.
disqus_shortname = project
| """Sphinx configuration file."""
import os
import sys
import time
import sphinx_rtd_theme
sys.path.append(os.path.abspath('..'))
# General configuration.
author = 'Robpol86'
exclude_patterns = ['_build']
copyright = '{}, {}'.format(time.strftime('%Y'), author)
extensions = ['sphinxcontrib.disqus']
master_doc = 'index'
nitpicky = True
project = 'sphinxcontrib-disqus'
release = '1.0'
templates_path = ['_templates']
version = release
# Options for HTML output.
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_title = project
# Options for extensions.
disqus_shortname = project
| mit | Python |
77ccd056487a4ad5d58bef50fa10c1ac1a080888 | Update function to optionally keep original extension | WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln | nlppln/utils.py | nlppln/utils.py | """NLP pipeline utility functionality"""
import os
import shutil
import glob
from nlppln import CWL_PATH
DEFAULT_DATA_DIR = '{}/.local/share/'.format(os.environ.get('HOME'))
CWL_DATA_DIR_PREFIX = 'commonwl'
def remove_ext(fname):
"""Removes the extension from a filename
"""
bn = os.path.basename(fname)
return os.path.splitext(bn)[0]
def create_dirs(fname):
"""Create (output) directories if they don't exist
"""
fname = os.path.dirname(os.path.abspath(fname))
if not os.path.exists(fname):
os.makedirs(fname)
def out_file_name(out_dir, fname, ext=None):
"""Return path of output file, given a directory, file name and extension.
Args:
out_dir (str): path to the directory where output should be written.
fname (str): name of the input file.
ext (str): file extension of the output file (defaults to None).
Returns:
str: out_dir + fname with extension replaced. If `ext` is `None`, the
original extension is kept.
"""
if ext is None:
return os.path.join(out_dir, fname)
fname = remove_ext(fname)
return os.path.join(out_dir, '{}.{}'.format(fname, ext))
def copy_cwl_files(from_dir=CWL_PATH):
"""Copy cwl files to a directory where the cwl-runner can find them.
cwl files are copied to $XDG_DATA_HOME/commonwl/ This is one of the default
locations where the cwl-runner looks for cwl files.
Args:
from_dir: Path to directory where to copy files from (default: the cwl
directory of nlppln).
"""
cwl_data_dir = os.environ.get('XDG_DATA_HOME')
if not cwl_data_dir:
cwl_data_dir = DEFAULT_DATA_DIR
cwl_data_dir = os.path.join(cwl_data_dir, CWL_DATA_DIR_PREFIX)
create_dirs(cwl_data_dir)
cwl_files = glob.glob('{}{}*.cwl'.format(from_dir, os.sep))
for fi in cwl_files:
fo = os.path.join(cwl_data_dir, os.path.basename(fi))
shutil.copy2(fi, fo)
if __name__ == '__main__':
copy_cwl_files()
| """NLP pipeline utility functionality"""
import os
import shutil
import glob
from nlppln import CWL_PATH
DEFAULT_DATA_DIR = '{}/.local/share/'.format(os.environ.get('HOME'))
CWL_DATA_DIR_PREFIX = 'commonwl'
def remove_ext(fname):
"""Removes the extension from a filename
"""
bn = os.path.basename(fname)
return os.path.splitext(bn)[0]
def create_dirs(fname):
"""Create (output) directories if they don't exist
"""
fname = os.path.dirname(os.path.abspath(fname))
if not os.path.exists(fname):
os.makedirs(fname)
def out_file_name(out_dir, fname, ext):
fname = remove_ext(fname)
return os.path.join(out_dir, '{}.{}'.format(fname, ext))
def copy_cwl_files(from_dir=CWL_PATH):
"""Copy cwl files to a directory where the cwl-runner can find them.
cwl files are copied to $XDG_DATA_HOME/commonwl/ This is one of the default
locations where the cwl-runner looks for cwl files.
Args:
from_dir: Path to directory where to copy files from (default: the cwl
directory of nlppln).
"""
cwl_data_dir = os.environ.get('XDG_DATA_HOME')
if not cwl_data_dir:
cwl_data_dir = DEFAULT_DATA_DIR
cwl_data_dir = os.path.join(cwl_data_dir, CWL_DATA_DIR_PREFIX)
create_dirs(cwl_data_dir)
cwl_files = glob.glob('{}{}*.cwl'.format(from_dir, os.sep))
for fi in cwl_files:
fo = os.path.join(cwl_data_dir, os.path.basename(fi))
shutil.copy2(fi, fo)
if __name__ == '__main__':
copy_cwl_files()
| apache-2.0 | Python |
bc5038c98d9107215b1c11716ba7f465f2f139d8 | fix pep8 issue | mtiny/pyroute2,nazarewk/pyroute2,vodik/pyroute2,drzaeus77/pyroute2,little-dude/pyroute2,simudream/pyroute2,roolebo/pyroute2,mtiny/pyroute2,little-dude/pyroute2,little-dude/pyroute2,roolebo/pyroute2,drzaeus77/pyroute2,vodik/pyroute2,simudream/pyroute2,nazarewk/pyroute2,tomislacker/python-iproute2,nazarewk/pyroute2,tomislacker/python-iproute2 | examples/create_vlan.py | examples/create_vlan.py | '''
Example: python ./examples/create_vlan.py [master]
Master is an interface to add VLAN to, e.g. eth0 or tap0 or
whatever else. Without parameters use tap0 as the default.
'''
import sys
from pyroute2 import IPDB
ip = IPDB()
try:
if len(sys.argv) == 2:
# Get master interface from the command line
master = ip[sys.argv[1]]['index']
else:
# Or use tap0 interface as an example
master = ip.interfaces.tap0.index
# or the same: master = ip.interfaces['tap0']['index']
with ip.create(kind='vlan', ifname='v101', link=master, vlan_id=101) as i:
# Arguments for ip.create() are executed before the transaction,
# in the IPRoute.link('add', ...) call. If create() fails, the
# interface became invalid and is not operable, you can safely
# drop it.
#
# Here goes the rest of transaction. If it fails, the interface
# continues to work, only failed transaction gets dropped.
i.add_ip('10.251.0.1', 24)
i.add_ip('10.251.0.2', 24)
i.mtu = 1400
finally:
ip.release()
| '''
Example: python ./examples/create_vlan.py [master]
Master is an interface to add VLAN to, e.g. eth0 or tap0 or
whatever else. Without parameters use tap0 as the default.
'''
import sys
from pyroute2 import IPDB
ip = IPDB()
try:
if len(sys.argv) == 2:
# Get master interface from the command line
master = ip[sys.argv[1]]['index']
else:
# Or use tap0 interface as an example
master = ip.interfaces.tap0.index
# or the same: master = ip.interfaces['tap0']['index']
with ip.create(kind='vlan', ifname='v101', link=master, vlan_id=101) as i:
# Arguments for ip.create() are executed before the transaction,
# in the IPRoute.link('add', ...) call. If create() fails, the
# interface became invalid and is not operable, you can safely
# drop it.
#
# Here goes the rest of transaction. If it fails, the interface
# continues to work, only failed transaction gets dropped.
i.add_ip('10.251.0.1', 24)
i.add_ip('10.251.0.2', 24)
i.mtu = 1400
finally:
ip.release()
| apache-2.0 | Python |
b559134bf980dc4f2410d8c94667205ad306ecd5 | Change Sphinx theme | xolox/python-npm-accel,xolox/python-npm-accel | docs/conf.py | docs/conf.py | # Accelerator for npm, the Node.js package manager.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: June 28, 2017
# URL: https://github.com/xolox/python-npm-accel
"""Sphinx documentation configuration for the `npm-accel` project."""
import os
import sys
# Add the npm-accel source distribution's root directory to the module path.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# Sphinx extension module names.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'humanfriendly.sphinx',
]
# Paths that contain templates, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'npm-accel'
copyright = u'2017, Peter Odding'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Find the package version and make it the release.
from npm_accel import __version__ as npm_accel_version # NOQA
# The short X.Y version.
version = '.'.join(npm_accel_version.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = npm_accel_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# http://sphinx-doc.org/ext/autodoc.html#confval-autodoc_member_order
autodoc_member_order = 'bysource'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Refer to the Python standard library.
# From: http://twistedmatrix.com/trac/ticket/4582.
intersphinx_mapping = dict(
executor=('https://executor.readthedocs.io/en/latest/', None),
propertymanager=('https://property-manager.readthedocs.io/en/latest/', None),
python=('http://docs.python.org/2/', None),
)
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Output file base name for HTML help builder.
htmlhelp_basename = 'npmacceldoc'
| # Accelerator for npm, the Node.js package manager.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: June 28, 2017
# URL: https://github.com/xolox/python-npm-accel
"""Sphinx documentation configuration for the `npm-accel` project."""
import os
import sys
# Add the npm-accel source distribution's root directory to the module path.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# Sphinx extension module names.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'humanfriendly.sphinx',
]
# Paths that contain templates, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'npm-accel'
copyright = u'2017, Peter Odding'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Find the package version and make it the release.
from npm_accel import __version__ as npm_accel_version # NOQA
# The short X.Y version.
version = '.'.join(npm_accel_version.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = npm_accel_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# http://sphinx-doc.org/ext/autodoc.html#confval-autodoc_member_order
autodoc_member_order = 'bysource'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Refer to the Python standard library.
# From: http://twistedmatrix.com/trac/ticket/4582.
intersphinx_mapping = dict(
executor=('https://executor.readthedocs.io/en/latest/', None),
propertymanager=('https://property-manager.readthedocs.io/en/latest/', None),
python=('http://docs.python.org/2/', None),
)
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Output file base name for HTML help builder.
htmlhelp_basename = 'npmacceldoc'
| mit | Python |
371f69ae2861c04bef9a4942427ad26bf431c72b | Update docs conf. | thedrow/pytest-benchmark,aldanor/pytest-benchmark,ionelmc/pytest-benchmark,SectorLabs/pytest-benchmark | docs/conf.py | docs/conf.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon'
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = u'pytest-benchmark'
year = u'2014-2015'
author = u'Ionel Cristian M\u0103rie\u0219'
copyright = '{0}, {1}'.format(year, author)
version = release = u'0.1.0'
import sphinx_py3doc_enhanced_theme
html_theme = "sphinx_py3doc_enhanced_theme"
html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()]
pygments_style = 'trac'
templates_path = ['.']
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = True
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
html_theme_options = {
'githuburl': 'https://github.com/ionelmc/pytest-benchmark/'
}
| # -*- coding: utf-8 -*-
import re
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon'
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = u'pytest-benchmark'
copyright = u'2014, Ionel Cristian Mărieș'
version = release = re.findall(
'version="(.*)"',
open(os.path.join(os.path.dirname(__file__), '../setup.py')).read()
)[0]
import sphinx_py3doc_enhanced_theme
html_theme = "sphinx_py3doc_enhanced_theme"
html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()]
pygments_style = 'trac'
templates_path = ['.']
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = True
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
html_theme_options = {
'githuburl': 'https://github.com/ionelmc/pytest-benchmark/'
}
| bsd-2-clause | Python |
c199f571440626ac05cdc0c34f831c3b0f8a2b25 | Use MathJax. | memmett/PyWENO,memmett/PyWENO,memmett/PyWENO | docs/conf.py | docs/conf.py | #
# PyWENO documentation build configuration file
#
import os
import sys
# path to autogen'ed modules
sys.path.append(os.path.abspath('..'))
# mock out some extension modules
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(self, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
return type(name, (), {})
else:
return Mock()
for mod_name in [ 'sympy', 'pyweno.codeprinters', 'pyweno.ccoeffs', 'pyweno.cweno' ]:
sys.modules[mod_name] = Mock()
# extentions
#extensions = [ 'sphinx.ext.autodoc' ]
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax']
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax']
#jsmath_path = 'http://www.unc.edu/~mwemmett/jsmath/easy/load.js'
# general configuration
source_suffix = '.rst'
master_doc = 'index'
# html configuration
pygments_style = 'sphinx'
html_theme = 'default'
html_sidebars = {
'**': ['globaltoc.html', 'searchbox.html'],
}
# project information
project = 'PyWENO'
copyright = '2009, 2010, 2011, Matthew Emmett'
execfile('../version.py') # this sets 'version'
release = version
| #
# PyWENO documentation build configuration file
#
import os
import sys
# path to autogen'ed modules
sys.path.append(os.path.abspath('..'))
# mock out some extension modules
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(self, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
return type(name, (), {})
else:
return Mock()
for mod_name in [ 'sympy', 'pyweno.codeprinters', 'pyweno.ccoeffs', 'pyweno.cweno' ]:
sys.modules[mod_name] = Mock()
# extentions
extensions = [ 'sphinx.ext.autodoc' ]
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax']
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax']
#jsmath_path = 'http://www.unc.edu/~mwemmett/jsmath/easy/load.js'
# general configuration
source_suffix = '.rst'
master_doc = 'index'
# html configuration
pygments_style = 'sphinx'
html_theme = 'default'
html_sidebars = {
'**': ['globaltoc.html', 'searchbox.html'],
}
# project information
project = 'PyWENO'
copyright = '2009, 2010, 2011, Matthew Emmett'
execfile('../version.py') # this sets 'version'
release = version
| bsd-3-clause | Python |
49fd49e7ee9fbb9dc8fdc785bc022ca26dd6ff16 | Fix dirty-flag removal in docs/conf.py | tuturto/hy,freezas/hy,kartikm/hy,adamfeuer/hy | docs/conf.py | docs/conf.py | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
import re, os, sys, time, cgi
sys.path.append(os.path.abspath(".."))
from get_version import __version__ as hy_version
# Read the Docs might dirty its checkout, so strip the dirty flag.
hy_version = re.sub('[+.]dirty\Z', '', hy_version)
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
project = u'hy'
copyright = u'%s the authors' % time.strftime('%Y')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(hy_version.split(".")[:-1])
# The full version, including alpha/beta/rc tags.
release = hy_version
hy_descriptive_version = cgi.escape(hy_version)
if "+" in hy_version:
hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>"
exclude_patterns = ['_build', 'coreteam.rst']
pygments_style = 'sphinx'
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_use_smartypants = False
html_show_sphinx = False
html_context = dict(
hy_descriptive_version = hy_descriptive_version)
| # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
import os, sys, time, cgi
sys.path.append(os.path.abspath(".."))
from get_version import __version__ as hy_version
if hy_version.endswith(".dirty"):
# Read the Docs might dirty its checkout, so ignore this.
hy_version = hy_version[:-len(".dirty")]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
project = u'hy'
copyright = u'%s the authors' % time.strftime('%Y')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(hy_version.split(".")[:-1])
# The full version, including alpha/beta/rc tags.
release = hy_version
hy_descriptive_version = cgi.escape(hy_version)
if "+" in hy_version:
hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>"
exclude_patterns = ['_build', 'coreteam.rst']
pygments_style = 'sphinx'
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_use_smartypants = False
html_show_sphinx = False
html_context = dict(
hy_descriptive_version = hy_descriptive_version)
| mit | Python |
1a98b29293ccfab6534a48402414e89726d8e5bb | Set icon, summary for notification | familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG | Python/pomodoro.py | Python/pomodoro.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import subprocess as spr
import time
def main():
start = datetime.datetime.now()
start_str = start.strftime("%H:%M:%S")
spr.call(['notify-send',
'--app-name', 'POMODORO',
'--icon', 'dialog-information',
'New pomodoro', 'From: {}'.format(start_str)])
time.sleep(30 * 60)
end = datetime.datetime.now()
duration = (end - start).total_seconds() // 60
for i in range(5):
time.sleep(3)
spr.call(
['notify-send',
'POMO: {0:.0f} minute passed.\tFrom {1}'.format(
duration,
start_str
)
]
)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import subprocess as spr
import time
def main():
start = datetime.datetime.now()
spr.call(['notify-send', 'Started new pomodoro'])
time.sleep(30 * 60)
end = datetime.datetime.now()
duration = (end - start).total_seconds() // 60
for i in range(5):
time.sleep(3)
spr.call(
['notify-send',
'POMO: {0:.0f} minute passed.\tFrom {1}'.format(
duration,
start.strftime("%H:%M:%S"))
]
)
if __name__ == "__main__":
main()
| bsd-2-clause | Python |
d65e58f108d6060166319b89686dea31e25b058d | Update wiki.py | RgTqUg/lamia | lamia/wiki.py | lamia/wiki.py | import requests
import urllib
from requests.auth import HTTPBasicAuth
class entry(object):
def __init__(self, page, auther):
self.user = auther.user
self.key = auther.key
self.id_number = page["id"]
self.created_at = page["created_at"]
self.last_update = page["updated_at"]
self.title = page["title"]
self.body = page["body"]
self.creator_id = page["creator_id"]
if page["is_locked"] == "false":
self.is_locked = False
else:
self.is_locked = True
self.other_names = page["other_names"]
self.creator_name = page["creator_name"]
def update_title(self, title):
requests.put(("http://danbooru.donmai.us/wiki_pages/" + str(self.id_number) + ".json?wiki_page[title]=" + str(title) + "&wiki_page[body]=" + self.body),
auth=HTTPBasicAuth(self.user, self.key))
def update_body(self, body):
requests.put(("http://danbooru.donmai.us/wiki_pages/" + str(self.id_number) + ".json?wiki_page[title]=" + self.title + "&wiki_page[body]=" + str(body)),
auth=HTTPBasicAuth(self.user, self.key))
| import requests
import urllib
from requests.auth import HTTPBasicAuth
class entry(object):
def __init__(self, page, auther):
self.user = auther.user
self.key = auther.key
self.id_number = page["id"]
self.created_at = page["created_at"]
self.last_update = page["updated_at"]
self.title = page["title"]
self.body = page["body"]
self.creator_id = page["creator_id"]
if page["is_locked"] == "false":
self.is_locked = False
else:
self.is_locked = True
self.other_names = page["other_names"]
self.creator_name = page["creator_name"]
def update_title(self, title):
requests.put(("http://danbooru.donmai.us/wiki_pages/" + str(self.id_number) + ".json?wiki_page[title]=" + str(title) + "&wiki_page[body]=" + self.body)
auth=HTTPBasicAuth(self.user, self.key))
def update_body(self, body):
requests.put(("http://danbooru.donmai.us/wiki_pages/" + str(self.id_number) + ".json?wiki_page[title]=" + self.title + "&wiki_page[body]=" + str(body))
auth=HTTPBasicAuth(self.user, self.key))
| mit | Python |
cba5d8b152093612eee929f0d07d088e17d74cc1 | Bump of version to 1.0.0 to make it compatible with Firefox 4 https://github.com/darwin/firepython/issues/13 | binaryage/firelogger.py | firepython/__init__.py | firepython/__init__.py | # -*- mode: python; coding: utf-8 -*-
"""
FireLogger_ server-side support library for Python.
For usage see ``README.txt`` or visit the `github homepage`_.
.. _FireLogger: https://addons.mozilla.org/en-US/firefox/addon/11090
.. _github homepage: http://github.com/darwin/firepython
"""
__api_version__ = '1.0'
# ^--- corresponds to api version of firelogger
__version__ = '1.0.0' # for python package releases
| # -*- mode: python; coding: utf-8 -*-
"""
FireLogger_ server-side support library for Python.
For usage see ``README.txt`` or visit the `github homepage`_.
.. _FireLogger: https://addons.mozilla.org/en-US/firefox/addon/11090
.. _github homepage: http://github.com/darwin/firepython
"""
__api_version__ = '0.9'
# ^--- corresponds to api version of firelogger
__version__ = '0.9.0' # for python package releases
| bsd-3-clause | Python |
a2d575aa64da07fb4d40d38b7404c63bbb8a1cd2 | Add on_delete to ForeignKey | wq/django-swappable-models | tests/default_app/models.py | tests/default_app/models.py | from django.db import models
import swapper
class BaseType(models.Model):
name = models.CharField(max_length=255)
class Meta:
abstract = True
class Type(BaseType):
class Meta:
swappable = swapper.swappable_setting("default_app", "Type")
class Item(models.Model):
type = models.ForeignKey(
swapper.get_model_name('default_app', "Type"), on_delete=models.CASCADE
)
name = models.CharField(max_length=255)
description = models.TextField()
| from django.db import models
import swapper
class BaseType(models.Model):
name = models.CharField(max_length=255)
class Meta:
abstract = True
class Type(BaseType):
class Meta:
swappable = swapper.swappable_setting("default_app", "Type")
class Item(models.Model):
type = models.ForeignKey(swapper.get_model_name('default_app', "Type"))
name = models.CharField(max_length=255)
description = models.TextField()
| mit | Python |
2627e2b7ef516a9be2b453e4c216ef7a163e0673 | Bump version to 0.6.1.post1 | oddbird/gurtel,oddbird/gurtel | gurtel/__init__.py | gurtel/__init__.py | __version__ = '0.6.1.post1'
| __version__ = '0.6.1'
| bsd-3-clause | Python |
52ea3c736178a6a7a693a7a6a819e7c24b29454f | Revert back VariantDirWrap for Simba framework (compatibility) | yanbe/platform-espressif8266 | builder/frameworks/simba.py | builder/frameworks/simba.py | # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simba
Simba is an RTOS and build framework. It aims to make embedded
programming easy and portable.
http://simba-os.readthedocs.org
"""
from os.path import join, sep
from SCons.Script import DefaultEnvironment, SConscript
from platformio.builder.tools import platformio as platformio_tool
#
# Backward compatibility with PlatformIO 2.0
#
platformio_tool.SRC_DEFAULT_FILTER = " ".join([
"+<*>", "-<.git%s>" % sep, "-<svn%s>" % sep,
"-<example%s>" % sep, "-<examples%s>" % sep,
"-<test%s>" % sep, "-<tests%s>" % sep
])
def LookupSources(env, variant_dir, src_dir, duplicate=True, src_filter=None):
return env.CollectBuildFiles(variant_dir, src_dir, src_filter, duplicate)
def VariantDirWrap(env, variant_dir, src_dir, duplicate=False):
env.VariantDir(variant_dir, src_dir, duplicate)
env = DefaultEnvironment()
env.AddMethod(LookupSources)
env.AddMethod(VariantDirWrap)
env.Replace(
PLATFORMFW_DIR=env.PioPlatform().get_package_dir("framework-simba")
)
SConscript(
[env.subst(join("$PLATFORMFW_DIR", "make", "platformio.sconscript"))])
| # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simba
Simba is an RTOS and build framework. It aims to make embedded
programming easy and portable.
http://simba-os.readthedocs.org
"""
from os.path import join, sep
from SCons.Script import DefaultEnvironment, SConscript
from platformio.builder.tools import platformio as platformio_tool
#
# Backward compatibility with PlatformIO 2.0
#
platformio_tool.SRC_DEFAULT_FILTER = " ".join([
"+<*>", "-<.git%s>" % sep, "-<svn%s>" % sep,
"-<example%s>" % sep, "-<examples%s>" % sep,
"-<test%s>" % sep, "-<tests%s>" % sep
])
def LookupSources(env, variant_dir, src_dir, duplicate=True, src_filter=None):
return env.CollectBuildFiles(variant_dir, src_dir, src_filter, duplicate)
env = DefaultEnvironment()
env.AddMethod(LookupSources)
env.Replace(
PLATFORMFW_DIR=env.PioPlatform().get_package_dir("framework-simba")
)
SConscript(
[env.subst(join("$PLATFORMFW_DIR", "make", "platformio.sconscript"))])
| apache-2.0 | Python |
8f1cf7f71cae44967a4fe260d149eb6c49c40d17 | Add test for successive traffic entries | agdsn/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,agdsn/pycroft | tests/model/test_traffic.py | tests/model/test_traffic.py | from pycroft.model.traffic import PmacctTrafficEgress, PmacctTrafficIngress, TrafficVolume
from tests import FactoryDataTestBase
from tests.factories.traffic import PMAcctTrafficEgressFactory, PMAcctTrafficIngressFactory
from tests.factories.user import UserWithHostFactory
class PMAcctPseudoTableTest(FactoryDataTestBase):
ip = '141.30.228.39'
bad_ip = '141.30.228.1'
def create_factories(self):
self.user = UserWithHostFactory(host__interface__ip__str_address=self.ip)
def test_egress_insert(self):
egress_traffic = PMAcctTrafficEgressFactory.create(ip_src=self.ip)
self.assertEqual(PmacctTrafficEgress.q.count(), 0)
self.assertEqual(TrafficVolume.q.count(), 1)
volume = TrafficVolume.q.one()
self.assertEqual(volume.type, 'Egress')
self.assertEqual(volume.amount, egress_traffic.bytes)
self.assertEqual(volume.packets, egress_traffic.packets)
self.assertEqual(volume.user, self.user)
def test_egress_insert_nonexistent_ip(self):
PMAcctTrafficEgressFactory.create(ip_src=self.bad_ip)
self.assertEqual(PmacctTrafficEgress.q.count(), 0)
self.assertEqual(TrafficVolume.q.count(), 0)
def test_egress_update_successive_entries(self):
data = [
# timestamp, packets, amount
('2018-03-15 00:15:00', 200, 1024),
('2018-03-15 10:15:00', 324, 500),
('2018-03-15 23:59:00', 12, 7055),
]
for stamp, packets, bytes in data:
PMAcctTrafficEgressFactory.create(
ip_src=self.ip,
stamp_inserted=stamp,
bytes=bytes,
packets=packets,
)
self.assertEqual(PmacctTrafficEgress.q.count(), 0)
self.assertEqual(TrafficVolume.q.count(), 1)
vol = TrafficVolume.q.one()
self.assertEqual(vol.timestamp, '2018-03-15 00:00:00')
def test_ingress_insert(self):
ingress_traffic = PMAcctTrafficIngressFactory.create(ip_dst=self.ip)
self.assertEqual(PmacctTrafficIngress.q.count(), 0)
self.assertEqual(TrafficVolume.q.count(), 1)
volume = TrafficVolume.q.one()
self.assertEqual(volume.type, 'Ingress')
self.assertEqual(volume.amount, ingress_traffic.bytes)
self.assertEqual(volume.packets, ingress_traffic.packets)
self.assertEqual(volume.user, self.user)
def test_ingress_insert_nonexistent_ip(self):
PMAcctTrafficIngressFactory.create(ip_dst=self.bad_ip)
self.assertEqual(PmacctTrafficEgress.q.count(), 0)
self.assertEqual(TrafficVolume.q.count(), 0)
| from pycroft.model.traffic import PmacctTrafficEgress, PmacctTrafficIngress, TrafficVolume
from tests import FactoryDataTestBase
from tests.factories.traffic import PMAcctTrafficEgressFactory, PMAcctTrafficIngressFactory
from tests.factories.user import UserWithHostFactory
class PMAcctPseudoTableTest(FactoryDataTestBase):
ip = '141.30.228.39'
bad_ip = '141.30.228.1'
def create_factories(self):
self.user = UserWithHostFactory(host__interface__ip__str_address=self.ip)
def test_egress_insert(self):
egress_traffic = PMAcctTrafficEgressFactory.create(ip_src=self.ip)
self.assertEqual(PmacctTrafficEgress.q.count(), 0)
self.assertEqual(TrafficVolume.q.count(), 1)
volume = TrafficVolume.q.one()
self.assertEqual(volume.type, 'Egress')
self.assertEqual(volume.amount, egress_traffic.bytes)
self.assertEqual(volume.packets, egress_traffic.packets)
self.assertEqual(volume.user, self.user)
def test_egress_insert_nonexistent_ip(self):
PMAcctTrafficEgressFactory.create(ip_src=self.bad_ip)
self.assertEqual(PmacctTrafficEgress.q.count(), 0)
self.assertEqual(TrafficVolume.q.count(), 0)
def test_ingress_insert(self):
ingress_traffic = PMAcctTrafficIngressFactory.create(ip_dst=self.ip)
self.assertEqual(PmacctTrafficIngress.q.count(), 0)
self.assertEqual(TrafficVolume.q.count(), 1)
volume = TrafficVolume.q.one()
self.assertEqual(volume.type, 'Ingress')
self.assertEqual(volume.amount, ingress_traffic.bytes)
self.assertEqual(volume.packets, ingress_traffic.packets)
self.assertEqual(volume.user, self.user)
def test_ingress_insert_nonexistent_ip(self):
PMAcctTrafficIngressFactory.create(ip_dst=self.bad_ip)
self.assertEqual(PmacctTrafficEgress.q.count(), 0)
self.assertEqual(TrafficVolume.q.count(), 0)
| apache-2.0 | Python |
ebd2fc847336d5e1497b57987c255f9d2965339e | Add number days argument | dbinetti/barberscore,dbinetti/barberscore-django,dbinetti/barberscore-django,barberscore/barberscore-api,barberscore/barberscore-api,barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore | project/api/management/commands/sync_groups.py | project/api/management/commands/sync_groups.py | # Standard Libary
import datetime
# Django
from django.core.management.base import BaseCommand
# First-Party
from api.models import Group
from bhs.models import Structure
from bhs.updaters import update_or_create_group_from_structure
from django.utils import (
timezone,
)
class Command(BaseCommand):
help = "Command to sync database with BHS ."
def add_arguments(self, parser):
parser.add_argument(
'-a',
'--all',
action='store_true',
dest='all',
default=False,
help='Update all groups.',
)
parser.add_argument(
'-d',
'--days',
type=int,
dest='days',
help='Number of days to update from.',
)
def handle(self, *args, **options):
self.stdout.write("Updating groups...")
if options['all']:
ss = Structure.objects.all()
else:
now = timezone.now()
cursor = now - datetime.timedelta(days=options['days'])
ss = Structure.objects.filter(
updated_ts__gt=cursor,
)
total = ss.count()
i = 0
for s in ss:
i += 1
update_or_create_group_from_structure(s)
self.stdout.write("{0}/{1}".format(i, total), ending='\r')
self.stdout.flush()
self.stdout.write("Updated {0} groups.".format(total))
| # Standard Libary
import datetime
# Django
from django.core.management.base import BaseCommand
# First-Party
from api.models import Group
from bhs.models import Structure
from bhs.updaters import update_or_create_group_from_structure
from django.utils import (
timezone,
)
class Command(BaseCommand):
help = "Command to sync database with BHS ."
def add_arguments(self, parser):
parser.add_argument(
'--all',
action='store_true',
dest='all',
default=False,
help='Update all groups.',
)
def handle(self, *args, **options):
self.stdout.write("Updating groups...")
if options['all']:
ss = Structure.objects.all()
else:
now = timezone.now()
cursor = now - datetime.timedelta(hours=25)
ss = Structure.objects.filter(
updated_ts__gt=cursor,
)
total = ss.count()
i = 0
for s in ss:
i += 1
update_or_create_group_from_structure(s)
self.stdout.write("{0}/{1}".format(i, total), ending='\r')
self.stdout.flush()
self.stdout.write("Updated {0} groups.".format(total))
| bsd-2-clause | Python |
d9a6071674857ceae566d29c7c77a31a5ed5214d | Return proper WWW-Authenticate header if API authentication fails | homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps | byceps/blueprints/api/decorators.py | byceps/blueprints/api/decorators.py | """
byceps.blueprints.api.decorators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from functools import wraps
from typing import Optional
from flask import abort, request
from werkzeug.datastructures import WWWAuthenticate
from ...services.authentication.api import service as api_service
def api_token_required(func):
"""Ensure the request is authenticated via API token."""
@wraps(func)
def wrapper(*args, **kwargs):
if not _has_valid_api_token():
www_authenticate = WWWAuthenticate('Bearer')
abort(401, www_authenticate=www_authenticate)
return func(*args, **kwargs)
return wrapper
def _has_valid_api_token() -> bool:
request_token = _extract_token_from_request()
if request_token is None:
return False
api_token = api_service.find_api_token_by_token(request_token)
return api_token is not None and not api_token.suspended
def _extract_token_from_request() -> Optional[str]:
header_value = request.headers.get('Authorization')
if header_value is None:
return None
return header_value.replace('Bearer ', '', 1)
| """
byceps.blueprints.api.decorators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from functools import wraps
from typing import Optional
from flask import abort, request
from ...services.authentication.api import service as api_service
def api_token_required(func):
"""Ensure the request is authenticated via API token."""
@wraps(func)
def wrapper(*args, **kwargs):
if not _has_valid_api_token():
abort(401, www_authenticate='Bearer')
return func(*args, **kwargs)
return wrapper
def _has_valid_api_token() -> bool:
request_token = _extract_token_from_request()
if request_token is None:
return False
api_token = api_service.find_api_token_by_token(request_token)
return api_token is not None and not api_token.suspended
def _extract_token_from_request() -> Optional[str]:
header_value = request.headers.get('Authorization')
if header_value is None:
return None
return header_value.replace('Bearer ', '', 1)
| bsd-3-clause | Python |
b5653ac28ac8358127943bd1f40c22dfd8a274f3 | Make Python example output identical to C++ and Java by removing redundant spaces. | malthe/google-protobuf,malthe/google-protobuf,malthe/google-protobuf,malthe/google-protobuf,malthe/google-protobuf | examples/list_people.py | examples/list_people.py | #! /usr/bin/python
# See README.txt for information and build instructions.
import addressbook_pb2
import sys
# Iterates though all people in the AddressBook and prints info about them.
def ListPeople(address_book):
for person in address_book.person:
print "Person ID:", person.id
print " Name:", person.name
if person.HasField('email'):
print " E-mail address:", person.email
for phone_number in person.phone:
if phone_number.type == addressbook_pb2.Person.MOBILE:
print " Mobile phone #:",
elif phone_number.type == addressbook_pb2.Person.HOME:
print " Home phone #:",
elif phone_number.type == addressbook_pb2.Person.WORK:
print " Work phone #:",
print phone_number.number
# Main procedure: Reads the entire address book from a file and prints all
# the information inside.
if len(sys.argv) != 2:
print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE"
sys.exit(-1)
address_book = addressbook_pb2.AddressBook()
# Read the existing address book.
f = open(sys.argv[1], "rb")
address_book.ParseFromString(f.read())
f.close()
ListPeople(address_book)
| #! /usr/bin/python
# See README.txt for information and build instructions.
import addressbook_pb2
import sys
# Iterates though all people in the AddressBook and prints info about them.
def ListPeople(address_book):
for person in address_book.person:
print "Person ID:", person.id
print " Name:", person.name
if person.HasField('email'):
print " E-mail address:", person.email
for phone_number in person.phone:
if phone_number.type == addressbook_pb2.Person.MOBILE:
print " Mobile phone #: ",
elif phone_number.type == addressbook_pb2.Person.HOME:
print " Home phone #: ",
elif phone_number.type == addressbook_pb2.Person.WORK:
print " Work phone #: ",
print phone_number.number
# Main procedure: Reads the entire address book from a file and prints all
# the information inside.
if len(sys.argv) != 2:
print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE"
sys.exit(-1)
address_book = addressbook_pb2.AddressBook()
# Read the existing address book.
f = open(sys.argv[1], "rb")
address_book.ParseFromString(f.read())
f.close()
ListPeople(address_book)
| bsd-3-clause | Python |
ff4dd944335f2469cb26bbb5234e0ee377c68a18 | refactor example maurer_rose.py | mozman/ezdxf,mozman/ezdxf,mozman/ezdxf,mozman/ezdxf,mozman/ezdxf | examples/maurer_rose.py | examples/maurer_rose.py | # Copyright (c) 2019-2022, Manfred Moitzi
# License: MIT License
from typing import Iterable, Tuple
import pathlib
import math
import ezdxf
from ezdxf import zoom
CWD = pathlib.Path("~/Desktop/Outbox").expanduser()
if not CWD.exists():
CWD = pathlib.Path(".")
# ------------------------------------------------------------------------------
# draw a maurer rose with LWPOLYLINE entities
# https://en.wikipedia.org/wiki/Maurer_rose
# ------------------------------------------------------------------------------
N = 6 # The rose has n petals if N is odd, and 2N petals if N is even.
D = 71 # delta angle in degrees
STEP360 = math.tau / 360
def maurer_rose(n: int, d: int, radius: float) -> Iterable[Tuple[float, float]]:
i: float = 0.0
while i < math.tau:
k = i * d
r = radius * math.sin(n * k)
x = r * math.cos(k)
y = r * math.sin(k)
yield x, y
i += STEP360
def main(filename: str, n: int, d: int) -> None:
doc = ezdxf.new()
doc.layers.add("PETALS", color=1)
doc.layers.add("NET", color=5)
msp = doc.modelspace()
msp.add_lwpolyline(
maurer_rose(n, 1, 250), close=True, dxfattribs={"layer": "PETALS"}
)
msp.add_lwpolyline(
maurer_rose(n, d, 250), close=True, dxfattribs={"layer": "NET"}
)
zoom.extents(msp)
doc.saveas(filename)
if __name__ == "__main__":
main(str(CWD / "maurer_rose.dxf"), N, D)
| # Purpose: draw a maurer rose with polylines
# https://en.wikipedia.org/wiki/Maurer_rose
# Copyright (c) 2019-2021, Manfred Moitzi
# License: MIT License
from typing import Iterable, Tuple
import pathlib
import math
import ezdxf
from ezdxf import zoom
DIR = pathlib.Path("~/Desktop/Outbox").expanduser()
N = 6 # The rose has n petals if N is odd, and 2N petals if N is even.
D = 71 # delta angle in degrees
TWO_PI = math.pi * 2
STEP360 = TWO_PI / 360
def maurer_rose(n: int, d: int, radius: float) -> Iterable[Tuple[float, float]]:
i: float = 0.0
while i < TWO_PI:
k = i * d
r = radius * math.sin(n * k)
x = r * math.cos(k)
y = r * math.sin(k)
yield x, y
i += STEP360
def main(filename: str, n: int, d: int) -> None:
doc = ezdxf.new()
doc.layers.add("PETALS", color=1)
doc.layers.add("NET", color=5)
msp = doc.modelspace()
msp.add_lwpolyline(
maurer_rose(n, 1, 250), close=True, dxfattribs={"layer": "PETALS"}
)
msp.add_lwpolyline(
maurer_rose(n, d, 250), close=True, dxfattribs={"layer": "NET"}
)
zoom.extents(msp)
doc.saveas(filename)
if __name__ == "__main__":
main(str(DIR / "maurer_rose.dxf"), N, D)
| mit | Python |
b0fff7ee5b587213da1b76df0907ee0e9e3746ab | Print out last gameID after all tasks completed. | lanyudhy/Halite-II,lanyudhy/Halite-II,HaliteChallenge/Halite,lanyudhy/Halite-II,HaliteChallenge/Halite,lanyudhy/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite,HaliteChallenge/Halite,HaliteChallenge/Halite-II,HaliteChallenge/Halite,HaliteChallenge/Halite-II,lanyudhy/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II,lanyudhy/Halite-II,HaliteChallenge/Halite,HaliteChallenge/Halite-II,lanyudhy/Halite-II,HaliteChallenge/Halite,lanyudhy/Halite-II,lanyudhy/Halite-II,HaliteChallenge/Halite,HaliteChallenge/Halite,lanyudhy/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II,lanyudhy/Halite-II,HaliteChallenge/Halite,HaliteChallenge/Halite-II,HaliteChallenge/Halite | admin/checkTaskCompletion.py | admin/checkTaskCompletion.py | #!/usr/bin/env python3
import configparser
import time
from datetime import datetime
import pymysql
parser = configparser.ConfigParser()
parser.read("../halite.ini")
DB_CONFIG = parser["database"]
def check_compiles(db):
db.begin()
with db.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM User WHERE compileStatus != 0")
return cursor.fetchone()['COUNT(*)']
def check_workers(db, start_time):
db.begin()
with db.cursor() as cursor:
cursor.execute("SELECT workerID, lastRequestTime FROM Worker ORDER BY workerID")
workers = cursor.fetchall()
waiting = list()
for w in workers:
if w["lastRequestTime"] < start_time:
waiting.append(w)
return waiting
def main():
if ("compState" not in parser or "noGameTasks" not in parser["compState"] or
not parser["compState"]["noGameTasks"]):
print(parser["compState"]["noGameTasks"])
print("Game tasks still activated. Disable in halite.ini [compState] noGameTasks")
return
start_time = datetime.now()
db = pymysql.connect(host=DB_CONFIG['hostname'], user=DB_CONFIG['username'], passwd=DB_CONFIG['password'], db=DB_CONFIG['name'], cursorclass=pymysql.cursors.DictCursor)
compiles = 1
workers = [1]
while compiles or workers:
compiles = check_compiles(db)
workers = check_workers(db, start_time)
if compiles:
print("Waiting for %d more compiles to complete." % (compiles,))
if workers:
print("Waiting for workers: ", end="")
print(", ".join(str(w["workerID"]) for w in workers[:5]), end="")
if len(workers) > 5:
print(" and %d more" % (len(workers) - 5,))
else:
print()
time.sleep(5)
db.begin()
with db.cursor() as cursor:
cursor.execute("SELECT MAX(gameID) FROM Game")
max_game = cursor.fetchone()["MAX(gameID)"]
print("All tasks completed, last gameID %d." % (max_game,))
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
import configparser
import time
from datetime import datetime
import pymysql
parser = configparser.ConfigParser()
parser.read("../halite.ini")
DB_CONFIG = parser["database"]
def check_compiles(db):
db.begin()
with db.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM User WHERE compileStatus != 0")
return cursor.fetchone()['COUNT(*)']
def check_workers(db, start_time):
db.begin()
with db.cursor() as cursor:
cursor.execute("SELECT workerID, lastRequestTime FROM Worker ORDER BY workerID")
workers = cursor.fetchall()
waiting = list()
for w in workers:
if w["lastRequestTime"] < start_time:
waiting.append(w)
return waiting
def main():
if ("compState" not in parser or "noGameTasks" not in parser["compState"] or
not parser["compState"]["noGameTasks"]):
print(parser["compState"]["noGameTasks"])
print("Game tasks still activated. Disable in halite.ini [compState] noGameTasks")
return
start_time = datetime.now()
db = pymysql.connect(host=DB_CONFIG['hostname'], user=DB_CONFIG['username'], passwd=DB_CONFIG['password'], db=DB_CONFIG['name'], cursorclass=pymysql.cursors.DictCursor)
compiles = 1
workers = [1]
while compiles or workers:
compiles = check_compiles(db)
workers = check_workers(db, start_time)
if compiles:
print("Waiting for %d more compiles to complete." % (compiles,))
if workers:
print("Waiting for workers: ", end="")
print(", ".join(str(w["workerID"]) for w in workers[:5]), end="")
if len(workers) > 5:
print(" and %d more" % (len(workers) - 5,))
else:
print()
time.sleep(5)
print("All tasks completed.")
if __name__ == "__main__":
main()
| mit | Python |
40ed344eab49427cee04859d1ba3136d4211ebee | Return [] if the video's chunks object doesn't contain "live". | ArchiveTeam/twitchtv-discovery-grab,ArchiveTeam/twitchtv-discovery-grab | getflv.py | getflv.py | '''Given video id, output the FLV URLs
A video ID should look something like a12345.
'''
import requests
import re
import gzip
import json
import sys
CHUNK_URL = 'http://api.twitch.tv/api/videos/{0}?as3=t'
default_headers = {'User-Agent': 'ArchiveTeam'}
def main():
video_id = sys.argv[1]
output_filename = sys.argv[2]
ok_video_type, urls = fetch(video_id)
gzip_file = gzip.open(output_filename, 'w')
json.dump({
'id': video_id,
'video_type': ok_video_type,
'urls': urls
}, gzip_file, indent=2)
gzip_file.close()
def fetch(video_id):
video_id_num = re.search(r'([\d]+)', video_id).group(1)
doc = None
ok_video_type = None
for video_type in ['a', 'b', 'c']:
url = CHUNK_URL.format(video_type + video_id_num)
print('Get', url)
response = requests.get(url, headers=default_headers,)
print(response.status_code)
if response.status_code == 200:
doc = response.json()
ok_video_type = video_type
break
if not doc:
raise Exception('No results!')
if 'live' in doc['chunks']:
urls = list(chunk['url'] for chunk in doc['chunks']['live'])
else:
print('No live chunks found for item')
urls = []
return (ok_video_type, urls)
if __name__ == '__main__':
main()
| '''Given video id, output the FLV URLs
A video ID should look something like a12345.
'''
import requests
import re
import gzip
import json
import sys
CHUNK_URL = 'http://api.twitch.tv/api/videos/{0}?as3=t'
default_headers = {'User-Agent': 'ArchiveTeam'}
def main():
video_id = sys.argv[1]
output_filename = sys.argv[2]
ok_video_type, urls = fetch(video_id)
gzip_file = gzip.open(output_filename, 'w')
json.dump({
'id': video_id,
'video_type': ok_video_type,
'urls': urls
}, gzip_file, indent=2)
gzip_file.close()
def fetch(video_id):
video_id_num = re.search(r'([\d]+)', video_id).group(1)
doc = None
ok_video_type = None
for video_type in ['a', 'b', 'c']:
url = CHUNK_URL.format(video_type + video_id_num)
print('Get', url)
response = requests.get(url, headers=default_headers,)
print(response.status_code)
if response.status_code == 200:
doc = response.json()
ok_video_type = video_type
break
if not doc:
raise Exception('No results!')
urls = list(chunk['url'] for chunk in doc['chunks']['live'])
return (ok_video_type, urls)
if __name__ == '__main__':
main()
| unlicense | Python |
504d2d6ed54620c3a8b988566f2cef74503b4adf | modify import statement in online_check.py | misterlihao/network-programming-project | online_check.py | online_check.py | import wait_socket_messages as wsm
import socket_handler as sh
import socket
def ReceivingOnlineChecks(ready_socket):
while(1):
check, from_who = ready_socket.recvfrom(10)
ready_socket.sendto(b'Online',(from_who, check_online_port))
ready_socket.close()
pass
def CheckSomeoneOnline(ip):
s = sh.CreatePort(check_online_type, check_online_ip, check_online_port, check_online_timeout)
try:
s.connect(ip, check_online_port)
except:
s.close()
return False
s.close()
return True
def StartRecvMessage():
'''wait_for_message''' | import wait_socket_messages as wsm
import socket_handler as sh
import socket
def ReceivingOnlineChecks(ready_socket):
while(1):
check, from_who = ready_socket.recvfrom(10)
ready_socket.sendto(b'Online',(from_who, check_online_port))
ready_socket.close()
pass
def CheckSomeoneOnline(ip):
s = CreatePort(check_online_type, check_online_ip, check_online_port, check_online_timeout)
try:
s.connect(ip, check_online_port)
except:
s.close()
return False
s.close()
return True
def StartRecvMessage():
'''wait_for_message''' | mit | Python |
8de8da05bb9461aaa48d3058f5b1e2caab6191f1 | Use dict() syntax that works in Python 2.6 onward | OpenMath/py-openmath | openmath/xml.py | openmath/xml.py | from . import openmath as om
openmath_ns = "http://www.openmath.org/OpenMath"
omtags = {
"OMOBJ": om.OMObject,
"OMR": om.OMReference,
"OMI": om.OMInteger,
"OMF": om.OMFloat,
"OMSTR": om.OMString,
"OMB": om.OMBytes,
"OMS": om.OMSymbol,
"OMV": om.OMVariable,
"OMFOREIGN": om.OMForeign,
"OMA": om.OMApplication,
"OMATTR": om.OMAttribution,
"OMATP": om.OMAttributionPairs,
"OMBIND": om.OMBinding,
"OMBVAR": om.OMBindVariables,
"OME": om.OMError
}
inv_omtags = dict((v,k) for k,v in omtags.items())
def tag_to_object(tag, ns=True):
if ns and not tag.startswith('{%s}' % openmath_ns):
raise ValueError('Invalid namespace')
return omtags[tag.split('}')[-1]]
def object_to_tag(obj, ns=True):
tpl = '{%(ns)s}%(tag)s' if ns else '%(tag)s'
return tpl % { "ns": openmath_ns, "tag": inv_omtags(obj) }
| from . import openmath as om
openmath_ns = "http://www.openmath.org/OpenMath"
omtags = {
"OMOBJ": om.OMObject,
"OMR": om.OMReference,
"OMI": om.OMInteger,
"OMF": om.OMFloat,
"OMSTR": om.OMString,
"OMB": om.OMBytes,
"OMS": om.OMSymbol,
"OMV": om.OMVariable,
"OMFOREIGN": om.OMForeign,
"OMA": om.OMApplication,
"OMATTR": om.OMAttribution,
"OMATP": om.OMAttributionPairs,
"OMBIND": om.OMBinding,
"OMBVAR": om.OMBindVariables,
"OME": om.OMError
}
inv_omtags = {(v,k) for k,v in omtags.items()}
def tag_to_object(tag, ns=True):
if ns and not tag.startswith('{%s}' % openmath_ns):
raise ValueError('Invalid namespace')
return omtags[tag.split('}')[-1]]
def object_to_tag(obj, ns=True):
tpl = '{%(ns)s}%(tag)s' if ns else '%(tag)s'
return tpl % { "ns": openmath_ns, "tag": inv_omtags(obj) }
| mit | Python |
ab4ae040895c50da6cb0827f6461d1733c7fe30a | Add real plugin state tests for plugins that always work (meta, containers, services). | Oxygem/canaryd,Oxygem/canaryd | tests/test_plugin_states.py | tests/test_plugin_states.py | from contextlib import contextmanager
from os import path
from unittest import TestCase
from dictdiffer import diff
from jsontest import JsonTest
from mock import patch
from canaryd_packages import six
from canaryd.plugin import get_plugin_by_name
class TestPluginRealStates(TestCase):
def run_plugin(self, plugin_name):
plugin = get_plugin_by_name(plugin_name)
plugin.get_state({})
def test_meta_plugin(self):
self.run_plugin('meta')
def test_services_plugin(self):
self.run_plugin('services')
def test_containers_plugin(self):
self.run_plugin('containers')
@six.add_metaclass(JsonTest)
class TestPluginStates(TestCase):
jsontest_files = path.join('tests/plugins')
@contextmanager
def patch_commands(self, commands):
def handle_command(command, *args, **kwargs):
command = command[0]
if command not in commands:
raise ValueError('Broken tests: {0} not in commands: {1}'.format(
command, commands.keys(),
))
return '\n'.join(commands[command])
check_output_patch = patch(
'canaryd.subprocess.check_output',
handle_command,
)
check_output_patch.start()
yield
check_output_patch.stop()
def jsontest_function(self, test_name, test_data):
plugin = get_plugin_by_name(test_data['plugin'])
with self.patch_commands(test_data['commands']):
state = plugin.get_state({})
try:
self.assertEqual(state, test_data['state'])
except AssertionError:
print(list(diff(test_data['state'], state)))
raise
| from contextlib import contextmanager
from os import path
from unittest import TestCase
from canaryd_packages import six
from dictdiffer import diff
from jsontest import JsonTest
from mock import patch
from canaryd.plugin import get_plugin_by_name
@six.add_metaclass(JsonTest)
class TestPluginStates(TestCase):
jsontest_files = path.join('tests/plugins')
@contextmanager
def patch_commands(self, commands):
def handle_command(command, *args, **kwargs):
command = command[0]
if command not in commands:
raise ValueError(
'Broken tests: {0} not in commands'.format(command),
)
return '\n'.join(commands[command])
check_output_patch = patch(
'canaryd.subprocess.check_output',
handle_command,
)
check_output_patch.start()
yield
check_output_patch.stop()
def jsontest_function(self, test_name, test_data):
plugin = get_plugin_by_name(test_data['plugin'])
with self.patch_commands(test_data['commands']):
state = plugin.get_state({})
try:
self.assertEqual(state, test_data['state'])
except AssertionError:
print(list(diff(test_data['state'], state)))
raise
| mit | Python |
9472e9670da6d8b3e7ff7cb0ffd23b8362f7985c | remove extra print | pavlov99/jsonapi,pavlov99/jsonapi | tests/testapp/middleware.py | tests/testapp/middleware.py | from django.http import HttpResponse
from django.conf import settings
import json
class NonHtmlDebugToolbarMiddleware(object):
""" The Django Debug Toolbar usually only works for views that return HTML.
This middleware wraps any non-HTML response in HTML if the request
has a 'debug' query parameter (e.g. http://localhost/foo?debug)
Special handling for json (pretty printing) and
binary data (only show data length).
"""
@staticmethod
def process_response(request, response):
if settings.DEBUG:
if response['Content-Type'] != 'text/html':
content = response.content.decode('utf8')
try:
json_ = json.loads(content)
content = json.dumps(json_, sort_keys=True, indent=2)
except ValueError:
pass
response = HttpResponse(
'<html><body><pre>{}</pre></body></html>'.format(content)
)
return response
| from django.http import HttpResponse
from django.conf import settings
import json
class NonHtmlDebugToolbarMiddleware(object):
""" The Django Debug Toolbar usually only works for views that return HTML.
This middleware wraps any non-HTML response in HTML if the request
has a 'debug' query parameter (e.g. http://localhost/foo?debug)
Special handling for json (pretty printing) and
binary data (only show data length).
"""
@staticmethod
def process_response(request, response):
if settings.DEBUG:
if response['Content-Type'] != 'text/html':
content = response.content.decode('utf8')
print(response['Content-Type'], content)
try:
json_ = json.loads(content)
content = json.dumps(json_, sort_keys=True, indent=2)
except ValueError:
pass
response = HttpResponse(
'<html><body><pre>{}</pre></body></html>'.format(content)
)
return response
| mit | Python |
3c572d213cdab91449427e76fc13ae5ef796b5a4 | Fix #15 in ansible >= 2.3.0 | h-hirokawa/atom-autocomplete-ansible,h-hirokawa/atom-autocomplete-ansible | lib/parse_ansible.py | lib/parse_ansible.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
from ansible.cli.doc import DocCLI
from ansible.playbook import Play
from ansible.playbook.block import Block
from ansible.playbook.role import Role
from ansible.playbook.task import Task
from ansible.plugins import lookup_loader, module_loader
from ansible.utils import module_docs
from ansible.utils.display import Display
display = Display()
def main():
doc_cli = DocCLI([])
module_paths = module_loader._get_paths()
module_keys = ('module', 'short_description', 'options', 'deprecated')
for path in module_paths:
doc_cli.find_modules(path)
result = {'modules': [], 'directives': {}, 'lookup_plugins': []}
for module in sorted(set(doc_cli.module_list)):
if module in module_docs.BLACKLIST_MODULES:
continue
filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
try:
doc = module_docs.get_docstring(filename)[0]
filtered_doc = {key: doc.get(key, None) for key in module_keys}
result['modules'].append(filtered_doc)
except:
pass
for aclass in (Play, Role, Block, Task):
aobj = aclass()
name = type(aobj).__name__
for attr in aobj.__dict__['_attributes']:
if 'private' in attr and attr.private:
continue
direct_target = result['directives'].setdefault(attr, [])
direct_target.append(name)
if attr == 'action':
local_action = result['directives'].setdefault(
'local_action', [])
local_action.append(name)
result['directives']['with_'] = ['Task']
for lookup in lookup_loader.all():
name = os.path.splitext(os.path.basename(lookup._original_path))[0]
result['lookup_plugins'].append(name)
print(json.dumps(result))
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
from ansible.cli.doc import DocCLI
from ansible.playbook import Play
from ansible.playbook.block import Block
from ansible.playbook.role import Role
from ansible.playbook.task import Task
from ansible.plugins import lookup_loader, module_loader
from ansible.utils import module_docs
from ansible.utils.display import Display
display = Display()
def main():
doc_cli = DocCLI([])
module_paths = module_loader._get_paths()
module_keys = ('module', 'short_description', 'options', 'deprecated')
for path in module_paths:
doc_cli.find_modules(path)
result = {'modules': [], 'directives': {}, 'lookup_plugins': []}
for module in sorted(set(doc_cli.module_list)):
if module in module_docs.BLACKLIST_MODULES:
continue
filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
try:
doc, _, _ = module_docs.get_docstring(filename)
filtered_doc = {key: doc.get(key, None) for key in module_keys}
result['modules'].append(filtered_doc)
except:
pass
for aclass in (Play, Role, Block, Task):
aobj = aclass()
name = type(aobj).__name__
for attr in aobj.__dict__['_attributes']:
if 'private' in attr and attr.private:
continue
direct_target = result['directives'].setdefault(attr, [])
direct_target.append(name)
if attr == 'action':
local_action = result['directives'].setdefault(
'local_action', [])
local_action.append(name)
result['directives']['with_'] = ['Task']
for lookup in lookup_loader.all():
name = os.path.splitext(os.path.basename(lookup._original_path))[0]
result['lookup_plugins'].append(name)
print(json.dumps(result))
if __name__ == '__main__':
main()
| mit | Python |
02696b462a4ca40c0df1051f721cbba7822854f7 | Remove stray blank line | Duke-GCB/DukeDSHandoverService,Duke-GCB/DukeDSHandoverService,Duke-GCB/DukeDSHandoverService | d4s2/urls.py | d4s2/urls.py | from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic.base import RedirectView
from django.contrib.auth import views as auth_views
from rest_framework.authtoken import views as authtoken_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^ownership/', include('ownership.urls')),
url(r'^auth/', include('d4s2_auth.urls')),
url(r'^api/v1/', include('d4s2_api.urls')),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
url(r'^api-token-auth/', authtoken_views.obtain_auth_token),
url(r'^accounts/login/$', auth_views.login, {'template_name': 'd4s2_auth/login.html' }, name='login'),
url(r'^accounts/logout/$', auth_views.logout, {'template_name': 'd4s2_auth/logged_out.html' }, name='logout'),
# Redirect / to /accounts/login
url(r'^$', RedirectView.as_view(pattern_name='auth-home', permanent=False)),
]
| from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic.base import RedirectView
from django.contrib.auth import views as auth_views
from rest_framework.authtoken import views as authtoken_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^ownership/', include('ownership.urls')),
url(r'^auth/', include('d4s2_auth.urls')),
url(r'^api/v1/', include('d4s2_api.urls')),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
url(r'^api-token-auth/', authtoken_views.obtain_auth_token),
url(r'^accounts/login/$', auth_views.login, {'template_name': 'd4s2_auth/login.html' }, name='login'),
url(r'^accounts/logout/$', auth_views.logout, {'template_name': 'd4s2_auth/logged_out.html' }, name='logout'),
# Redirect / to /accounts/login
url(r'^$', RedirectView.as_view(pattern_name='auth-home', permanent=False)),
]
| mit | Python |
698b29a82b94d9d819fa515e7593082abfccbc69 | Add environment update needed for issue #510 test | jenisys/behave,jenisys/behave | issue.features/environment.py | issue.features/environment.py | # -*- coding: UTF-8 -*-
# FILE: features/environment.py
"""
Functionality:
* active tags
"""
from __future__ import print_function
from behave.tag_matcher import ActiveTagMatcher, setup_active_tag_values
from behave4cmd0.setup_command_shell import setup_command_shell_processors4behave
import six
import sys
import platform
import os.path
def require_tool(tool_name):
"""Check if a tool (an executable program) is provided on this platform.
:params tool_name: Name of the tool to check if it is available.
:return: True, if tool is found.
:return: False, if tool is not available (or not in search path).
"""
# print("CHECK-TOOL: %s" % tool_name)
path = os.environ.get("PATH")
if not path:
return False
for searchdir in path.split(os.pathsep):
executable1 = os.path.normpath(os.path.join(searchdir, tool_name))
executables = [executable1]
if sys.platform.startswith("win"):
executables.append(executable1 + ".exe")
for executable in executables:
# print("TOOL-CHECK: %s" % os.path.abspath(executable))
if os.path.isfile(executable):
# print("TOOL-FOUND: %s" % os.path.abspath(executable))
return True
# -- OTHERWISE: Tool not found
# print("TOOL-NOT-FOUND: %s" % tool_name)
return False
def as_bool_string(value):
if bool(value):
return "yes"
else:
return "no"
# -- MATCHES ANY TAGS: @use.with_{category}={value}
# NOTE: active_tag_value_provider provides category values for active tags.
active_tag_value_provider = {
"python2": str(six.PY2).lower(),
"python3": str(six.PY3).lower(),
# -- python.implementation: cpython, pypy, jython, ironpython
"python.implementation": platform.python_implementation().lower(),
"pypy": str("__pypy__" in sys.modules).lower(),
"os": sys.platform,
"xmllint": as_bool_string(require_tool("xmllint")),
}
active_tag_matcher = ActiveTagMatcher(active_tag_value_provider)
def before_all(context):
# -- SETUP ACTIVE-TAG MATCHER (with userdata):
# USE: behave -D browser=safari ...
# NOT-NEEDED: setup_active_tag_values(active_tag_value_provider,
# context.config.userdata)
setup_command_shell_processors4behave()
def before_feature(context, feature):
if active_tag_matcher.should_exclude_with(feature.tags):
feature.skip(reason=active_tag_matcher.exclude_reason)
def before_scenario(context, scenario):
if active_tag_matcher.should_exclude_with(scenario.effective_tags):
scenario.skip(reason=active_tag_matcher.exclude_reason)
| # -*- coding: UTF-8 -*-
# FILE: features/environment.py
"""
Functionality:
* active tags
"""
from behave.tag_matcher import ActiveTagMatcher, setup_active_tag_values
from behave4cmd0.setup_command_shell import setup_command_shell_processors4behave
import six
import sys
import platform
# -- MATCHES ANY TAGS: @use.with_{category}={value}
# NOTE: active_tag_value_provider provides category values for active tags.
active_tag_value_provider = {
"python2": str(six.PY2).lower(),
"python3": str(six.PY3).lower(),
# -- python.implementation: cpython, pypy, jython, ironpython
"python.implementation": platform.python_implementation().lower(),
"pypy": str("__pypy__" in sys.modules).lower(),
"os": sys.platform,
}
active_tag_matcher = ActiveTagMatcher(active_tag_value_provider)
def before_all(context):
# -- SETUP ACTIVE-TAG MATCHER (with userdata):
# USE: behave -D browser=safari ...
# NOT-NEEDED: setup_active_tag_values(active_tag_value_provider,
# context.config.userdata)
setup_command_shell_processors4behave()
def before_feature(context, feature):
if active_tag_matcher.should_exclude_with(feature.tags):
feature.skip(reason=active_tag_matcher.exclude_reason)
def before_scenario(context, scenario):
if active_tag_matcher.should_exclude_with(scenario.effective_tags):
scenario.skip(reason=active_tag_matcher.exclude_reason)
| bsd-2-clause | Python |
102374c49da1f2384dd9a7422b9e5463f24c2d54 | Update gmp to 6.0.0a and enable building on both darwin-32 and darwin-64 | BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild | packages/gmp.py | packages/gmp.py | class GmpPackage (Package):
def __init__ (self):
Package.__init__ (self, 'gmp', '6.0.0a', sources = [
'https://ftp.gnu.org/gnu/%{name}/%{name}-%{version}.tar.xz' ],
configure_flags = ['--enable-cxx', '--disable-dependency-tracking']
)
self.source_dir_name = 'gmp-6.0.0'
def arch_build (self, arch):
if arch == 'darwin-32':
self.local_ld_flags = ['-arch i386']
self.local_gcc_flags = ['-arch i386']
self.configure_flags.extend (['ABI=32'])
if arch == 'darwin-64':
self.local_ld_flags = ['-arch x86_64']
self.local_gcc_flags = ['-arch x86_64']
self.configure_flags.extend (['ABI=64'])
Package.arch_build (self, arch, defaults = False)
GmpPackage ()
| class GmpPackage (Package):
def __init__ (self):
Package.__init__ (self, 'gmp', '5.0.4', sources = [
'ftp://ftp.gmplib.org/pub/%{name}-%{version}/%{name}-%{version}.tar.bz2'
],
configure_flags = ['--enable-cxx --disable-dependency-tracking'])
if Package.profile.name == 'darwin':
self.configure_flags.extend (['ABI=32'])
GmpPackage ()
| mit | Python |
e78adec9a36c6279b9258f53e075a39bcfbb1dc0 | replace callbacks with coroutines | LearnProgramming/lpmc-site,LearnProgramming/lpmc-site,LearnProgramming/lpmc-site | github.py | github.py | import config
import tornado.auth
import tornado.escape
import tornado.gen
import tornado.httpclient
import tornado.httputil
class GithubMixin(tornado.auth.OAuth2Mixin):
_OAUTH_AUTHORIZE_URL = 'https://github.com/login/oauth/authorize'
_OAUTH_ACCESS_TOKEN_URL = 'https://github.com/login/oauth/access_token'
def authorize_redirect(self, **kwargs):
kwargs['client_id'] = config.web.github_client_id
super(GithubMixin, self).authorize_redirect(**kwargs)
@tornado.gen.coroutine
def get_authenticated_user(self, redirect_uri, code):
url = self._oauth_request_token_url(
redirect_uri=redirect_uri,
code=code,
client_id=config.web.github_client_id,
client_secret=config.web.github_client_secret,
)
response = yield self._http(url)
data = tornado.escape.json_decode(response.body)
access_token = data['access_token']
user = yield self.github_request('/user', access_token)
user['access_token'] = access_token
return user
@tornado.gen.coroutine
def github_request(self, path, access_token=None, method='GET', body=None, **args):
args['access_token'] = access_token
url = tornado.httputil.url_concat('https://api.github.com' + path, args)
if body is not None:
body = tornado.escape.json_encode(body)
response = yield self._http(url, method=method, body=body)
return tornado.escape.json_decode(response.body)
@staticmethod
@tornado.gen.coroutine
def _http(*args, **kwargs):
kwargs['headers'] = {
'Accept': 'application/json',
'User-Agent': 'raylu', # http://developer.github.com/v3/#user-agent-required
}
response = yield tornado.httpclient.AsyncHTTPClient().fetch(*args, **kwargs)
if response.error:
raise Exception('%s\n%s' % (response.error, response.body))
return response
| import config
import tornado.auth
import tornado.concurrent
import tornado.httpclient
import tornado.escape
import tornado.httputil
class GithubMixin(tornado.auth.OAuth2Mixin):
_OAUTH_AUTHORIZE_URL = 'https://github.com/login/oauth/authorize'
_OAUTH_ACCESS_TOKEN_URL = 'https://github.com/login/oauth/access_token'
def authorize_redirect(self, **kwargs):
kwargs['client_id'] = config.web.github_client_id
super(GithubMixin, self).authorize_redirect(**kwargs)
@tornado.concurrent.return_future
def get_authenticated_user(self, redirect_uri, code, callback=None):
url = self._oauth_request_token_url(
redirect_uri=redirect_uri,
code=code,
client_id=config.web.github_client_id,
client_secret=config.web.github_client_secret,
)
self._http(
url,
self.async_callback(self._on_access_token, redirect_uri, callback)
)
def _on_access_token(self, redirect_uri, callback, response):
if response.error:
raise Exception(response.error)
data = tornado.escape.json_decode(response.body)
access_token = data['access_token']
self.github_request(
'/user',
callback=self.async_callback(self._on_get_user_info, callback, access_token),
access_token=access_token,
)
def _on_get_user_info(self, callback, access_token, user):
user['access_token'] = access_token
callback(user)
def github_request(self, path, callback, access_token=None, method='GET', body=None, **args):
args['access_token'] = access_token
url = tornado.httputil.url_concat('https://api.github.com' + path, args)
if body is not None:
body = tornado.escape.json_encode(body)
self._http(url, callback=self.async_callback(self._parse_response, callback),
method=method, body=body)
def _parse_response(self, callback, response):
if response.error:
raise Exception('%s\n%s' % (response.error, response.body))
data = tornado.escape.json_decode(response.body)
callback(data)
@staticmethod
def _http(*args, **kwargs):
kwargs['headers'] = {
'Accept': 'application/json',
'User-Agent': 'raylu', # http://developer.github.com/v3/#user-agent-required
}
tornado.httpclient.AsyncHTTPClient().fetch(*args, **kwargs)
| mit | Python |
2ae440b9bcc6f6d322e5be72382eeb5218bec217 | remove spaces | openprocurement/restkit,benoitc/restkit | examples/test_gevent.py | examples/test_gevent.py | import timeit
from gevent import monkey; monkey.patch_all()
import gevent
from restkit import *
from restkit.globals import set_manager, get_manager
from restkit.manager.mgevent import GeventManager
#set_logging("debug")
print "Manager was: %s" % type(get_manager())
set_manager(GeventManager())
print"Manager is set to: %s" %type(get_manager())
urls = [
"http://yahoo.fr",
"http://google.com",
"http://friendpaste.com",
"http://benoitc.io",
"http://couchdb.apache.org"]
allurls = []
for i in range(10):
allurls.extend(urls)
def fetch(u):
c = Client()
c.url = u
c.follow_redirect=True
r = c.perform()
print "RESULT: %s: %s (%s)" % (u, r.status, len(r.body_string()))
def extract():
jobs = [gevent.spawn(fetch, url) for url in allurls]
gevent.joinall(jobs)
t = timeit.Timer(stmt=extract)
print "%.2f s" % t.timeit(number=1)
| import timeit
from gevent import monkey; monkey.patch_all()
import gevent
from restkit import *
from restkit.globals import set_manager, get_manager
from restkit.manager.mgevent import GeventManager
#set_logging("debug")
print "Manager was: %s" % type(get_manager())
set_manager(GeventManager())
print"Manager is set to: %s" %type(get_manager())
urls = [
"http://yahoo.fr",
"http://google.com",
"http://friendpaste.com",
"http://benoitc.io",
"http://couchdb.apache.org"]
allurls = []
for i in range(10):
allurls.extend(urls)
def fetch(u):
c = Client()
c.url = u
c.follow_redirect=True
r = c.perform()
print "RESULT: %s: %s (%s)" % (u, r.status, len(r.body_string()))
def extract():
jobs = [gevent.spawn(fetch, url) for url in allurls]
gevent.joinall(jobs)
t = timeit.Timer(stmt=extract)
print "%.2f s" % t.timeit(number=1)
| apache-2.0 | Python |
53ca3d984213436199879eae0b79a87fd5d97fa7 | Fix error HTTP 414 Request URI Too Long | valdur55/py3status,valdur55/py3status,tobes/py3status,Spirotot/py3status,Andrwe/py3status,ultrabug/py3status,valdur55/py3status,alexoneill/py3status,guiniol/py3status,ultrabug/py3status,Shir0kamii/py3status,docwalter/py3status,ultrabug/py3status,vvoland/py3status,tobes/py3status,Andrwe/py3status,guiniol/py3status | py3status/modules/icinga_simple.py | py3status/modules/icinga_simple.py | # -*- coding: utf-8 -*-
"""
Display Icinga2 service status information
Configuration Parameters:
- cache_timeout: how often the data should be updated
- base_url: the base url to the icinga-web2 services list
- disable_acknowledge: enable or disable counting of acknowledged service problems
- user: username to authenticate against the icinga-web2 interface
- password: password to authenticate against the icinga-web2 interface
- format: define a format string like "CRITICAL: %d"
- color: define a color for the output
- status: set the status you want to optain (0=OK,1=WARNING,2=CRITICAL,3=UNKNOWN)
@author Ben Oswald <ben.oswald@root-space.de>
@license MIT License <https://opensource.org/licenses/MIT>
@source https://github.com/nazco/i3status-modules
"""
from time import time
import requests
class Py3status:
"""
"""
STATUS_NAMES = {
0: 'OK',
1: 'WARNING',
2: 'CRITICAL',
3: 'UNKNOWN'
}
# available configuration parameters
cache_timeout = 60
base_url = ''
disable_acknowledge = False
url_parameters = "?service_state={service_state}&format=json"
user = ''
password = ''
ca = True
format = '{status_name}: {count}'
color = '#ffffff'
status = 0
def get_status(self, i3s_output_list, i3s_config):
response = {
'color': self.color,
'cached_until': time() + self.cache_timeout,
'full_text': self.format.format(
status_name=self.STATUS_NAMES.get(self.status, "INVALID STATUS"),
count=self._query_service_count(self.status)
)
}
return response
def _query_service_count(self, state):
url_parameters = self.url_parameters
if self.disable_acknowledge:
url_parameters = url_parameters + "&service_handled=0"
result = requests.get(
self.base_url + url_parameters.format(service_state=state),
auth=(self.user, self.password), verify=self.ca)
return len(result.json())
if __name__ == "__main__":
pass
| # -*- coding: utf-8 -*-
"""
Display Icinga2 service status information
Configuration Parameters:
- cache_timeout: how often the data should be updated
- base_url: the base url to the icinga-web2 services list
- disable_acknowledge: enable or disable counting of acknowledged service problems
- user: username to authenticate against the icinga-web2 interface
- password: password to authenticate against the icinga-web2 interface
- format: define a format string like "CRITICAL: %d"
- color: define a color for the output
- status: set the status you want to optain (0=OK,1=WARNING,2=CRITICAL,3=UNKNOWN)
@author Ben Oswald <ben.oswald@root-space.de>
@license MIT License <https://opensource.org/licenses/MIT>
@source https://github.com/nazco/i3status-modules
"""
from time import time
import requests
class Py3status:
"""
"""
STATUS_NAMES = {
0: 'OK',
1: 'WARNING',
2: 'CRITICAL',
3: 'UNKNOWN'
}
# available configuration parameters
cache_timeout = 60
base_url = ''
disable_acknowledge = False
url_parameters = "?service_state={service_state}&format=json"
user = ''
password = ''
ca = True
format = '{status_name}: {count}'
color = '#ffffff'
status = 0
def get_status(self, i3s_output_list, i3s_config):
response = {
'color': self.color,
'cached_until': time() + self.cache_timeout,
'full_text': self.format.format(
status_name=self.STATUS_NAMES.get(self.status, "INVALID STATUS"),
count=self._query_service_count(self.status)
)
}
return response
def _query_service_count(self, state):
if self.disable_acknowledge:
self.url_parameters = self.url_parameters + "&service_handled=0"
result = requests.get(
self.base_url + self.url_parameters.format(service_state=state),
auth=(self.user, self.password), verify=self.ca)
return len(result.json())
if __name__ == "__main__":
pass
| bsd-3-clause | Python |
4f6bdfd2e32fc0cfa80ce2a6b81dce83ee5cdaf8 | support namespaced JSON objects | wq/wq.io,wq/wq.io | parsers/text.py | parsers/text.py | import csv, json
from lxml import etree as xml
class CsvParser(object):
def parse(self):
self.csvdata = csv.DictReader(self.file)
self.data = [row for row in self.csvdata]
def dump(self, file=None):
if file is None:
file = self.file
csvout = csv.DictWriter(file, self.get_field_names())
csvout.writeheader()
for row in self.data:
csvout.writerow(row)
@property
def field_names(self):
return self.csvdata.fieldnames
class JsonParser(object):
indent = None
namespace = None
def parse(self):
try:
obj = json.load(self.file)
if self.namespace:
for key in self.namespace.split('.'):
obj = obj[key]
self.data = map(self.parse_item, obj)
except ValueError:
self.data = []
def parse_item(self, item):
return item
def dump(self, file=None):
if file is None:
file = self.file
obj = map(self.dump_item, self.data)
if self.namespace:
for key in reversed(self.namespace.split('.')):
obj = {key: obj}
json.dump(obj, file, indent=self.indent)
def dump_item(self, item):
return item
class XmlParser(object):
root_tag = None
item_tag = None
def parse(self):
doc = xml.parse(self.file)
root = doc.getroot()
if self.root_tag is None:
self.root_tag = root.tag
if self.item_tag is None:
self.item_tag = list(root)[0].tag
self.data = map(self.parse_item, root.findall(self.item_tag))
def parse_item(self, el):
return {e.tag: e.text for e in el}
def dump(self, file=None):
if file is None:
file = self.file
root = xml.Element(self.root_tag)
for item in self.data:
root.append(self.dump_item(item))
xml.ElementTree(root).write(file)
def dump_item(self, item):
el = xml.Element(self.item_tag)
for key in self.get_field_names():
if key not in item or item[key] is None:
continue
sel = xml.SubElement(el, key)
sel.text = unicode(item.get(key))
return el
| import csv, json
from lxml import etree as xml
class CsvParser(object):
def parse(self):
self.csvdata = csv.DictReader(self.file)
self.data = [row for row in self.csvdata]
def dump(self, file=None):
if file is None:
file = self.file
csvout = csv.DictWriter(file, self.get_field_names())
csvout.writeheader()
for row in self.data:
csvout.writerow(row)
@property
def field_names(self):
return self.csvdata.fieldnames
class JsonParser(object):
indent = None
def parse(self):
try:
self.data = json.load(self.file)
except:
self.data = []
def dump(self, file=None):
if file is None:
file = self.file
json.dump(self.data, file, indent=self.indent)
class XmlParser(object):
root_tag = None
item_tag = None
def parse(self):
doc = xml.parse(self.file)
root = doc.getroot()
if self.root_tag is None:
self.root_tag = root.tag
if self.item_tag is None:
self.item_tag = list(root)[0].tag
self.data = map(self.parse_item, root.findall(self.item_tag))
def parse_item(self, el):
return {e.tag: e.text for e in el}
def dump(self, file=None):
if file is None:
file = self.file
root = xml.Element(self.root_tag)
for item in self.data:
root.append(self.dump_item(item))
xml.ElementTree(root).write(file)
def dump_item(self, item):
el = xml.Element(self.item_tag)
for key in self.get_field_names():
if key not in item or item[key] is None:
continue
sel = xml.SubElement(el, key)
sel.text = unicode(item.get(key))
return el
| mit | Python |
1b010ae76dae7e45d835b2e160df81f67e5349ce | Fix pep8 issue | pyQode/pyqode.python,mmolero/pyqode.python,zwadar/pyqode.python,pyQode/pyqode.python | pyqode/python/frontend/__init__.py | pyqode/python/frontend/__init__.py | # -*- coding: utf-8 -*-
"""
The frontend packages contains classes and functions related to
tge gui side application. This is where you will find the python
specific modes and panels and an already configured CodeEdit made
for python programming.
"""
import re
import sys
from pyqode.core.frontend import open_file as _open_file
from pyqode.python.frontend.code_edit import PyCodeEdit
from pyqode.python.frontend import modes
from pyqode.python.frontend import panels
def open_file(editor, path, replace_tabs_by_spaces=True):
"""
Extends pyqode.core.frontend.open_file to detect encoding from the script
coding tag (we can do that only for python scripts).
"""
def detect_encoding(path, default):
"""
For the implementation of encoding definitions in Python, look at:
- http://www.python.org/dev/peps/pep-0263/
.. note:: code taken and adapted from
```jedi.common.source_to_unicode.detect_encoding```
"""
import ast
with open(path, 'rb') as f:
source = f.read()
# take care of line encodings (not in jedi)
source = source.replace(b'\r', b'')
source_str = str(source).replace('\\n', '\n')
byte_mark = ast.literal_eval(r"b'\xef\xbb\xbf'")
if source.startswith(byte_mark):
# UTF-8 byte-order mark
return 'utf-8'
first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', source_str).group(0)
possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
first_two_lines)
if possible_encoding:
return possible_encoding.group(1)
else:
return default
_open_file(editor, path, replace_tabs_by_spaces=replace_tabs_by_spaces,
detect_encoding_func=detect_encoding,
default_encoding='iso-8859-1')
| # -*- coding: utf-8 -*-
"""
The frontend packages contains classes and functions related to
tge gui side application. This is where you will find the python
specific modes and panels and an already configured CodeEdit made
for python programming.
"""
import re
import sys
from pyqode.core.frontend import open_file as _open_file
from pyqode.python.frontend.code_edit import PyCodeEdit
from pyqode.python.frontend import modes
from pyqode.python.frontend import panels
def open_file(editor, path, replace_tabs_by_spaces=True):
"""
Extends pyqode.core.frontend.open_file to detect encoding from the script
coding tag (we can do that only for python scripts).
"""
def detect_encoding(path, default):
"""
For the implementation of encoding definitions in Python, look at:
- http://www.python.org/dev/peps/pep-0263/
- http://docs.python.org/2/reference/lexical_analysis.html#encoding-declarations
.. note:: code taken and adapted from
```jedi.common.source_to_unicode.detect_encoding```
"""
import ast
with open(path, 'rb') as f:
source = f.read()
# take care of line encodings (not in jedi)
source = source.replace(b'\r', b'')
source_str = str(source).replace('\\n', '\n')
byte_mark = ast.literal_eval(r"b'\xef\xbb\xbf'")
if source.startswith(byte_mark):
# UTF-8 byte-order mark
return 'utf-8'
first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', source_str).group(0)
possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
first_two_lines)
if possible_encoding:
return possible_encoding.group(1)
else:
return default
_open_file(editor, path, replace_tabs_by_spaces=replace_tabs_by_spaces,
detect_encoding_func=detect_encoding,
default_encoding='iso-8859-1')
| mit | Python |
27e1cfef33841bffe7dde4a1d6f28a1c7e9c93fe | add noreorder for message MessageHandle because of circular dependency | tilezen/tilequeue | tilequeue/queue/__init__.py | tilequeue/queue/__init__.py | from message import MessageHandle # noreorder
from file import OutputFileQueue
from memory import MemoryQueue
from redis_queue import make_redis_queue
from sqs import JobProgressException
from sqs import make_sqs_queue
from sqs import make_visibility_manager
from sqs import SqsQueue
__all__ = [
JobProgressException,
make_redis_queue,
make_sqs_queue,
make_visibility_manager,
MemoryQueue,
MessageHandle,
OutputFileQueue,
SqsQueue,
]
| from file import OutputFileQueue
from memory import MemoryQueue
from message import MessageHandle
from redis_queue import make_redis_queue
from sqs import JobProgressException
from sqs import make_sqs_queue
from sqs import make_visibility_manager
from sqs import SqsQueue
__all__ = [
JobProgressException,
make_redis_queue,
make_sqs_queue,
make_visibility_manager,
MemoryQueue,
MessageHandle,
OutputFileQueue,
SqsQueue,
]
| mit | Python |
127c80a48a1e69885e73c3819d5780e33ea816d4 | Add bloom-filter-cpp lib target | bbondy/bloom-filter-cpp,bbondy/bloom-filter-cpp,bbondy/bloom-filter-cpp,bbondy/bloom-filter-cpp | binding.gyp | binding.gyp | {
"targets": [{
"target_name": "bloom-filter-cpp",
"type": "lib",
"sources": [
"BloomFilter.cpp",
"BloomFilter.h"
],
"include_dirs": [
".",
],
"conditions": [
['OS=="win"', {
}, {
'cflags_cc': [ '-fexceptions' ]
}
]
],
"xcode_settings": {
"OTHER_CFLAGS": [ "-ObjC" ],
"OTHER_CPLUSPLUSFLAGS" : ["-std=c++11","-stdlib=libc++", "-v"],
"OTHER_LDFLAGS": ["-stdlib=libc++"],
"MACOSX_DEPLOYMENT_TARGET": "10.9",
"GCC_ENABLE_CPP_EXCEPTIONS": "YES",
},
}, {
"target_name": "sample",
"type": "executable",
"sources": [
"main.cpp",
"BloomFilter.cpp",
"BloomFilter.h"
],
"include_dirs": [
".",
],
"conditions": [
['OS=="win"', {
}, {
'cflags_cc': [ '-fexceptions' ]
}
]
],
"xcode_settings": {
"OTHER_CFLAGS": [ "-ObjC" ],
"OTHER_CPLUSPLUSFLAGS" : ["-std=c++11","-stdlib=libc++", "-v"],
"OTHER_LDFLAGS": ["-stdlib=libc++"],
"MACOSX_DEPLOYMENT_TARGET": "10.9",
"GCC_ENABLE_CPP_EXCEPTIONS": "YES",
},
}, {
"target_name": "test",
"type": "executable",
"sources": [
"./test/test-main.cpp",
"./test/test-bloom.cpp",
"./test/util.cpp",
"BloomFilter.cpp",
],
"include_dirs": [
".",
"<!(node -e \"require('cppunitlite')\")",
"<!(node -e \"require('nan')\")"
],
"dependencies": [
"node_modules/cppunitlite/binding.gyp:CppUnitLite",
],
"conditions": [
['OS=="win"', {
}, {
'cflags_cc': [ '-fexceptions' ]
}
]
],
"xcode_settings": {
"OTHER_CFLAGS": [ "-ObjC" ],
"OTHER_CPLUSPLUSFLAGS" : ["-std=c++11","-stdlib=libc++", "-v"],
"OTHER_LDFLAGS": ["-stdlib=libc++"],
"MACOSX_DEPLOYMENT_TARGET": "10.9",
"GCC_ENABLE_CPP_EXCEPTIONS": "YES",
},
}]
}
| {
"targets": [{
"target_name": "sample",
"type": "executable",
"sources": [
"main.cpp",
"BloomFilter.cpp",
"BloomFilter.h"
],
"include_dirs": [
".",
],
"conditions": [
['OS=="win"', {
}, {
'cflags_cc': [ '-fexceptions' ]
}
]
],
"xcode_settings": {
"OTHER_CFLAGS": [ "-ObjC" ],
"OTHER_CPLUSPLUSFLAGS" : ["-std=c++11","-stdlib=libc++", "-v"],
"OTHER_LDFLAGS": ["-stdlib=libc++"],
"MACOSX_DEPLOYMENT_TARGET": "10.9",
"GCC_ENABLE_CPP_EXCEPTIONS": "YES",
},
}, {
"target_name": "test",
"type": "executable",
"sources": [
"./test/test-main.cpp",
"./test/test-bloom.cpp",
"./test/util.cpp",
"BloomFilter.cpp",
],
"include_dirs": [
".",
"<!(node -e \"require('cppunitlite')\")",
"<!(node -e \"require('nan')\")"
],
"dependencies": [
"node_modules/cppunitlite/binding.gyp:CppUnitLite",
],
"conditions": [
['OS=="win"', {
}, {
'cflags_cc': [ '-fexceptions' ]
}
]
],
"xcode_settings": {
"OTHER_CFLAGS": [ "-ObjC" ],
"OTHER_CPLUSPLUSFLAGS" : ["-std=c++11","-stdlib=libc++", "-v"],
"OTHER_LDFLAGS": ["-stdlib=libc++"],
"MACOSX_DEPLOYMENT_TARGET": "10.9",
"GCC_ENABLE_CPP_EXCEPTIONS": "YES",
},
}]
}
| mpl-2.0 | Python |
0ad3efcbc10f11195727532d515d5adb3640ed6b | add fibonacci demo | gopro/gopro-lib-node.gl,gopro/gopro-lib-node.gl,gopro/gopro-lib-node.gl,gopro/gopro-lib-node.gl | pynodegl-utils/pynodegl_utils/examples/misc.py | pynodegl-utils/pynodegl_utils/examples/misc.py | import math
from pynodegl import Texture, Shader, TexturedShape, Rotate, AnimKeyFrameScalar, Triangle
from pynodegl import Quad, UniformVec4, Camera, Group
from pynodegl_utils.misc import scene
@scene({'name': 'size', 'type': 'range', 'range': [0,1.5], 'unit_base': 1000})
def triangle(cfg, size=0.5):
frag_data = '''
#version 100
precision mediump float;
varying vec2 var_tex0_coords;
void main(void)
{
vec2 c = var_tex0_coords;
gl_FragColor = vec4(c.y-c.x, 1.0-c.y, c.x, 1.0);
}'''
b = size * math.sqrt(3) / 2.0
c = size * 1/2.
triangle = Triangle((0, size, 0), (b, -c, 0), (-b, -c, 0))
s = Shader(fragment_data=frag_data)
node = TexturedShape(triangle, s, Texture())
node = Rotate(node, axis=(0,0,1))
node.add_animkf(AnimKeyFrameScalar(0, 0),
AnimKeyFrameScalar(cfg.duration, -360*2))
return node
@scene({'name': 'n', 'type': 'range', 'range': [2,10]})
def fibo(cfg, n=8):
frag_data = '''
#version 100
precision mediump float;
uniform vec4 color;
void main(void) {
gl_FragColor = color;
}'''
s = Shader(fragment_data=frag_data)
fib = [0, 1, 1]
for i in range(2, n):
fib.append(fib[i] + fib[i-1])
fib = fib[::-1]
shift = 1/3. # XXX: what's the exact math here?
shape_scale = 1. / ((2.-shift) * sum(fib))
orig = (-shift, -shift, 0)
g = None
root = None
for i, x in enumerate(fib[:-1]):
w = x * shape_scale
gray = 1. - i/float(n)
color = [gray, gray, gray, 1]
q = Quad(orig, (w, 0, 0), (0, w, 0))
tshape = TexturedShape(q, s)
tshape.add_uniforms(UniformVec4("color", value=color))
new_g = Group()
rot = Rotate(new_g, axis=(0,0,1), anchor=orig)
rot.add_animkf(AnimKeyFrameScalar(0, 90, "exp_in_out"),
AnimKeyFrameScalar(cfg.duration/2, -90, "exp_in_out"),
AnimKeyFrameScalar(cfg.duration, 90))
if g:
g.add_children(rot)
else:
root = rot
g = new_g
new_g.add_children(tshape)
orig = (orig[0] + w, orig[1] + w, 0)
root = Camera(root)
root.set_eye(0.0, 0.0, 2.0)
root.set_up(0.0, 1.0, 0.0)
root.set_perspective(45.0, cfg.aspect_ratio, 1.0, 10.0)
return root
| import math
from pynodegl import Texture, Shader, TexturedShape, Rotate, AnimKeyFrameScalar, Triangle
from pynodegl_utils.misc import scene
@scene({'name': 'size', 'type': 'range', 'range': [0,1.5], 'unit_base': 1000})
def triangle(cfg, size=0.5):
frag_data = '''
#version 100
precision mediump float;
varying vec2 var_tex0_coords;
void main(void)
{
vec2 c = var_tex0_coords;
gl_FragColor = vec4(c.y-c.x, 1.0-c.y, c.x, 1.0);
}'''
b = size * math.sqrt(3) / 2.0
c = size * 1/2.
triangle = Triangle((0, size, 0), (b, -c, 0), (-b, -c, 0))
s = Shader(fragment_data=frag_data)
node = TexturedShape(triangle, s, Texture())
node = Rotate(node, axis=(0,0,1))
node.add_animkf(AnimKeyFrameScalar(0, 0),
AnimKeyFrameScalar(cfg.duration, -360*2))
return node
| apache-2.0 | Python |
4be2123258dd1184f3b105435cff53cf3af40d68 | fix encoding | yanne/webride,yanne/webride | webride/webride.py | webride/webride.py | #
# Copyright 2014 Janne Harkonen
# Distributed under the MIT License: http://opensource.org/licenses/MIT
#
import os
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route("/")
def init():
names = os.listdir('testdata')
return render_template('main.html', suite_names=names)
@app.route("/datafile")
def fetch_datafile():
fname = request.args.get('path')
return open(os.path.join('testdata', fname)).read()
@app.route("/datafile/save", methods=['POST'])
def save_datafile():
with open('output/test_suite2.txt', 'w') as outfile:
outfile.write(request.form['value'])
return ''
if __name__ == "__main__":
app.run()
| #
# Copyright 2014 Janne Härkönen
# Distributed under the MIT License: http://opensource.org/licenses/MIT
#
import os
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route("/")
def init():
names = os.listdir('testdata')
return render_template('main.html', suite_names=names)
@app.route("/datafile")
def fetch_datafile():
fname = request.args.get('path')
return open(os.path.join('testdata', fname)).read()
@app.route("/datafile/save", methods=['POST'])
def save_datafile():
with open('output/test_suite2.txt', 'w') as outfile:
outfile.write(request.form['value'])
return ''
if __name__ == "__main__":
app.run()
| mit | Python |
be370e6c13ef7db287c9fb5198f16f262d2c86e7 | add VumiBackend to __init__ so rapidsms.backends.vumi.VumiBackend works | peterayeni/rapidsms,eHealthAfrica/rapidsms,lsgunth/rapidsms,caktus/rapidsms,peterayeni/rapidsms,catalpainternational/rapidsms,lsgunth/rapidsms,catalpainternational/rapidsms,lsgunth/rapidsms,ehealthafrica-ci/rapidsms,ehealthafrica-ci/rapidsms,lsgunth/rapidsms,peterayeni/rapidsms,caktus/rapidsms,eHealthAfrica/rapidsms,ehealthafrica-ci/rapidsms,caktus/rapidsms,catalpainternational/rapidsms,peterayeni/rapidsms,eHealthAfrica/rapidsms,catalpainternational/rapidsms | rapidsms/backends/vumi/__init__.py | rapidsms/backends/vumi/__init__.py | from rapidsms.backends.vumi.outgoing import VumiBackend
__all__ = ('VumiBackend',)
| bsd-3-clause | Python | |
a26b88cd6b2034b7048d5b4729d3fa90ddfb255d | Update docs with actual MariaDB JSON support | j5int/sqlalchemy,sqlalchemy/sqlalchemy,monetate/sqlalchemy,zzzeek/sqlalchemy,j5int/sqlalchemy,monetate/sqlalchemy | lib/sqlalchemy/dialects/mysql/json.py | lib/sqlalchemy/dialects/mysql/json.py | # mysql/json.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
from ... import types as sqltypes
class JSON(sqltypes.JSON):
"""MySQL JSON type.
MySQL supports JSON as of version 5.7.
MariaDB supports JSON (as an alias for LONGTEXT) as of version 10.2.
The :class:`.mysql.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function at the database level.
.. versionadded:: 1.1
"""
pass
class _FormatTypeMixin(object):
def _format_value(self, value):
raise NotImplementedError()
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
def _format_value(self, value):
if isinstance(value, int):
value = "$[%s]" % value
else:
value = '$."%s"' % value
return value
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
def _format_value(self, value):
return "$%s" % (
"".join(
[
"[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
for elem in value
]
)
)
| # mysql/json.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
from ... import types as sqltypes
class JSON(sqltypes.JSON):
"""MySQL JSON type.
MySQL supports JSON as of version 5.7. Note that MariaDB does **not**
support JSON at the time of this writing.
The :class:`.mysql.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function at the database level.
.. versionadded:: 1.1
"""
pass
class _FormatTypeMixin(object):
def _format_value(self, value):
raise NotImplementedError()
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
def _format_value(self, value):
if isinstance(value, int):
value = "$[%s]" % value
else:
value = '$."%s"' % value
return value
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
def _format_value(self, value):
return "$%s" % (
"".join(
[
"[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
for elem in value
]
)
)
| mit | Python |
7e97722354f1b59eb97f29aa99102ff1212e33ce | Use parse_qsl instead of urldecode because facebook sends a pipe character in its output for client_credential grants. | lucidbard/requests-oauthlib,dongguangming/requests-oauthlib,elafarge/requests-oauthlib,abhi931375/requests-oauthlib,jayvdb/requests-oauthlib,jayvdb/requests-oauthlib,gras100/asks-oauthlib,jsfan/requests-oauthlib,sigmavirus24/requests-oauthlib,requests/requests-oauthlib,singingwolfboy/requests-oauthlib | requests_oauthlib/compliance_fixes/facebook.py | requests_oauthlib/compliance_fixes/facebook.py | from json import dumps
from oauthlib.common import urldecode
from urllib import parse_qsl
def facebook_compliance_fix(session):
def _compliance_fix(r):
# if Facebook claims to be sending us json, let's trust them.
if r.headers['content-type'] == 'application/json':
return r
# Facebook returns a content-type of text/plain when sending their
# x-www-form-urlencoded responses, along with a 200. If not, let's
# assume we're getting JSON and bail on the fix.
if r.headers['content-type'] == 'text/plain' and r.status_code == 200:
token = dict(parse_qsl(r.text, keep_blank_values=True))
else:
return r
expires = token.get('expires')
if expires is not None:
token['expires_in'] = expires
token['token_type'] = 'Bearer'
r._content = dumps(token)
return r
session.register_compliance_hook('access_token_response', _compliance_fix)
return session
| from json import dumps
from oauthlib.common import urldecode
def facebook_compliance_fix(session):
def _compliance_fix(r):
# if Facebook claims to be sending us json, let's trust them.
if r.headers['content-type'] == 'application/json':
return r
# Facebook returns a content-type of text/plain when sending their
# x-www-form-urlencoded responses, along with a 200. If not, let's
# assume we're getting JSON and bail on the fix.
if r.headers['content-type'] == 'text/plain' and r.status_code == 200:
token = dict(urldecode(r.text))
else:
return r
expires = token.get('expires')
if expires is not None:
token['expires_in'] = expires
token['token_type'] = 'Bearer'
r._content = dumps(token)
return r
session.register_compliance_hook('access_token_response', _compliance_fix)
return session
| isc | Python |
2f48c53dbe8bafcaca596ce454ba8e7d987d86b1 | Create test within input | ForestPride/rail-problem | station.py | station.py | """Creates the station class"""
class Station:
"""
Each train station is an instance of the Station class.
Methods:
__init__: creates a new stations
total_station_pop: calculates total station population
"""
def __init__(self):
while True:
self.capacity = int(eval(input("Enter the max capacity of the station: ")))
if type(capacity) == int:
print("You just set the max capacity!")
else:
print("Please enter a positive integer.")
self.escalators = int(eval(input("Enter the number of escalators in the station: ")))
#testfuntion()
self.train_wait = int(eval(input("Enter the wait time between trains in seconds: ")))
#testfuntion()
self.travelors_arriving = int(eval(input("How many people just exited the train? ")))
#testfuntion()
self.travelors_departing = int(eval(input("How many people are waiting for the train? ")))
#testfuntion()
| """Creates the station class"""
class Station:
"""
Each train station is an instance of the Station class.
Methods:
__init__: creates a new stations
total_station_pop: calculates total station population
"""
def __init__(self):
while True:
self.capacity = int(eval(input("Enter the max capacity of the station: ")))
if self.capacity == int:
break
else:
print("Please enter a positive integer.")
self.escalators = int(eval(input("Enter the number of escalators in the station: ")))
#testfuntion()
self.train_wait = int(eval(input("Enter the wait time between trains in seconds: ")))
#testfuntion()
self.travelors_arriving = int(eval(input("How many people just exited the train? ")))
#testfuntion()
self.travelors_departing = int(eval(input("How many people are waiting for the train? ")))
#testfuntion()
| mit | Python |
f2a961332ef25e21d2d8b96616edd30eaa8bd622 | Make sure DOIs are matched in a case-insensitive way | dissemin/dissemin,wetneb/dissemin,dissemin/dissemin,Lysxia/dissemin,wetneb/dissemin,dissemin/dissemin,dissemin/dissemin,Lysxia/dissemin,wetneb/dissemin,Lysxia/dissemin,wetneb/dissemin,Lysxia/dissemin,dissemin/dissemin | papers/doi.py | papers/doi.py | # -*- encoding: utf-8 -*-
# Dissemin: open access policy enforcement tool
# Copyright (C) 2014 Antonin Delpeuch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from __future__ import unicode_literals
import re
# DOIs have very few limitations on what can appear in them
# see the standards
# hence a quite permissive regexp, as we use it in a controlled
# environment: fields of a metadata record and not plain text
doi_re = re.compile(r'^ *(?:[Dd][Oo][Ii] *[:=])? *(?:http://dx\.doi\.org/)?(10\.[0-9]{4,}[^ ]*/[^ ]+) *$')
openaire_doi_re = re.compile(r'info:eu-repo/semantics/altIdentifier/doi/(10\.[0-9]{4,}[^ ]*/[^ ]+) *')
def to_doi(candidate):
"""
>>> to_doi('http://dx.doi.org/10.1145/1721837.1721839')
u'10.1145/1721837.1721839'
>>> to_doi('10.1145/1721837.1721839')
u'10.1145/1721837.1721839'
>>> to_doi('DOI: 10.1145/1721837.1721839')
u'10.1145/1721837.1721839'
>>> to_doi('info:eu-repo/semantics/altIdentifier/doi/10.1145/1721837.1721839')
u'10.1145/1721837.1721839'
>>> to_doi('10.1093/jhmas/XXXI.4.480')
u'10.1093/jhmas/xxxi.4.480'
"""
m = doi_re.match(candidate)
if m:
return m.groups()[0].lower()
else:
openaire_match = openaire_doi_re.match(candidate)
if openaire_match:
return openaire_match.group(1).lower()
| # -*- encoding: utf-8 -*-
# Dissemin: open access policy enforcement tool
# Copyright (C) 2014 Antonin Delpeuch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from __future__ import unicode_literals
import re
# DOIs have very few limitations on what can appear in them
# see the standards
# hence a quite permissive regexp, as we use it in a controlled
# environment: fields of a metadata record and not plain text
doi_re = re.compile(r'^ *(?:[Dd][Oo][Ii] *[:=])? *(?:http://dx\.doi\.org/)?(10\.[0-9]{4,}[^ ]*/[^ ]+) *$')
openaire_doi_re = re.compile(r'info:eu-repo/semantics/altIdentifier/doi/(10\.[0-9]{4,}[^ ]*/[^ ]+) *')
def to_doi(candidate):
"""
>>> to_doi('http://dx.doi.org/10.1145/1721837.1721839')
u'10.1145/1721837.1721839'
>>> to_doi('10.1145/1721837.1721839')
u'10.1145/1721837.1721839'
>>> to_doi('DOI: 10.1145/1721837.1721839')
u'10.1145/1721837.1721839'
>>> to_doi('info:eu-repo/semantics/altIdentifier/doi/10.1145/1721837.1721839')
u'10.1145/1721837.1721839'
"""
m = doi_re.match(candidate)
if m:
return m.groups()[0]
else:
openaire_match = openaire_doi_re.match(candidate)
if openaire_match:
return openaire_match.group(1)
| agpl-3.0 | Python |
1c6fb9e30b2b39af76d2a32066d45f142c598f65 | Fix call to call | petebachant/UNH-RVAT-turbinesFoam,petebachant/UNH-RVAT-turbinesFoam | paramsweep.py | paramsweep.py | #!/usr/bin/env python
"""
Run multiple simulations varying a single parameter.
"""
import foampy
from foampy.dictionaries import replace_value
import numpy as np
from subprocess import call
import os
import pandas as pd
from modules import processing as pr
def zero_tsr_fluc():
"""Set TSR fluctuation amplitude to zero."""
replace_value("system/fvOptions", "tsrAmplitude", 0.0)
def set_tsr(val):
"""Set mean tip speed ratio."""
print("Setting TSR to", val)
replace_value("system/fvOptions", "tipSpeedRatio", val)
def log_perf(param="tsr", append=True):
"""Log performance to file."""
if not os.path.isdir("processed"):
os.mkdir("processed")
fpath = "processed/{}_sweep.csv".format(param)
if append and os.path.isfile(fpath):
df = pd.read_csv(fpath)
else:
df = pd.DataFrame(columns=["tsr", "cp", "cd"])
df = df.append(pr.calc_perf(t1=3.0), ignore_index=True)
df.to_csv(fpath, index=False)
def tsr_sweep(start=0.4, stop=3.4, step=0.5, append=False):
"""Run over multiple TSRs. `stop` will be included."""
if not append and os.path.isfile("processed/tsr_sweep.csv"):
os.remove("processed/tsr_sweep.csv")
tsrs = np.arange(start, stop + 0.5*step, step)
zero_tsr_fluc()
cp = []
cd = []
for tsr in tsrs:
set_tsr(tsr)
if tsr == tsrs[0]:
call("./Allclean")
call("./Allrun")
else:
call("pimpleFoam | tee log.pimpleFoam", shell=True)
os.rename("log.pimpleFoam", "log.pimpleFoam." + str(tsr))
log_perf(append=True)
# Checkout original fvOptions
call(["git", "checkout", "system/fvOptions"])
if __name__ == "__main__":
tsr_sweep(0.4, 3.4, 0.5, append=False)
| #!/usr/bin/env python
"""
Run multiple simulations varying a single parameter.
"""
import foampy
from foampy.dictionaries import replace_value
import numpy as np
from subprocess import call
import os
import pandas as pd
from modules import processing as pr
def zero_tsr_fluc():
"""Set TSR fluctuation amplitude to zero."""
replace_value("system/fvOptions", "tsrAmplitude", 0.0)
def set_tsr(val):
"""Set mean tip speed ratio."""
print("Setting TSR to", val)
replace_value("system/fvOptions", "tipSpeedRatio", val)
def log_perf(param="tsr", append=True):
"""Log performance to file."""
if not os.path.isdir("processed"):
os.mkdir("processed")
fpath = "processed/{}_sweep.csv".format(param)
if append and os.path.isfile(fpath):
df = pd.read_csv(fpath)
else:
df = pd.DataFrame(columns=["tsr", "cp", "cd"])
df = df.append(pr.calc_perf(t1=3.0), ignore_index=True)
df.to_csv(fpath, index=False)
def tsr_sweep(start=0.4, stop=3.4, step=0.5, append=False):
"""Run over multiple TSRs. `stop` will be included."""
if not append and os.path.isfile("processed/tsr_sweep.csv"):
os.remove("processed/tsr_sweep.csv")
tsrs = np.arange(start, stop + 0.5*step, step)
zero_tsr_fluc()
cp = []
cd = []
for tsr in tsrs:
set_tsr(tsr)
if tsr == tsrs[0]:
call("./Allclean")
call("./Allrun")
else:
call("pimpleFoam | tee log.pimpleFoam", shell=True)
os.rename("log.pimpleFoam", "log.pimpleFoam." + str(tsr))
log_perf(append=True)
# Checkout original fvOptions
call["git", "checkout", "system/fvOptions"]
if __name__ == "__main__":
tsr_sweep(0.4, 3.4, 0.5, append=False)
| mit | Python |
baf9c5a6523a9b90db7572330d04e3e199391273 | bump version | tlatsas/wigiki | wigiki/__init__.py | wigiki/__init__.py | """A static site generator which uses Github's gists as pages"""
__author__ = "Tasos Latsas"
__version__ = "0.6.4"
| """A static site generator which uses Github's gists as pages"""
__author__ = "Tasos Latsas"
__version__ = "0.6.2"
| mit | Python |
b0c1c660a3629bfc7103d00c502e02e2d5bbed60 | fix linting | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/utils.py | accelerator/utils.py | from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
from bullet_train import BulletTrain
BULLET_TRAIN_ENVIRONMENT_ID = "aX45EUqSsAqhTvv5nW7WEL"
def create_mc_permission(permission):
ct, _ = ContentType.objects.get_or_create(
app_label="mc",
model=permission.content_type.model)
new_perm, _ = Permission.objects.get_or_create(
name=permission.name,
content_type=ct,
codename=permission.codename)
for group in permission.group_set.all():
group.permissions.add(new_perm)
for user in permission.user_set.all():
user.user_permissions.add(new_perm)
def bullet_train_access_util(feature_name):
bt = BulletTrain(environment_id=BULLET_TRAIN_ENVIRONMENT_ID)
if bt:
if bt.has_feature(feature_name):
return bt.feature_enabled(feature_name)
return False
| from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
from bullet_train import BulletTrain
BULLET_TRAIN_ENVIRONMENT_ID ="aX45EUqSsAqhTvv5nW7WEL"
def create_mc_permission(permission):
ct, _ = ContentType.objects.get_or_create(
app_label="mc",
model=permission.content_type.model)
new_perm, _ = Permission.objects.get_or_create(
name=permission.name,
content_type=ct,
codename=permission.codename)
for group in permission.group_set.all():
group.permissions.add(new_perm)
for user in permission.user_set.all():
user.user_permissions.add(new_perm)
def bullet_train_access_util(feature_name):
bt = BulletTrain(environment_id=BULLET_TRAIN_ENVIRONMENT_ID)
if bt:
if bt.has_feature(feature_name):
return bt.feature_enabled(feature_name)
return False
| mit | Python |
61be34225ae617d43cf7ca8c93c021523d93d8ca | fix #34 - use Session.evaluated_env_list to respect TOX_SKIP_ENV | tox-dev/detox | detox/cli.py | detox/cli.py | from __future__ import print_function
import sys
from tox.session import prepare as tox_prepare
from detox import __version__
from detox.proc import Detox
def main(args=None):
args = sys.argv[1:] if args is None else args
if args and args[0] == "--version":
print("detox {} running as plugin in tox:".format(__version__))
# fall through to let tox add its own version info ...
config = tox_prepare(args)
detox = Detox(config)
if not hasattr(config.option, "quiet_level") or not config.option.quiet_level:
detox.startloopreport()
return detox.runtestsmulti(detox.toxsession.evaluated_env_list())
| from __future__ import print_function
import sys
from tox.session import prepare as tox_prepare
from detox import __version__
from detox.proc import Detox
def main(args=None):
args = sys.argv[1:] if args is None else args
if args and args[0] == "--version":
print("detox {} running as plugin in tox:".format(__version__))
# fall through to let tox add its own version info ...
config = tox_prepare(args)
detox = Detox(config)
if not hasattr(config.option, "quiet_level") or not config.option.quiet_level:
detox.startloopreport()
return detox.runtestsmulti(config.envlist)
| mit | Python |
1513c99d7bd7c06825cea757cbd292547c4c8350 | Exclude anything starting with '::' or '__' | James-Yu/LaTeX-Workshop,James-Yu/LaTeX-Workshop,James-Yu/LaTeX-Workshop,James-Yu/LaTeX-Workshop,James-Yu/LaTeX-Workshop | dev/func3.py | dev/func3.py | from pathlib import Path
import re
import json
dtx_files = Path('/usr/local/texlive/2019/texmf-dist/source/latex/l3kernel/').glob('*.dtx')
def exclude(entry: str) -> bool:
return not re.match(r'\\(?!(?:::)|(?:__))', entry)
def parse_doc_block(block_content, _type):
objs = []
for match in re.findall(rf'\\begin{{{_type}}}(?:\[[^\]]*\])?[\s\n%]*{{([^}}]*)}}', block_content, flags=re.M):
obj_str = match.replace('%', '')
objs.extend([m for m in (o.strip() for o in ''.join(obj_str).split(',')) if not exclude(m)])
return objs
def parse_file(fpath, _type):
objs = []
inside_documentation = False
block_start = None
block_end = None
with open(fpath) as fp:
lines = fp.readlines()
# content = '\n'.join(lines)
for i, line in enumerate(lines):
if re.search(r'\\begin{documentation}', line):
inside_documentation = True
block_start = i
continue
if not inside_documentation:
continue
if inside_documentation and re.search(r'\\end{documentation}', line):
inside_documentation = False
block_end = i
content = ''.join(lines[block_start:block_end])
objs.extend(parse_doc_block(content, _type))
return objs
functions = {}
for f in dtx_files:
if f.match('l3doc.dtx'):
continue
ans = parse_file(f.as_posix(), 'function')
ans.extend(parse_file(f.as_posix(), 'variable'))
if len(ans) > 0:
functions[f.name] = list(set(ans))
json.dump(functions, open('funcs.json', 'w'), indent=2)
| from pathlib import Path
import re
import json
dtx_files = Path('/usr/local/texlive/2019/texmf-dist/source/latex/l3kernel/').glob('*.dtx')
def parse_doc_block(block_content, _type):
objs = []
for match in re.findall(rf'\\begin{{{_type}}}(?:\[[^\]]*\])?[\s\n%]*{{([^}}]*)}}', block_content, flags=re.M):
obj_str = match.replace('%', '')
objs.extend([m for m in (o.strip() for o in ''.join(obj_str).split(',')) if m.startswith('\\')])
return objs
def parse_file(fpath, _type):
objs = []
inside_documentation = False
block_start = None
block_end = None
with open(fpath) as fp:
lines = fp.readlines()
# content = '\n'.join(lines)
for i, line in enumerate(lines):
if re.search(r'\\begin{documentation}', line):
inside_documentation = True
block_start = i
continue
if not inside_documentation:
continue
if inside_documentation and re.search(r'\\end{documentation}', line):
inside_documentation = False
block_end = i
content = ''.join(lines[block_start:block_end])
objs.extend(parse_doc_block(content, _type))
return objs
functions = {}
for f in dtx_files:
if f.match('l3doc.dtx'):
continue
ans = parse_file(f.as_posix(), 'function')
ans.extend(parse_file(f.as_posix(), 'variable'))
if len(ans) > 0:
functions[f.name] = list(set(ans))
json.dump(functions, open('funcs.json', 'w'), indent=2)
| mit | Python |
f46b655ed924ff5238d08e4aa0a660879976507e | Bump version. | wido/libcloud,t-tran/libcloud,Scalr/libcloud,samuelchong/libcloud,JamesGuthrie/libcloud,sergiorua/libcloud,thesquelched/libcloud,wido/libcloud,mbrukman/libcloud,lochiiconnectivity/libcloud,watermelo/libcloud,vongazman/libcloud,carletes/libcloud,MrBasset/libcloud,carletes/libcloud,mathspace/libcloud,smaffulli/libcloud,munkiat/libcloud,supertom/libcloud,munkiat/libcloud,briancurtin/libcloud,Cloud-Elasticity-Services/as-libcloud,illfelder/libcloud,schaubl/libcloud,dcorbacho/libcloud,vongazman/libcloud,DimensionDataCBUSydney/libcloud,kater169/libcloud,andrewsomething/libcloud,wuyuewen/libcloud,NexusIS/libcloud,sahildua2305/libcloud,pantheon-systems/libcloud,aleGpereira/libcloud,SecurityCompass/libcloud,niteoweb/libcloud,SecurityCompass/libcloud,pquentin/libcloud,supertom/libcloud,andrewsomething/libcloud,sahildua2305/libcloud,dcorbacho/libcloud,jimbobhickville/libcloud,Kami/libcloud,cryptickp/libcloud,jerryblakley/libcloud,ClusterHQ/libcloud,mbrukman/libcloud,thesquelched/libcloud,mgogoulos/libcloud,sfriesel/libcloud,sfriesel/libcloud,munkiat/libcloud,mgogoulos/libcloud,illfelder/libcloud,iPlantCollaborativeOpenSource/libcloud,Cloud-Elasticity-Services/as-libcloud,mtekel/libcloud,marcinzaremba/libcloud,briancurtin/libcloud,jimbobhickville/libcloud,aviweit/libcloud,mistio/libcloud,wuyuewen/libcloud,erjohnso/libcloud,jerryblakley/libcloud,briancurtin/libcloud,Verizon/libcloud,wido/libcloud,mbrukman/libcloud,lochiiconnectivity/libcloud,ByteInternet/libcloud,ByteInternet/libcloud,watermelo/libcloud,JamesGuthrie/libcloud,cryptickp/libcloud,techhat/libcloud,mathspace/libcloud,cloudControl/libcloud,MrBasset/libcloud,smaffulli/libcloud,vongazman/libcloud,t-tran/libcloud,StackPointCloud/libcloud,jerryblakley/libcloud,niteoweb/libcloud,aleGpereira/libcloud,lochiiconnectivity/libcloud,sgammon/libcloud,ZuluPro/libcloud,sergiorua/libcloud,mistio/libcloud,StackPointCloud/libcloud,Itxaka/libcloud,kater169/libcloud,NexusIS/libc
loud,watermelo/libcloud,schaubl/libcloud,apache/libcloud,ZuluPro/libcloud,techhat/libcloud,mathspace/libcloud,SecurityCompass/libcloud,cloudControl/libcloud,Jc2k/libcloud,dcorbacho/libcloud,mgogoulos/libcloud,curoverse/libcloud,erjohnso/libcloud,mistio/libcloud,atsaki/libcloud,MrBasset/libcloud,DimensionDataCBUSydney/libcloud,sahildua2305/libcloud,sgammon/libcloud,NexusIS/libcloud,sfriesel/libcloud,aviweit/libcloud,jimbobhickville/libcloud,iPlantCollaborativeOpenSource/libcloud,Kami/libcloud,schaubl/libcloud,ninefold/libcloud,Scalr/libcloud,aviweit/libcloud,Verizon/libcloud,smaffulli/libcloud,thesquelched/libcloud,ByteInternet/libcloud,Itxaka/libcloud,marcinzaremba/libcloud,kater169/libcloud,Scalr/libcloud,wrigri/libcloud,atsaki/libcloud,JamesGuthrie/libcloud,erjohnso/libcloud,ClusterHQ/libcloud,niteoweb/libcloud,wrigri/libcloud,StackPointCloud/libcloud,ninefold/libcloud,Itxaka/libcloud,andrewsomething/libcloud,t-tran/libcloud,DimensionDataCBUSydney/libcloud,wrigri/libcloud,Kami/libcloud,apache/libcloud,pantheon-systems/libcloud,cryptickp/libcloud,illfelder/libcloud,wuyuewen/libcloud,curoverse/libcloud,atsaki/libcloud,pquentin/libcloud,Verizon/libcloud,Cloud-Elasticity-Services/as-libcloud,curoverse/libcloud,ZuluPro/libcloud,supertom/libcloud,apache/libcloud,aleGpereira/libcloud,mtekel/libcloud,cloudControl/libcloud,Jc2k/libcloud,techhat/libcloud,pantheon-systems/libcloud,carletes/libcloud,pquentin/libcloud,sergiorua/libcloud,mtekel/libcloud,iPlantCollaborativeOpenSource/libcloud,samuelchong/libcloud,marcinzaremba/libcloud,samuelchong/libcloud | libcloud/__init__.py | libcloud/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
libcloud provides a unified interface to the cloud computing resources.
@var __version__: Current version of libcloud
"""
__all__ = ['__version__', 'enable_debug']
__version__ = '0.10.1'
try:
import paramiko
have_paramiko = True
except ImportError:
have_paramiko = False
def enable_debug(fo):
"""
Enable library wide debugging to a file-like object.
@param fo: Where to append debugging information
@type fo: File like object, only write operations are used.
"""
from libcloud.common.base import (Connection,
LoggingHTTPConnection,
LoggingHTTPSConnection)
LoggingHTTPSConnection.log = fo
LoggingHTTPConnection.log = fo
Connection.conn_classes = (LoggingHTTPConnection,
LoggingHTTPSConnection)
def _init_once():
"""
Utility function that is ran once on Library import.
This checks for the LIBCLOUD_DEBUG enviroment variable, which if it exists
is where we will log debug information about the provider transports.
"""
import os
path = os.getenv('LIBCLOUD_DEBUG')
if path:
fo = open(path, 'a')
enable_debug(fo)
if have_paramiko:
paramiko.common.logging.basicConfig(level=paramiko.common.DEBUG)
_init_once()
| # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
libcloud provides a unified interface to the cloud computing resources.
@var __version__: Current version of libcloud
"""
__all__ = ['__version__', 'enable_debug']
__version__ = '0.10.0'
try:
import paramiko
have_paramiko = True
except ImportError:
have_paramiko = False
def enable_debug(fo):
"""
Enable library wide debugging to a file-like object.
@param fo: Where to append debugging information
@type fo: File like object, only write operations are used.
"""
from libcloud.common.base import (Connection,
LoggingHTTPConnection,
LoggingHTTPSConnection)
LoggingHTTPSConnection.log = fo
LoggingHTTPConnection.log = fo
Connection.conn_classes = (LoggingHTTPConnection,
LoggingHTTPSConnection)
def _init_once():
"""
Utility function that is ran once on Library import.
This checks for the LIBCLOUD_DEBUG enviroment variable, which if it exists
is where we will log debug information about the provider transports.
"""
import os
path = os.getenv('LIBCLOUD_DEBUG')
if path:
fo = open(path, 'a')
enable_debug(fo)
if have_paramiko:
paramiko.common.logging.basicConfig(level=paramiko.common.DEBUG)
_init_once()
| apache-2.0 | Python |
d7fe1ef2ec3cc19c234d228cbdbd3a0067801e46 | Fix typo in addExpense | Zillolo/mana-vault,Zillolo/mana-vault,Zillolo/mana-vault | app/mod_budget/controller.py | app/mod_budget/controller.py | from flask import Blueprint, flash, redirect, render_template, request, session, \
url_for
from wtforms.fields.html5 import DecimalRangeField
from bson.objectid import ObjectId
from app import logger
from app.mod_budget.form import AddEntryForm, EditBudgetForm
from app.mod_budget.model import Category, CategoryBudget, Expense, Income
from app.mod_auth.helper import requireAuth
from app.mod_auth.model import User
budget = Blueprint('budget', __name__, template_folder = 'templates')
@budget.route('/')
@requireAuth()
def default():
return "Hello World!"
@budget.route('/income/add', methods = ['GET', 'POST'])
@requireAuth()
def addIncome():
form = AddEntryForm(request.form)
# Load the categories from the DB into the SelectField
form.loadCategories()
if request.method == 'POST' and form.validate():
income = Income()
form.populate_obj(income)
# Insert category into the ReferenceField.
income.category = Category.objects(id = ObjectId(income.category)).first()
# Insert owner into the ReferenceField.
userId = ObjectId(session.get('user')['_id']['$oid'])
income.owner = User.objects(id = userId).first()
income.save()
logger.debug('{0} added Income({1}, {2}, {3})'.format(
session.get('user')['username'], income.amount, income.description,
income.category.name))
flash('Your income has been added.')
return redirect(url_for('budget.default'))
return render_template('budget/income/add.html', form = form)
@budget.route('/expense/add', methods = ['GET', 'POST'])
@requireAuth()
def addExpense():
form = AddEntryForm(request.form)
# Load the categories from the DB into the SelectField
form.loadCategories()
if request.method == 'POST' and form.validate():
expense = Expense()
form.populate_obj(expense)
# Insert category into the ReferenceField.
expense.category = Category.objects(id = ObjectId(expense.category)).first()
# Insert owner into the ReferenceField.
userId = ObjectId(session.get('user')['_id']['$oid'])
expense.owner = User.objects(id = userId).first()
expense.save()
logger.debug('{0} added Income({1}, {2}, {3})'.format(
session.get('user')['username'], income.amount, income.description,
income.category.name))
flash('Your expense has been added.')
return redirect(url_for('budget.default'))
return render_template('budget/expense/add.html', form = form)
| from flask import Blueprint, flash, redirect, render_template, request, session, \
url_for
from wtforms.fields.html5 import DecimalRangeField
from bson.objectid import ObjectId
from app import logger
from app.mod_budget.form import AddEntryForm, EditBudgetForm
from app.mod_budget.model import Category, CategoryBudget, Expense, Income
from app.mod_auth.helper import requireAuth
from app.mod_auth.model import User
budget = Blueprint('budget', __name__, template_folder = 'templates')
@budget.route('/')
@requireAuth()
def default():
return "Hello World!"
@budget.route('/income/add', methods = ['GET', 'POST'])
@requireAuth()
def addIncome():
form = AddEntryForm(request.form)
# Load the categories from the DB into the SelectField
form.loadCategories()
if request.method == 'POST' and form.validate():
income = Income()
form.populate_obj(income)
# Insert category into the ReferenceField.
income.category = Category.objects(id = ObjectId(income.category)).first()
# Insert owner into the ReferenceField.
userId = ObjectId(session.get('user')['_id']['$oid'])
income.owner = User.objects(id = userId).first()
income.save()
logger.debug('{0} added Income({1}, {2}, {3})'.format(
session.get('user')['username'], income.amount, income.description,
income.category.name))
flash('Your income has been added.')
return redirect(url_for('budget.default'))
return render_template('budget/income/add.html', form = form)
@budget.route('/expense/add', methods = ['GET', 'POST'])
@requireAuth()
def addExpense():
form = AddEntryForm(request.form)
# Load the categories from the DB into the SelectField
form.loadCategories()
if request.method == 'POST' and form.validate():
expense = Expense()
form.populate_obj(expense)
# Insert category into the ReferenceField.
expense.category = Category.objects(id = ObjectId(expense.category)).first()
# Insert owner into the ReferenceField.
userId = ObjectId(session.get('user')['_id']['$oid'])
income.owner = User.objects(id = userId).first()
expense.save()
logger.debug('{0} added Income({1}, {2}, {3})'.format(
session.get('user')['username'], income.amount, income.description,
income.category.name))
flash('Your expense has been added.')
return redirect(url_for('budget.default'))
return render_template('budget/expense/add.html', form = form)
| mit | Python |
7906532576d13f28a353ca3ac628a9ff69bf6fcd | Kill unused import statement | MagicStack/MagicPython,MagicStack/MagicPython,MagicStack/MagicPython | sublime.py | sublime.py | """A plugin for Sublime Text to enhance f-string editing experience.
Specifically, this plugin simplifies typing of escaped curly braces
in an f-string:
{|}, where | is for cursir, gets replaced with
{{|, when '{' is typed again.
"""
import sublime_plugin
class FstringbraceCommand(sublime_plugin.WindowCommand):
def run(self):
view = self.window.active_view()
view.run_command('right_delete')
view.run_command('insert', {'characters': '{'})
| """A plugin for Sublime Text to enhance f-string editing experience.
Specifically, this plugin simplifies typing of escaped curly braces
in an f-string:
{|}, where | is for cursir, gets replaced with
{{|, when '{' is typed again.
"""
import sublime
import sublime_plugin
class FstringbraceCommand(sublime_plugin.WindowCommand):
def run(self):
view = self.window.active_view()
view.run_command('right_delete')
view.run_command('insert', {'characters': '{'})
| mit | Python |
b3041b7aab663b8d02c08826142922e938d05341 | Update news | kuastw/AP-API,JohnSounder/AP-API,kuastw/AP-API,JohnSounder/AP-API | kuas/news.py | kuas/news.py | # -*- coding: utf-8 -*-
ENABLE = 1
NEWS_ID = 25
NEWS_TITLE = "高應盃籃球錦標賽"
NEWS_IMAGE = "http://i.imgur.com/NAxVxbV.jpg"
NEWS_URL = "http://goo.gl/Yh1iIF"
NEWS_CONTENT = """
"""
def news_status():
return [ENABLE, NEWS_ID]
def news():
"""
News for kuas.
return [enable, news_id, news_title, news_template, news_url]
enable: bool
news_id: int
news_title: string
news_tempalte: string
news_url: string
"""
news_title = NEWS_TITLE
news_template = (
"<div style='text-align:center;'>"
"<div><img style='display:block;margin-left:auto;margin-right:auto;max-width:250px;min-height:150px;height:auto;' ng-cache src='"
+ NEWS_IMAGE + "'></img>" + NEWS_CONTENT + "</div>" +
"</div>"
)
news_url = NEWS_URL
return [ENABLE, NEWS_ID, news_title, news_template, news_url]
| # -*- coding: utf-8 -*-
ENABLE = 1
NEWS_ID = 25
NEWS_TITLE = "高應盃籃球錦標賽"
NEWS_IMAGE = "http://i.imgur.com/NAxVxbV.jpg"
NEWS_URL = "http://goo.gl/Yh1iIF"
NEWS_CONTENT = """
"""
def news_status():
return [ENABLE, NEWS_ID]
def news():
"""
News for kuas.
return [enable, news_id, news_title, news_template, news_url]
enable: bool
news_id: int
news_title: string
news_tempalte: string
news_url: string
"""
news_title = NEWS_TITLE
news_template = (
"<div style='text-align:center;margin-top:-15px'>"
"<div><img style='display:block;margin-left:auto;margin-right:auto;margin-bottom:-15px;max-width:250px;min-height:150px;height:auto;' src='"
+ NEWS_IMAGE + "'></img>" + NEWS_CONTENT + "</div>" +
"</div>"
)
news_url = NEWS_URL
return [ENABLE, NEWS_ID, news_title, news_template, news_url]
| mit | Python |
6f88063fae644bd55d3dbe7688d6f4f19aa9115d | bump version to 7.0.1.dev0 | praw-dev/praw,praw-dev/praw | praw/const.py | praw/const.py | """PRAW constants."""
from .endpoints import API_PATH # noqa: F401
__version__ = "7.0.1.dev0"
USER_AGENT_FORMAT = "{} PRAW/" + __version__
MAX_IMAGE_SIZE = 512000
MIN_JPEG_SIZE = 128
MIN_PNG_SIZE = 67
JPEG_HEADER = b"\xff\xd8\xff"
PNG_HEADER = b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a"
| """PRAW constants."""
from .endpoints import API_PATH # noqa: F401
__version__ = "7.0.0"
USER_AGENT_FORMAT = "{} PRAW/" + __version__
MAX_IMAGE_SIZE = 512000
MIN_JPEG_SIZE = 128
MIN_PNG_SIZE = 67
JPEG_HEADER = b"\xff\xd8\xff"
PNG_HEADER = b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a"
| bsd-2-clause | Python |
2746e85066c0bf7c2f62b7f384a9a0ccd1179eb1 | indent the json in pre-commit hook so diffs look nice | NYUDataBootcamp/Materials | pre-commit.py | pre-commit.py | #!/usr/bin/env python
import os
import json
import fnmatch
import subprocess
def strip_output_cell(fn):
changed = False
with open(fn, "r") as f:
js = json.load(f)
for cell in js["cells"]:
if cell["cell_type"] == "code":
if len(cell["outputs"]) != 0:
cell["outputs"] = []
changed = True
if cell["execution_count"] is not None:
cell["execution_count"] = None
changed = True
if changed:
with open(fn, "w") as f:
json.dump(js, f, indent=2)
repo_root = os.popen("git rev-parse --show-toplevel").read().strip()
for dirpath, dirnames, files in os.walk(repo_root):
if not "/." in dirpath:
for f in fnmatch.filter(files, "*.ipynb"):
fn = os.path.join(dirpath, f)
strip_output_cell(fn)
subprocess.call(["git", "add", fn])
| #!/usr/bin/env python
import os
import json
import fnmatch
import subprocess
def strip_output_cell(fn):
changed = False
with open(fn, "r") as f:
js = json.load(f)
for cell in js["cells"]:
if cell["cell_type"] == "code":
if len(cell["outputs"]) != 0:
cell["outputs"] = []
changed = True
if cell["execution_count"] is not None:
cell["execution_count"] = None
changed = True
if changed:
with open(fn, "w") as f:
json.dump(js, f)
repo_root = os.popen("git rev-parse --show-toplevel").read().strip()
for dirpath, dirnames, files in os.walk(repo_root):
if not "/." in dirpath:
for f in fnmatch.filter(files, "*.ipynb"):
fn = os.path.join(dirpath, f)
strip_output_cell(fn)
subprocess.call(["git", "add", fn])
| mit | Python |
20809c17d6e7cb4e71f52b9cdc2755d2ce7b0ac1 | bump version to 0.8.0 | CamDavidsonPilon/lifelines,jstoxrocky/lifelines,wavelets/lifelines,nerdless/lifelines | lifelines/version.py | lifelines/version.py | from __future__ import unicode_literals
__version__ = '0.8.0.0'
| from __future__ import unicode_literals
__version__ = '0.7.1.0'
| mit | Python |
94a91f128b046a818acb873579c8aadd41aa5c3a | Fix remaining load_suparsers typo instance | msmakhlouf/streamparse,codywilbourn/streamparse,petchat/streamparse,petchat/streamparse,Parsely/streamparse,msmakhlouf/streamparse,petchat/streamparse,petchat/streamparse,msmakhlouf/streamparse,msmakhlouf/streamparse,codywilbourn/streamparse,eric7j/streamparse,petchat/streamparse,crohling/streamparse,hodgesds/streamparse,msmakhlouf/streamparse,hodgesds/streamparse,phanib4u/streamparse,Parsely/streamparse,eric7j/streamparse,crohling/streamparse,phanib4u/streamparse | test/streamparse/cli/test_sparse.py | test/streamparse/cli/test_sparse.py | from __future__ import absolute_import, unicode_literals
import argparse
import unittest
from streamparse.cli import sparse
from nose.tools import ok_
class SparseTestCase(unittest.TestCase):
def test_load_subparsers(self):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
sparse.load_subparsers(subparsers)
# grab subcommands from subparsers
subcommands = parser._optionals._actions[1].choices.keys()
# we know quickstart will be a subcommand test others as needed
ok_('quickstart' in subcommands)
if __name__ == '__main__':
unittest.main()
| from __future__ import absolute_import, unicode_literals
import argparse
import unittest
from streamparse.cli import sparse
from nose.tools import ok_
class SparseTestCase(unittest.TestCase):
def test_load_subparsers(self):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
sparse.load_suparsers(subparsers)
# grab subcommands from subparsers
subcommands = parser._optionals._actions[1].choices.keys()
# we know quickstart will be a subcommand test others as needed
ok_('quickstart' in subcommands)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
e0a0d4e8d403fec54cf9dae6c22c63c5fe21b9c0 | Modify description of replication() | kolyat/chainsyn,kolyat/chainsyn | processing.py | processing.py | # Copyright (c) 2016 Kirill 'Kolyat' Kiselnikov
# This file is the part of chainsyn, released under modified MIT license
# See the file LICENSE.txt included in this distribution
"""
Module "processing" with processing functions
replication() - function of DNA replication
"""
def replication(dna_chain):
"""
Function of DNA replication (DNA -> DNA)
Arguments:
dna_chain -- string, list or tuple with nucleotides (A, T, C, G)
Returns list with nucleotides of second DNA chain
Raises an exception if fails:
- TypeError -- when dna_chain is not string, list or tuple;
- ValueError -- when dna_chain is empty or contains forbidden
characters (non-alphabetic)
- KeyError - when dna_chain contains not valid nucleotides
"""
dna_pattern = {
'a': 't', # Adenine associates with thymine (A-T)
't': 'a', # Thymine associates with adenine (T-A)
'c': 'g', # Cytosine associates with guanine (C-G)
'g': 'c' # Guanine associates with cytosine (G-C)
}
# Check if dna_chain is correct type and not empty
if type(dna_chain) not in (str, list, tuple):
raise TypeError
if len(dna_chain) == 0:
raise ValueError
# Try to convert input dna_chain to list of nucleotides
dna1_chain = []
for el in list(dna_chain):
try:
dna1_chain.append(el.lower())
except ValueError:
# dna_chain might contain non-alphabetic characters
break
# Try to replicate DNA chain
dna2_chain = []
for n in dna1_chain:
if n in dna_pattern:
dna2_chain.append(dna_pattern[n])
else:
raise KeyError
return dna2_chain
| # Copyright (c) 2016 Kirill 'Kolyat' Kiselnikov
# This file is the part of chainsyn, released under modified MIT license
# See the file LICENSE.txt included in this distribution
"""
Module "processing" with processing functions
replication() - function of DNA replication
"""
def replication(dna_chain):
"""
Function of DNA replication (DNA -> DNA)
Arguments:
dna_chain -- string, list or tuple with nucleotides (A, T, C, G)
Returns list with nucleotides of second DNA chain
"""
dna_pattern = {
'a': 't', # Adenine associates with thymine (A-T)
't': 'a', # Thymine associates with adenine (T-A)
'c': 'g', # Cytosine associates with guanine (C-G)
'g': 'c' # Guanine associates with cytosine (G-C)
}
# Check if dna_chain is correct type and not empty
t = (str, list, tuple)
if type(dna_chain) not in t:
raise TypeError
if len(dna_chain) == 0:
raise ValueError
# Try to convert input dna_chain to list of nucleotides
dna1_chain = []
for el in list(dna_chain):
try:
dna1_chain.append(el.lower())
except ValueError:
# dna_chain might contain non-alphabetic characters
break
# Replicate DNA chain
dna2_chain = []
for n in dna1_chain:
if n in dna_pattern:
dna2_chain.append(dna_pattern[n])
else:
raise KeyError
return dna2_chain
| mit | Python |
26dcd1ce43864de77c1cd26065c09cc2b4c4788e | Make use of chained comparisons | akosthekiss/fuzzinator,akosthekiss/fuzzinator,akosthekiss/fuzzinator,renatahodovan/fuzzinator,renatahodovan/fuzzinator,renatahodovan/fuzzinator,akosthekiss/fuzzinator,renatahodovan/fuzzinator | tests/fuzzer/test_random_content.py | tests/fuzzer/test_random_content.py | # Copyright (c) 2016-2021 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import pytest
import fuzzinator
@pytest.mark.parametrize('fuzzer_kwargs, exp_min_len, exp_max_len', [
({}, 1, 1),
({'max_length': '100'}, 1, 100),
({'min_length': '10', 'max_length': '100'}, 10, 100),
])
def test_random_content(fuzzer_kwargs, exp_min_len, exp_max_len):
for index in range(100):
out = fuzzinator.fuzzer.RandomContent(index=index, **fuzzer_kwargs)
out_len = len(out)
assert exp_min_len <= out_len <= exp_max_len
| # Copyright (c) 2016 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import pytest
import fuzzinator
@pytest.mark.parametrize('fuzzer_kwargs, exp_min_len, exp_max_len', [
({}, 1, 1),
({'max_length': '100'}, 1, 100),
({'min_length': '10', 'max_length': '100'}, 10, 100),
])
def test_random_content(fuzzer_kwargs, exp_min_len, exp_max_len):
for index in range(100):
out = fuzzinator.fuzzer.RandomContent(index=index, **fuzzer_kwargs)
out_len = len(out)
assert out_len >= exp_min_len and out_len <= exp_max_len
| bsd-3-clause | Python |
c2d3a3a4a6ad43115459977d0d88a4878d0fb33a | Increase pylons.url testing to 100% | moreati/pylons,Pylons/pylons,Pylons/pylons,moreati/pylons,moreati/pylons,Pylons/pylons | pylons/url.py | pylons/url.py | from repoze.bfg.encode import urlencode
from repoze.bfg.threadlocal import get_current_registry
from repoze.bfg.url import _join_elements
from pylons.interfaces import IRoutesMapper
def route_url(route_name, request, *elements, **kw):
try:
reg = request.registry
except AttributeError:
reg = get_current_registry() # b/c
mapper = reg.getUtility(IRoutesMapper)
route = mapper.routes.get(route_name)
if route and 'custom_url_generator' in route.__dict__:
route_name, request, elements, kw = route.custom_url_generator(route_name, request, *elements, **kw)
anchor = ''
qs = ''
app_url = None
if '_query' in kw:
qs = '?' + urlencode(kw.pop('_query'), doseq=True)
if '_anchor' in kw:
anchor = kw.pop('_anchor')
if isinstance(anchor, unicode):
anchor = anchor.encode('utf-8')
anchor = '#' + anchor
if '_app_url' in kw:
app_url = kw.pop('_app_url')
path = mapper.generate(route_name, kw) # raises KeyError if generate fails
if elements:
suffix = _join_elements(elements)
if not path.endswith('/'):
suffix = '/' + suffix
else:
suffix = ''
if app_url is None:
# we only defer lookup of application_url until here because
# it's somewhat expensive; we won't need to do it if we've
# been passed _app_url
app_url = request.application_url
return app_url + path + suffix + qs + anchor
| from repoze.bfg.threadlocal import get_current_registry
from pylons.interfaces import IRoutesMapper
def route_url(route_name, request, *elements, **kw):
try:
reg = request.registry
except AttributeError:
reg = get_current_registry() # b/c
mapper = reg.getUtility(IRoutesMapper)
route = mapper.routes.get(route_name)
if 'custom_url_generator' in route.__dict__:
return route.custom_url_generator(route_name, request, *elements, **kw)
anchor = ''
qs = ''
app_url = None
if '_query' in kw:
qs = '?' + urlencode(kw.pop('_query'), doseq=True)
if '_anchor' in kw:
anchor = kw.pop('_anchor')
if isinstance(anchor, unicode):
anchor = anchor.encode('utf-8')
anchor = '#' + anchor
if '_app_url' in kw:
app_url = kw.pop('_app_url')
path = mapper.generate(route_name, kw) # raises KeyError if generate fails
if elements:
suffix = _join_elements(elements)
if not path.endswith('/'):
suffix = '/' + suffix
else:
suffix = ''
if app_url is None:
# we only defer lookup of application_url until here because
# it's somewhat expensive; we won't need to do it if we've
# been passed _app_url
app_url = request.application_url
return app_url + path + suffix + qs + anchor
| bsd-3-clause | Python |
5edc773be179ebc21d65e8b06e8d66804b8f6b5f | Use 1 as default first index | pearsonlab/thunder,zhwa/thunder,broxtronix/thunder,poolio/thunder,broxtronix/thunder,thunder-project/thunder,pearsonlab/thunder,jwittenbach/thunder,j-friedrich/thunder,oliverhuangchao/thunder,kcompher/thunder,j-friedrich/thunder,oliverhuangchao/thunder,zhwa/thunder,kunallillaney/thunder,kcompher/thunder,poolio/thunder,mikarubi/thunder,kunallillaney/thunder,mikarubi/thunder | python/ref.py | python/ref.py | # ref <master> <inputFile> <inds> <outputFile>
#
# compute summary statistics on an xyz stack
# each row is (x,y,z,timeseries)
#
import sys
import os
from numpy import *
from scipy.linalg import *
from scipy.sparse import *
from scipy.io import *
from pyspark import SparkContext
import logging
if len(sys.argv) < 4:
print >> sys.stderr, \
"(ref) usage: ref <master> <inputFile> <outputFile> <mode>"
exit(-1)
def parseVector(line):
vec = [float(x) for x in line.split(' ')]
ts = array(vec[3:]) # get tseries
return ((int(vec[0]),int(vec[1]),int(vec[2])),ts) # (x,y,z),(tseries) pair
# parse inputs
sc = SparkContext(sys.argv[1], "ref")
inputFile = str(sys.argv[2])
outputFile = str(sys.argv[3])
mode = str(sys.argv[4])
logging.basicConfig(filename=outputFile+'stdout.log',level=logging.INFO,format='%(asctime)s %(message)s',datefmt='%m/%d/%Y %I:%M:%S %p')
# parse data
logging.info("(ref) loading data")
lines = sc.textFile(inputFile)
X = lines.map(parseVector)
# get z ordering
logging.info("(ref) getting z ordering")
zinds = X.filter(lambda (k,x) : (k[0] == 1) & (k[1] == 1)).map(lambda (k,x) : k[2]).collect()
savemat(outputFile+"zinds.mat",mdict={'zinds':zinds},oned_as='column',do_compression='true')
# compute ref
logging.info('(ref) computing reference image')
if mode == 'med':
ref = X.map(lambda (k,x) : median(x)).collect()
if mode == 'mean':
ref = X.map(lambda (k,x) : mean(x)).collect()
if mode == 'std':
ref = X.map(lambda (k,x) : std((x - median(x))/(median(x)+0.1))).collect()
logging.info('(ref) saving results...')
savemat(outputFile+mode+".mat",mdict={'ref':ref},oned_as='column',do_compression='true') | # ref <master> <inputFile> <inds> <outputFile>
#
# compute summary statistics on an xyz stack
# each row is (x,y,z,timeseries)
#
import sys
import os
from numpy import *
from scipy.linalg import *
from scipy.sparse import *
from scipy.io import *
from pyspark import SparkContext
import logging
if len(sys.argv) < 4:
print >> sys.stderr, \
"(ref) usage: ref <master> <inputFile> <outputFile> <mode>"
exit(-1)
def parseVector(line):
vec = [float(x) for x in line.split(' ')]
ts = array(vec[3:]) # get tseries
return ((int(vec[0]),int(vec[1]),int(vec[2])),ts) # (x,y,z),(tseries) pair
# parse inputs
sc = SparkContext(sys.argv[1], "ref")
inputFile = str(sys.argv[2])
outputFile = str(sys.argv[3])
mode = str(sys.argv[4])
logging.basicConfig(filename=outputFile+'stdout.log',level=logging.INFO,format='%(asctime)s %(message)s',datefmt='%m/%d/%Y %I:%M:%S %p')
# parse data
logging.info("(ref) loading data")
lines = sc.textFile(inputFile)
X = lines.map(parseVector)
# get z ordering
logging.info("(ref) getting z ordering")
zinds = X.filter(lambda (k,x) : (k[0] == 1000) & (k[1] == 1000)).map(lambda (k,x) : k[2]).collect()
savemat(outputFile+"zinds.mat",mdict={'zinds':zinds},oned_as='column',do_compression='true')
# compute ref
logging.info('(ref) computing reference image')
if mode == 'med':
ref = X.map(lambda (k,x) : median(x)).collect()
if mode == 'mean':
ref = X.map(lambda (k,x) : mean(x)).collect()
if mode == 'std':
ref = X.map(lambda (k,x) : std((x - median(x))/(median(x)+0.1))).collect()
logging.info('(ref) saving results...')
savemat(outputFile+mode+".mat",mdict={'ref':ref},oned_as='column',do_compression='true') | apache-2.0 | Python |
21017b79184459a6a2c0076d6cb1a60d3b1d3c12 | Fix errors in sim harness | PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project | src/run_similarity.py | src/run_similarity.py | import argparse
import datetime
from os import getcwd
from os.path import isdir, exists
from project import corpus, knn_corpus, lda_corpus, word2vec_corpus
algorithms = {"lda": lda_corpus.LDACorpus,
"knn": knn_corpus.KNNCorpus,
"w2v": word2vec_corpus.W2VCorpus}
base_dir = getcwd()
output_loc = base_dir + "/%s.corpus_out"
dictionary_loc = output_loc + "/%scorpus.dict"
corpus_loc = output_loc + "/%scorpus.mm"
log_file = output_loc + "/Sim_runtimes.log"
sup_file_loc = output_loc + "/%d.%s"
def run_sim(ints, algorithm):
output_dir = output_loc % algorithm
if not exists(output_dir):
print "Output directory for %s must exist already. Run run_algorithm.py first." % algorithm
return
log = open(log_file % algorithm, 'a+')
for size in ints:
corpus_dict = dictionary_loc % (algorithm, size)
corpus = corpus_loc % (algorithm, size)
sup_file = sup_file_loc % (algorithm, size, algorithm)
test_corpus = algorithms[algorithm].load(dictionary_file=corpus_dict, corpus_file=corpus, sup_file=sup_file)
# Run some queries on the corpus
# Log temporal time
# log.write("%s %d query time:\t" % (algorithm, size) + str(test_corpus.get_train_time()) + "\n")
log.close()
def main():
parser = argparse.ArgumentParser(description='Run queries on bow corpus generated from the arxiv corpus.')
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='size values for the corpus')
parser.add_argument('algorithm', help='algorithm to apply to the corpus', choices=algorithms)
args = parser.parse_args()
run_sim(args.integers, args.algorithm)
if __name__ == "__main__":
main() | import argparse
import datetime
from os import getcwd
from os.path import isdir, exists
from project import corpus, knn_corpus, lda_corpus, word2vec_corpus
algorithms = {"lda": lda_corpus.LDACorpus,
"knn": knn_corpus.KNNCorpus,
"w2v": word2vec_corpus.W2VCorpus}
base_dir = getcwd()
output_loc = base_dir + "/%s.corpus_out"
dictionary_loc = output_loc + "/%scorpus.dict"
corpus_loc = output_loc + "/%scorpus.mm"
log_file = output_loc + "/Sim_runtimes.log"
sup_file_loc = output_loc + "/%d.%s"
def run_sim(ints, algorithm):
output_dir = output_loc % algorithm
if not exists(output_dir):
print "Output directory for %s must exist already. Run run_algorithm.py first." % algorithm
return
log = open(log_file % algorithm, 'a+')
for size in ints:
corpus_dict = dictionary_loc % size
corpus = corpus_loc % size
sup_file = sup_file_loc % (size, algorithm)
test_corpus = algorithms[algorithm].load(dictionary=corpus_dict, corpus=corpus, sup_file=sup_file)
# Run some queries on the corpus
# Log temporal time
log.write("%s %d query time:\t" % (algorithm, size) + str(test_corpus.get_train_time()) + "\n")
log.close()
def main():
parser = argparse.ArgumentParser(description='Run queries on bow corpus generated from the arxiv corpus.')
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='size values for the corpus')
parser.add_argument('algorithm', help='algorithm to apply to the corpus', choices=algorithms)
args = parser.parse_args()
run_sim(args.integers, args.algorithm)
if __name__ == "__main__":
main() | mit | Python |
f256048258d7a7cb6cd34248db922e69e5b51a0b | add ophyd-olog hooks | NSLS-II-CHX/ipython_ophyd,NSLS-II-CHX/ipython_ophyd | profile_collection/startup/00-startup.py | profile_collection/startup/00-startup.py | import logging
from bluesky.standard_config import * # gs, etc.
import matplotlib.pyplot as plt
plt.ion()
from bluesky import qt_kicker
qt_kicker.install_qt_kicker()
from databroker import DataBroker as db, get_events, get_images, get_table
from epics import caput, caget
# connect olog
# gs.RE.logbook = olog_wrapper(olog_client, ['Data Acquisition'])
# ophyd expects to find 'logbook' in the IPython namespace
from pyOlog import SimpleOlogClient
logbook = SimpleOlogClient()
RE=gs.RE
from bluesky.scientific_callbacks import plot_peak_stats
# from chxtools.xfuncs import *
# from chxtools.plot import plot1
from bluesky.plans import *
import ophyd
from chxtools import attfuncs as atten
| import logging
from bluesky.standard_config import * # gs, etc.
import matplotlib.pyplot as plt
plt.ion()
from bluesky import qt_kicker
qt_kicker.install_qt_kicker()
from databroker import DataBroker as db, get_events, get_images, get_table
from epics import caput, caget
# connect olog
# gs.RE.logbook = olog_wrapper(olog_client, ['Data Acquisition'])
RE=gs.RE
from bluesky.scientific_callbacks import plot_peak_stats
# from chxtools.xfuncs import *
# from chxtools.plot import plot1
from bluesky.plans import *
import ophyd
from chxtools import attfuncs as atten
| bsd-2-clause | Python |
27d8919b151c5dad92f081e0cee9722650db7bae | Add rectification tests | mnhrdt/s2p,carlodef/s2p,mnhrdt/s2p,carlodef/s2p | tests/rectification_test.py | tests/rectification_test.py | import os
import numpy as np
import pytest
from rpcm import rpc_from_geotiff
import s2p
from tests_utils import data_path
@pytest.fixture(name='matches')
def fixture_matches():
matches = np.loadtxt(
data_path(os.path.join('expected_output', 'units', 'unit_matches_from_rpc.txt'))
)
return matches
@pytest.fixture(name='images')
def fixture_images():
res = []
for i in [1, 2]:
im = data_path(os.path.join('input_pair', 'img_0{}.tif'.format(i)))
rpc = rpc_from_geotiff(im)
res.append(im)
res.append(rpc)
return res
def test_rectification_homographies(matches):
"""
Test for rectification.rectification_homographies().
"""
x, y, w, h = 100, 100, 200, 200
H1, H2, F = s2p.rectification.rectification_homographies(matches, x, y, w, h)
for variable, filename in zip([H1, H2, F], ['H1.txt', 'H2.txt', 'F.txt']):
expected = np.loadtxt(data_path(os.path.join('expected_output', 'units', filename)))
np.testing.assert_allclose(variable, expected, rtol=0.01, atol=1e-6, verbose=True)
def test_rectify_pair_no_matches(tmp_path, images):
"""
Test running rectification.rectify_pair() where no matches are found.
"""
im1, rpc1, im2, rpc2 = images
with pytest.raises(s2p.rectification.NoRectificationMatchesError):
s2p.rectification.rectify_pair(
im1=im1,
im2=im2,
rpc1=rpc1,
rpc2=rpc2,
x=100,
y=100,
w=200,
h=200,
out1=str(tmp_path / 'out1.tiff'),
out2=str(tmp_path / 'out2.tiff'),
sift_matches=None,
method='sift',
)
def test_rectify_pair_few_matches(tmp_path, matches, images):
"""
Test running rectification.rectify_pair() where less than 4 matches are found.
"""
im1, rpc1, im2, rpc2 = images
with pytest.raises(s2p.rectification.NoRectificationMatchesError):
s2p.rectification.rectify_pair(
im1=im1,
im2=im2,
rpc1=rpc1,
rpc2=rpc2,
x=100,
y=100,
w=200,
h=200,
out1=str(tmp_path / 'out1.tiff'),
out2=str(tmp_path / 'out2.tiff'),
sift_matches=matches[:3],
method='sift',
)
def test_rectify_pair_with_matches(tmp_path, matches, images):
"""
Test running rectification.rectify_pair() with some matches.
"""
im1, rpc1, im2, rpc2 = images
s2p.rectification.rectify_pair(
im1=im1,
im2=im2,
rpc1=rpc1,
rpc2=rpc2,
x=100,
y=100,
w=200,
h=200,
out1=str(tmp_path / 'out1.tiff'),
out2=str(tmp_path / 'out2.tiff'),
sift_matches=matches,
method='sift',
)
| import os
import numpy as np
import s2p
from tests_utils import data_path
def test_rectification_homographies():
"""
Test for rectification.rectification_homographies().
"""
matches = np.loadtxt(data_path(os.path.join('expected_output', 'units',
'unit_matches_from_rpc.txt')))
x, y, w, h = 100, 100, 200, 200
H1, H2, F = s2p.rectification.rectification_homographies(matches, x, y, w, h)
for variable, filename in zip([H1, H2, F], ['H1.txt', 'H2.txt', 'F.txt']):
expected = np.loadtxt(data_path(os.path.join('expected_output', 'units', filename)))
np.testing.assert_allclose(variable, expected, rtol=0.01, atol=1e-6, verbose=True)
| agpl-3.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.