commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
2e382c8bff2d0c3733b9b525168254971ca1175e | Update atexit function to avoid issues with late binding | python/pyspark/shell.py | python/pyspark/shell.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An interactive shell.
This file is designed to be launched as a PYTHONSTARTUP script.
"""
import atexit
import os
import platform
import warnings
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
if os.environ.get("SPARK_EXECUTOR_URI"):
SparkContext.setSystemProperty("spark.executor.uri", os.environ["SPARK_EXECUTOR_URI"])
SparkContext._ensure_initialized() # type: ignore
try:
spark = SparkSession._create_shell_session() # type: ignore
except Exception:
import sys
import traceback
warnings.warn("Failed to initialize Spark session.")
traceback.print_exc(file=sys.stderr)
sys.exit(1)
sc = spark.sparkContext
sql = spark.sql
atexit.register((lambda sc: lambda: sc.stop())(sc))
# for compatibility
sqlContext = spark._wrapped
sqlCtx = sqlContext
print(r"""Welcome to
____ __
/ __/__ ___ _____/ /__
_\ \/ _ \/ _ `/ __/ '_/
/__ / .__/\_,_/_/ /_/\_\ version %s
/_/
""" % sc.version)
print("Using Python version %s (%s, %s)" % (
platform.python_version(),
platform.python_build()[0],
platform.python_build()[1]))
print("Spark context Web UI available at %s" % (sc.uiWebUrl))
print("Spark context available as 'sc' (master = %s, app id = %s)." % (sc.master, sc.applicationId))
print("SparkSession available as 'spark'.")
# The ./bin/pyspark script stores the old PYTHONSTARTUP value in OLD_PYTHONSTARTUP,
# which allows us to execute the user's PYTHONSTARTUP file:
_pythonstartup = os.environ.get('OLD_PYTHONSTARTUP')
if _pythonstartup and os.path.isfile(_pythonstartup):
with open(_pythonstartup) as f:
code = compile(f.read(), _pythonstartup, 'exec')
exec(code)
| #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An interactive shell.
This file is designed to be launched as a PYTHONSTARTUP script.
"""
import atexit
import os
import platform
import warnings
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
if os.environ.get("SPARK_EXECUTOR_URI"):
SparkContext.setSystemProperty("spark.executor.uri", os.environ["SPARK_EXECUTOR_URI"])
SparkContext._ensure_initialized() # type: ignore
try:
spark = SparkSession._create_shell_session() # type: ignore
except Exception:
import sys
import traceback
warnings.warn("Failed to initialize Spark session.")
traceback.print_exc(file=sys.stderr)
sys.exit(1)
sc = spark.sparkContext
sql = spark.sql
atexit.register(lambda: sc.stop())
# for compatibility
sqlContext = spark._wrapped
sqlCtx = sqlContext
print(r"""Welcome to
____ __
/ __/__ ___ _____/ /__
_\ \/ _ \/ _ `/ __/ '_/
/__ / .__/\_,_/_/ /_/\_\ version %s
/_/
""" % sc.version)
print("Using Python version %s (%s, %s)" % (
platform.python_version(),
platform.python_build()[0],
platform.python_build()[1]))
print("Spark context Web UI available at %s" % (sc.uiWebUrl))
print("Spark context available as 'sc' (master = %s, app id = %s)." % (sc.master, sc.applicationId))
print("SparkSession available as 'spark'.")
# The ./bin/pyspark script stores the old PYTHONSTARTUP value in OLD_PYTHONSTARTUP,
# which allows us to execute the user's PYTHONSTARTUP file:
_pythonstartup = os.environ.get('OLD_PYTHONSTARTUP')
if _pythonstartup and os.path.isfile(_pythonstartup):
with open(_pythonstartup) as f:
code = compile(f.read(), _pythonstartup, 'exec')
exec(code)
| Python | 0.99902 |
10e6c53a39d3ee57d855ada1aa6e9d620f094465 | add 'save' command | track-cli.py | track-cli.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import sys
import zmq
import logging
log = logging.getLogger('track_cli')
def print_info():
log.info("zeromq version: %s" % zmq.zmq_version())
log.info("pyzmq version: %s" % zmq.pyzmq_version())
def send_request(request):
context = zmq.Context()
req_socket = context.socket(zmq.REQ)
req_socket.connect('tcp://127.0.0.1:3456')
req_socket.send_json(request)
return req_socket.recv_json()
def handle_result(result):
if 'type' in result and result['type'] == 'error':
raise Exception('server replied with error: "%s"' % result['what'])
print(result)
def main():
args = sys.argv[1:]
if args == []:
print('no command provided')
return
elif args == ['quit']:
request = {'type': 'quit'}
elif args == ['version']:
request = {'type': 'version'}
elif args == ['apps']:
request = {'type': 'apps'}
elif args == ['current']:
request = {'type': 'current'}
elif args == ['rules']:
request = {'type': 'rules'}
elif args == ['save']:
request = {'type': 'save'}
elif args == ['help']:
print(['quit', 'version', 'apps', 'current', 'rules'])
sys.exit()
else:
raise Exception('command not handled: %s' % args)
try:
result = send_request(request)
handle_result(result)
except zmq.ZMQError as e:
log.error(e)
return
except KeyboardInterrupt:
log.info("got keyboard interrupt - exit")
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
| #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import sys
import zmq
import logging
log = logging.getLogger('track_cli')
def print_info():
log.info("zeromq version: %s" % zmq.zmq_version())
log.info("pyzmq version: %s" % zmq.pyzmq_version())
def send_request(request):
context = zmq.Context()
req_socket = context.socket(zmq.REQ)
req_socket.connect('tcp://127.0.0.1:3456')
req_socket.send_json(request)
return req_socket.recv_json()
def handle_result(result):
if 'type' in result and result['type'] == 'error':
raise Exception('server replied with error: "%s"' % result['what'])
print(result)
def main():
args = sys.argv[1:]
if args == []:
print('no command provided')
return
elif args == ['quit']:
request = {'type': 'quit'}
elif args == ['version']:
request = {'type': 'version'}
elif args == ['apps']:
request = {'type': 'apps'}
elif args == ['current']:
request = {'type': 'current'}
elif args == ['rules']:
request = {'type': 'rules'}
elif args == ['help']:
print(['quit', 'version', 'apps', 'current', 'rules'])
sys.exit()
else:
raise Exception('command not handled: %s' % args)
try:
result = send_request(request)
handle_result(result)
except zmq.ZMQError as e:
log.error(e)
return
except KeyboardInterrupt:
log.info("got keyboard interrupt - exit")
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
| Python | 0.000009 |
0dea5f2b6a2e6d702167c3415d10a47275e30601 | update the version to 0.6.0 | zignal/__init__.py | zignal/__init__.py | """
This is the zignal library
@author: Ronny Andersson (ronny@andersson.tk)
@copyright: (c) 2013 Ronny Andersson
@license: MIT
"""
__version__ = "0.6.0"
from .audio import *
from . import filters
from . import measure
from . import music
from . import sndcard
__all__ = [
'filters',
'measure',
'music',
'sndcard',
]
__all__.extend(audio.__all__) #@UndefinedVariable
| """
This is the zignal library
@author: Ronny Andersson (ronny@andersson.tk)
@copyright: (c) 2013 Ronny Andersson
@license: MIT
"""
__version__ = "0.5.0"
from .audio import *
from . import filters
from . import measure
from . import music
from . import sndcard
__all__ = [
'filters',
'measure',
'music',
'sndcard',
]
__all__.extend(audio.__all__) #@UndefinedVariable
| Python | 0.000005 |
c91240cd43c4f714a404cf5f2ce566dad290c0c5 | Add url mapping for ProjectEntrySumsAPIView | trex/urls.py | trex/urls.py | # -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE comming with the source of 'trex' for details.
#
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from trex.views import project
urlpatterns = patterns(
'',
url(r"^$",
TemplateView.as_view(template_name="index.html"),
name="index",
),
url(r"^api/1/projects/?$",
project.ProjectListCreateAPIView.as_view(),
name="project-list"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/$",
project.ProjectDetailAPIView.as_view(),
name="project-detail"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/entries/?$",
project.ProjectEntriesListAPIView.as_view(),
name="project-entries-list"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/entries/sums/?$",
project.ProjectEntrySumsAPIView.as_view(),
name="project-entries-sums"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/tags/?$",
project.ProjectTagsListAPIView.as_view(),
name="project-tags-list"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/users/?$",
project.ProjectUsersListAPIView.as_view(),
name="project-users-list"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/zeiterfassung/?$",
project.ProjectZeiterfassungAPIView.as_view(),
name="project-zeiterfassung"),
url(r"^api/1/entries/(?P<pk>[0-9]+)/?$",
project.EntryDetailAPIView.as_view(),
name="entry-detail"),
url(r"^api/1/tags/(?P<pk>[0-9]+)/?$",
project.TagDetailAPIView.as_view(),
name="tag-detail"),
)
| # -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE comming with the source of 'trex' for details.
#
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from trex.views import project
urlpatterns = patterns(
'',
url(r"^$",
TemplateView.as_view(template_name="index.html"),
name="index",
),
url(r"^api/1/projects/?$",
project.ProjectListCreateAPIView.as_view(),
name="project-list"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/$",
project.ProjectDetailAPIView.as_view(),
name="project-detail"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/entries/?$",
project.ProjectEntriesListAPIView.as_view(),
name="project-entries-list"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/tags/?$",
project.ProjectTagsListAPIView.as_view(),
name="project-tags-list"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/users/?$",
project.ProjectUsersListAPIView.as_view(),
name="project-users-list"),
url(r"^api/1/projects/(?P<pk>[0-9]+)/zeiterfassung/?$",
project.ProjectZeiterfassungAPIView.as_view(),
name="project-zeiterfassung"),
url(r"^api/1/entries/(?P<pk>[0-9]+)/?$",
project.EntryDetailAPIView.as_view(),
name="entry-detail"),
url(r"^api/1/tags/(?P<pk>[0-9]+)/?$",
project.TagDetailAPIView.as_view(),
name="tag-detail"),
)
| Python | 0 |
7cbee5e817b6d2bbf4fbcbf8cf1cf327bdbabc9c | rename locator_string to package_id | cms/djangoapps/contentstore/management/commands/migrate_to_split.py | cms/djangoapps/contentstore/management/commands/migrate_to_split.py | """
Django management command to migrate a course from the old Mongo modulestore
to the new split-Mongo modulestore.
"""
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.split_migrator import SplitMigrator
from xmodule.modulestore import InvalidLocationError
from xmodule.modulestore.django import loc_mapper
def user_from_str(identifier):
"""
Return a user identified by the given string. The string could be an email
address, or a stringified integer corresponding to the ID of the user in
the database. If no user could be found, a User.DoesNotExist exception
will be raised.
"""
try:
user_id = int(identifier)
except ValueError:
return User.objects.get(email=identifier)
else:
return User.objects.get(id=user_id)
class Command(BaseCommand):
"Migrate a course from old-Mongo to split-Mongo"
help = "Migrate a course from old-Mongo to split-Mongo"
args = "location email <locator>"
def parse_args(self, *args):
"""
Return a three-tuple of (location, user, locator_string).
If the user didn't specify a locator string, the third return value
will be None.
"""
if len(args) < 2:
raise CommandError(
"migrate_to_split requires at least two arguments: "
"a location and a user identifier (email or ID)"
)
try:
location = Location(args[0])
except InvalidLocationError:
raise CommandError("Invalid location string {}".format(args[0]))
try:
user = user_from_str(args[1])
except User.DoesNotExist:
raise CommandError("No user found identified by {}".format(args[1]))
try:
package_id = args[2]
except IndexError:
package_id = None
return location, user, package_id
def handle(self, *args, **options):
location, user, package_id = self.parse_args(*args)
migrator = SplitMigrator(
draft_modulestore=modulestore('default'),
direct_modulestore=modulestore('direct'),
split_modulestore=modulestore('split'),
loc_mapper=loc_mapper(),
)
migrator.migrate_mongo_course(location, user, package_id)
| """
Django management command to migrate a course from the old Mongo modulestore
to the new split-Mongo modulestore.
"""
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.split_migrator import SplitMigrator
from xmodule.modulestore import InvalidLocationError
from xmodule.modulestore.django import loc_mapper
def user_from_str(identifier):
"""
Return a user identified by the given string. The string could be an email
address, or a stringified integer corresponding to the ID of the user in
the database. If no user could be found, a User.DoesNotExist exception
will be raised.
"""
try:
user_id = int(identifier)
except ValueError:
return User.objects.get(email=identifier)
else:
return User.objects.get(id=user_id)
class Command(BaseCommand):
"Migrate a course from old-Mongo to split-Mongo"
help = "Migrate a course from old-Mongo to split-Mongo"
args = "location email <locator>"
def parse_args(self, *args):
"""
Return a three-tuple of (location, user, locator_string).
If the user didn't specify a locator string, the third return value
will be None.
"""
if len(args) < 2:
raise CommandError(
"migrate_to_split requires at least two arguments: "
"a location and a user identifier (email or ID)"
)
try:
location = Location(args[0])
except InvalidLocationError:
raise CommandError("Invalid location string {}".format(args[0]))
try:
user = user_from_str(args[1])
except User.DoesNotExist:
raise CommandError("No user found identified by {}".format(args[1]))
try:
locator_string = args[2]
except IndexError:
locator_string = None
return location, user, locator_string
def handle(self, *args, **options):
location, user, locator_string = self.parse_args(*args)
migrator = SplitMigrator(
draft_modulestore=modulestore('default'),
direct_modulestore=modulestore('direct'),
split_modulestore=modulestore('split'),
loc_mapper=loc_mapper(),
)
migrator.migrate_mongo_course(location, user, locator_string)
| Python | 0.000355 |
c61187382c968c3018f88637806874ddd0b63b71 | add log for debug | web/views.py | web/views.py | import requests
from django.http import Http404
from django.shortcuts import render, render_to_response, redirect
# Create your views here.
from django.template import RequestContext
from web.fetch import Fetcher
from settings import LEAN_CLOUD_ID, LEAN_CLOUD_SECRET
import leancloud
# @api_view(('GET',))
# def api_root(request, format=None):
# return Response({
# 'chairmans': reverse('chairman-list', request=request, format=format),
# })
def get_index(request):
# response = requests.get('http://127.0.0.1:8000/api/chairmans/')
# chairmans = response.json()
leancloud.init(LEAN_CLOUD_ID, LEAN_CLOUD_SECRET)
Chairman = leancloud.Object.extend('Chairman')
query = Chairman.query
query.select('type', 'href', 'id', 'title', 'img', 'name', 'num')
query.add_descending('num')
query_list = query.find()
chairmans = []
for chairman in query_list:
print chairman
chairman_view = {}
chairman_view.type = chairman.get('type')
chairman_view.href = chairman.get('href')
chairman_view.id = chairman.get('id')
chairman_view.title = chairman.get('title')
chairman_view.img = chairman.get('img')
chairman_view.name = chairman.get('name')
chairman_view.num = chairman.get('num')
chairmans.append(chairman_view)
return render_to_response('index.html', locals())
def fetch(request):
leancloud.init(LEAN_CLOUD_ID, LEAN_CLOUD_SECRET)
query = leancloud.Query('Chairman')
allDataCompleted = False
batch = 0
limit = 1000
while not allDataCompleted:
query.limit(limit)
query.skip(batch * limit)
query.add_ascending('createdAt')
resultList = query.find()
if len(resultList) < limit:
allDataCompleted = True
leancloud.Object.destroy_all(resultList)
batch += 1
fetcher = Fetcher()
fetcher.fetch_cc()
fetcher.fetch_douyu()
fetcher.fetch_longzhu()
fetcher.fetch_quanmin()
fetcher.fetch_xiongmao()
fetcher.fetch_zhanqi()
fetcher.fetch_huya()
for chairman in fetcher.chairmans:
try:
chairman.save()
except Exception, e:
print e
return redirect("/")
| import requests
from django.http import Http404
from django.shortcuts import render, render_to_response, redirect
# Create your views here.
from django.template import RequestContext
from web.fetch import Fetcher
from settings import LEAN_CLOUD_ID, LEAN_CLOUD_SECRET
import leancloud
# @api_view(('GET',))
# def api_root(request, format=None):
# return Response({
# 'chairmans': reverse('chairman-list', request=request, format=format),
# })
def get_index(request):
# response = requests.get('http://127.0.0.1:8000/api/chairmans/')
# chairmans = response.json()
leancloud.init(LEAN_CLOUD_ID, LEAN_CLOUD_SECRET)
Chairman = leancloud.Object.extend('Chairman')
query = Chairman.query
query.select('type', 'href', 'id', 'title', 'img', 'name', 'num')
query.add_descending('num')
query_list = query.find()
chairmans = []
for chairman in query_list:
chairman_view = {}
chairman_view.type = chairman.get('type')
chairman_view.href = chairman.get('href')
chairman_view.id = chairman.get('id')
chairman_view.title = chairman.get('title')
chairman_view.img = chairman.get('img')
chairman_view.name = chairman.get('name')
chairman_view.num = chairman.get('num')
chairmans.append(chairman_view)
return render_to_response('index.html', locals())
def fetch(request):
leancloud.init(LEAN_CLOUD_ID, LEAN_CLOUD_SECRET)
query = leancloud.Query('Chairman')
allDataCompleted = False
batch = 0
limit = 1000
while not allDataCompleted:
query.limit(limit)
query.skip(batch * limit)
query.add_ascending('createdAt')
resultList = query.find()
if len(resultList) < limit:
allDataCompleted = True
leancloud.Object.destroy_all(resultList)
batch += 1
fetcher = Fetcher()
fetcher.fetch_cc()
fetcher.fetch_douyu()
fetcher.fetch_longzhu()
fetcher.fetch_quanmin()
fetcher.fetch_xiongmao()
fetcher.fetch_zhanqi()
fetcher.fetch_huya()
for chairman in fetcher.chairmans:
try:
chairman.save()
except Exception, e:
print e
return redirect("/")
| Python | 0.000001 |
547c8c6a1aef80ce43451479e0b79b51db5b717a | Fix headers types | pyshop/views/credentials.py | pyshop/views/credentials.py | # -*- coding: utf-8 -*-
"""
PyShop Credentials Views.
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import base64
from pyramid.httpexceptions import HTTPFound
from pyramid.url import resource_url, route_url
from pyramid.security import remember, forget
from pyramid.response import Response
from pyshop.helpers.i18n import trans as _
from pyshop.models import DBSession, User
from pyshop.compat import unicode
from .base import View
log = logging.getLogger(__name__)
class Login(View):
def render(self):
login_url = resource_url(self.request.context, self.request, 'login')
referrer = self.request.url
# never use the login form itself as came_from
if referrer == login_url:
referrer = '/'
came_from = self.request.params.get('came_from', referrer)
login = self.request.params.get('user.login', '')
if 'form.submitted' in self.request.params:
password = self.request.params.get('user.password', u'')
if password:
if User.by_ldap_credentials(self.session, login, password,
self.request.registry.settings) is not None:
log.info('login %r succeed' % login)
headers = remember(self.request, login)
return HTTPFound(location=came_from,
headers=headers)
if User.by_credentials(self.session, login, password) is not None:
log.info('login %r succeed' % login)
headers = remember(self.request, login)
return HTTPFound(location=came_from,
headers=headers)
return {'came_from': came_from,
'user': User(login=login),
}
class Logout(View):
def render(self):
return HTTPFound(location=route_url('index', self.request),
headers=forget(self.request))
def authbasic(request):
"""
Authentification basic, Upload pyshop repository access
"""
if len(request.environ.get('HTTP_AUTHORIZATION','')) > 0:
auth = request.environ.get('HTTP_AUTHORIZATION')
scheme, data = auth.split(None, 1)
assert scheme.lower() == 'basic'
data = base64.b64decode(data)
if not isinstance(data, unicode):
data = data.decode('utf-8')
username, password = data.split(':', 1)
if User.by_ldap_credentials(DBSession(), username, password, request.registry.settings):
return HTTPFound(location=request.url)
if User.by_credentials(DBSession(), username, password):
return HTTPFound(location=request.url)
return Response(status=401,
headerlist=[(b'WWW-Authenticate',
b'Basic realm="pyshop repository access"'
)],
)
| # -*- coding: utf-8 -*-
"""
PyShop Credentials Views.
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import base64
from pyramid.httpexceptions import HTTPFound
from pyramid.url import resource_url, route_url
from pyramid.security import remember, forget
from pyramid.response import Response
from pyshop.helpers.i18n import trans as _
from pyshop.models import DBSession, User
from pyshop.compat import unicode
from .base import View
log = logging.getLogger(__name__)
class Login(View):
def render(self):
login_url = resource_url(self.request.context, self.request, 'login')
referrer = self.request.url
# never use the login form itself as came_from
if referrer == login_url:
referrer = '/'
came_from = self.request.params.get('came_from', referrer)
login = self.request.params.get('user.login', '')
if 'form.submitted' in self.request.params:
password = self.request.params.get('user.password', u'')
if password:
if User.by_ldap_credentials(self.session, login, password,
self.request.registry.settings) is not None:
log.info('login %r succeed' % login)
headers = remember(self.request, login)
return HTTPFound(location=came_from,
headers=headers)
if User.by_credentials(self.session, login, password) is not None:
log.info('login %r succeed' % login)
headers = remember(self.request, login)
return HTTPFound(location=came_from,
headers=headers)
return {'came_from': came_from,
'user': User(login=login),
}
class Logout(View):
def render(self):
return HTTPFound(location=route_url('index', self.request),
headers=forget(self.request))
def authbasic(request):
"""
Authentification basic, Upload pyshop repository access
"""
if len(request.environ.get('HTTP_AUTHORIZATION','')) > 0:
auth = request.environ.get('HTTP_AUTHORIZATION')
scheme, data = auth.split(None, 1)
assert scheme.lower() == 'basic'
data = base64.b64decode(data)
if not isinstance(data, unicode):
data = data.decode('utf-8')
username, password = data.split(':', 1)
if User.by_ldap_credentials(DBSession(), username, password, request.registry.settings):
return HTTPFound(location=request.url)
if User.by_credentials(DBSession(), username, password):
return HTTPFound(location=request.url)
return Response(status=401,
headerlist=[('WWW-Authenticate',
'Basic realm="pyshop repository access"'
)],
)
| Python | 0.000002 |
42dfb6850be83ba17b9e649a499926d31f1afa95 | Fixing command. | windmill/browser/chrome.py | windmill/browser/chrome.py | import commands
import tempfile
import logging
import signal
import subprocess
import sys, os
import urlparse
import windmill
logger = logging.getLogger(__name__)
import safari
class Chrome(safari.Safari):
def __init__(self):
self.chrome_binary = windmill.settings['CHROME_BINARY']
self.test_url = windmill.settings['TEST_URL']
# def create_redirect(self):
# self.redirection_page = tempfile.mktemp(suffix='.html')
# f = open(self.redirection_page, 'w')
# test_url = windmill.get_test_url(windmill.settings['TEST_URL'])
# f.write( html_redirection.replace('{replace}', test_url) )
# f.flush() ; f.close()
# def set_proxy_mac(self):
# """Set local Proxy"""
# self.netsetup_binary = windmill.settings['NETWORKSETUP_BINARY']
# interface_name = find_default_interface_name()
# uri = urlparse.urlparse(self.test_url)
# set_proxy_command = ' '.join([ self.netsetup_binary,
# '-setwebproxy',
# '"'+interface_name+'"',
# 'localhost',
# str(windmill.settings['SERVER_HTTP_PORT'])
# ])
# commands.getoutput(set_proxy_command)
# enable_proxy_command = ' '.join([ self.netsetup_binary,
# '-setwebproxystate',
# '"'+interface_name+'"',
# 'on'
# ])
# commands.getoutput(enable_proxy_command)
# self.create_redirect()
# self.interface_name = interface_name
#
# def unset_proxy_mac(self):
# commands.getoutput(' '.join([self.netsetup_binary, '-setwebproxystate', '"'+self.interface_name+'"', 'off']))
def set_proxy_windows(self):
import ie
self.ie_obj = ie.InternetExplorer()
self.ie_obj.set_proxy()
#
# def unset_proxy_windows(self):
# self.ie_obj.unset_proxy()
def start(self):
"""Start Chrome"""
# if sys.platform == 'darwin':
# self.set_proxy_mac()
if os.name == 'nt' or sys.platform == 'cygwin':
self.set_proxy_windows()
kwargs = {'stdout':sys.stdout ,'stderr':sys.stderr, 'stdin':sys.stdin}
command = [self.chrome_binary, '--homepage', self.test_url+'/windmill-serv/start.html']
self.p_handle = subprocess.Popen(command, **kwargs)
logger.info(command)
def kill(self, kill_signal=None):
"""Stop Chrome"""
# if sys.platform == 'darwin':
# self.unset_proxy_mac()
if os.name == 'nt' or sys.platform == 'cygwin':
self.unset_proxy_windows()
try:
self.p_handle.kill(group=True)
except:
logger.error('Cannot kill Chrome')
# def stop(self):
# self.kill(signal.SIGTERM)
#
# def is_alive(self):
# if self.p_handle.poll() is None:
# return False
# return True
| import commands
import tempfile
import logging
import signal
import subprocess
import sys, os
import urlparse
import windmill
logger = logging.getLogger(__name__)
import safari
class Chrome(safari.Safari):
def __init__(self):
self.chrome_binary = windmill.settings['CHROME_BINARY']
self.test_url = windmill.settings['TEST_URL']
# def create_redirect(self):
# self.redirection_page = tempfile.mktemp(suffix='.html')
# f = open(self.redirection_page, 'w')
# test_url = windmill.get_test_url(windmill.settings['TEST_URL'])
# f.write( html_redirection.replace('{replace}', test_url) )
# f.flush() ; f.close()
# def set_proxy_mac(self):
# """Set local Proxy"""
# self.netsetup_binary = windmill.settings['NETWORKSETUP_BINARY']
# interface_name = find_default_interface_name()
# uri = urlparse.urlparse(self.test_url)
# set_proxy_command = ' '.join([ self.netsetup_binary,
# '-setwebproxy',
# '"'+interface_name+'"',
# 'localhost',
# str(windmill.settings['SERVER_HTTP_PORT'])
# ])
# commands.getoutput(set_proxy_command)
# enable_proxy_command = ' '.join([ self.netsetup_binary,
# '-setwebproxystate',
# '"'+interface_name+'"',
# 'on'
# ])
# commands.getoutput(enable_proxy_command)
# self.create_redirect()
# self.interface_name = interface_name
#
# def unset_proxy_mac(self):
# commands.getoutput(' '.join([self.netsetup_binary, '-setwebproxystate', '"'+self.interface_name+'"', 'off']))
def set_proxy_windows(self):
import ie
self.ie_obj = ie.InternetExplorer()
self.ie_obj.set_proxy()
#
# def unset_proxy_windows(self):
# self.ie_obj.unset_proxy()
def start(self):
"""Start Chrome"""
# if sys.platform == 'darwin':
# self.set_proxy_mac()
if os.name == 'nt' or sys.platform == 'cygwin':
self.set_proxy_windows()
kwargs = {'stdout':sys.stdout ,'stderr':sys.stderr, 'stdin':sys.stdin}
self.p_handle = subprocess.Popen([self.chrome_binary, '--homepage', self.test_url], **kwargs)
logger.info([self.chrome_binary, self.redirection_page])
def kill(self, kill_signal=None):
"""Stop Chrome"""
# if sys.platform == 'darwin':
# self.unset_proxy_mac()
if os.name == 'nt' or sys.platform == 'cygwin':
self.unset_proxy_windows()
try:
self.p_handle.kill(group=True)
except:
logger.error('Cannot kill Chrome')
# def stop(self):
# self.kill(signal.SIGTERM)
#
# def is_alive(self):
# if self.p_handle.poll() is None:
# return False
# return True
| Python | 0.999753 |
0921f78660b7b0784ebe2fa586dd54551704699e | Fix fix_gir.py to work with ginterfaces and to support delegates. | tools/fix_gir.py | tools/fix_gir.py | #!/usr/bin/python
from xml.dom import minidom
def purge_white_space_and_fix_namespace(node, indent=0):
if getattr(node, "tagName", None) == "namespace":
name = node.getAttribute("name")
node.setAttribute("name", name.lstrip('_'))
for child in [c for c in node.childNodes]:
if child.nodeType == node.TEXT_NODE or \
getattr(child, "tagName", None) == "annotation":
node.removeChild(child)
continue
purge_white_space_and_fix_namespace(child, indent+1)
def find_ancestor(node, name):
if getattr(node, "tagName", None) == name:
return node
parent = getattr(node, "parentNode", None)
if not parent:
return None
return find_ancestor(parent, name)
def fix_vfuncs(dom):
for f in dom.getElementsByTagName("callback"):
record = find_ancestor(f, "record")
if not record:
continue
name = record.getAttribute("name")
cname = record.getAttribute("c:type")
assert(name.endswith("Class") or name.endswith("Iface"))
assert(cname.endswith("Class") or name.endswith("Iface"))
params = (f.getElementsByTagName("parameters") or [None])[0]
if not params:
params = dom.createElement("parameters")
f.insertBefore(params, f.firstChild)
param = dom.createElement("parameter")
param.setAttribute("name", "self")
param.setAttribute("transfer-ownership", "none")
ptype = dom.createElement("type")
ptype.setAttribute("name", name[:-5])
ptype.setAttribute("c:type", cname[:-5])
param.appendChild(ptype)
params.insertBefore(param, params.firstChild)
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
print "supply a gir file"
sys.exit(1)
dom = minidom.parse(sys.argv[-1])
purge_white_space_and_fix_namespace(dom)
fix_vfuncs(dom)
print dom.toprettyxml(indent=" ", newl="\n")
| #!/usr/bin/python
from xml.dom import minidom
def purge_white_space_and_fix_namespace(node, indent=0):
if getattr(node, "tagName", None) == "namespace":
name = node.getAttribute("name")
node.setAttribute("name", name.lstrip('_'))
for child in [c for c in node.childNodes]:
if child.nodeType == node.TEXT_NODE or \
getattr(child, "tagName", None) == "annotation":
node.removeChild(child)
continue
purge_white_space_and_fix_namespace(child, indent+1)
def find_ancestor(node, name):
if getattr(node, "tagName") == name:
return node
parent = getattr(node, "parentNode", None)
if not parent:
return None
return find_ancestor(parent, name)
def fix_vfuncs(dom):
for f in dom.getElementsByTagName("callback"):
record = find_ancestor(f, "record")
if not record:
continue
name = record.getAttribute("name")
cname = record.getAttribute("c:type")
assert(name.endswith("Class"))
assert(cname.endswith("Class"))
params = (f.getElementsByTagName("parameters") or [None])[0]
if not params:
params = dom.createElement("parameters")
f.insertBefore(params, f.firstChild)
param = dom.createElement("parameter")
param.setAttribute("name", "self")
param.setAttribute("transfer-ownership", "none")
ptype = dom.createElement("type")
ptype.setAttribute("name", name[:-5])
ptype.setAttribute("c:type", cname[:-5])
param.appendChild(ptype)
params.insertBefore(param, params.firstChild)
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
print "supply a gir file"
sys.exit(1)
dom = minidom.parse(sys.argv[-1])
purge_white_space_and_fix_namespace(dom)
fix_vfuncs(dom)
print dom.toprettyxml(indent=" ", newl="\n")
| Python | 0 |
2c4cf38b7251ddffaba954f71bbca9632123777c | Add start_wizbit_server function that registers and publishes a wizbit server. | wizd/wizd.py | wizd/wizd.py | #! /usr/bin/env python
import sys
import socket
import os
import SimpleXMLRPCServer
import gobject
from wizbit import ServicePublisher, ServiceBrowser
WIZBIT_SERVER_PORT = 3492
from wizbit import Shares, Directory
from wizbit import *
class WizbitServer():
def getShares(self):
shares = Shares.getShares()
return shares
def getPath(self, uuid):
shares = Shares.getShares()
for id, shareId, directory in shares:
if uuid == id:
break
return directory
def getLastConfSeen(self, uuid):
return "Not Implemented"
def setConf(self, uuid, confstring):
return "Not Implemented"
def getConf(self, uuid):
shares = Shares.getShares()
for id, shareId, directory in shares:
if uuid == id:
break
wizpath = Paths(directory)
file = open(wizpath.getWizconf(), "r")
conf = file.read()
file.close()
return conf
def pushNotify(self, dirId, remoteShareId, host):
#For every local directory with the same shareId, pull
#from the remote directory
shares = Shares.getShares()
for id, localShareId, directory in shares:
if localShareId == remoteShareId:
Directory.pull(directory, dirId, host)
def server_socket_error():
print "RPC server socket was disconnected, exiting"
global main_loop
main_loop.quit()
def server_callback(source, cb_condition, server):
server.handle_request()
def start_wizbit_server():
servinst = WizbitServer()
server = SimpleXMLRPCServer.SimpleXMLRPCServer(("", 0))
server.register_instance(servinst)
server.register_introspection_functions()
gobject.io_add_watch (server.fileno(), gobject.IO_IN, server_callback, server)
gobject.io_add_watch (server.fileno(), gobject.IO_HUP | gobject.IO_ERR, server_socket_error)
sp = ServicePublisher("Wizbit", "_wizbit._tcp", server.server_address[1])
sb = ServiceBrowser("_wizbit._tcp")
def main(args):
global main_loop
start_wizbit_server()
main_loop = gobject.MainLoop()
try:
main_loop.run()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
sys.exit(main(sys.argv))
| #! /usr/bin/env python
import sys
import socket
import os
import SimpleXMLRPCServer
import gobject
from wizbit import ServicePublisher, ServiceBrowser
WIZBIT_SERVER_PORT = 3492
from wizbit import Shares, Directory
from wizbit import *
class WizbitServer():
def getShares(self):
shares = Shares.getShares()
return shares
def getPath(self, uuid):
shares = Shares.getShares()
for id, shareId, directory in shares:
if uuid == id:
break
return directory
def getLastConfSeen(self, uuid):
return "Not Implemented"
def setConf(self, uuid, confstring):
return "Not Implemented"
def getConf(self, uuid):
shares = Shares.getShares()
for id, shareId, directory in shares:
if uuid == id:
break
wizpath = Paths(directory)
file = open(wizpath.getWizconf(), "r")
conf = file.read()
file.close()
return conf
def pushNotify(self, dirId, remoteShareId, host):
#For every local directory with the same shareId, pull
#from the remote directory
shares = Shares.getShares()
for id, localShareId, directory in shares:
if localShareId == remoteShareId:
Directory.pull(directory, dirId, host)
def server_socket_error():
print "RPC server socket was disconnected, exiting"
global main_loop
main_loop.quit()
def server_callback(source, cb_condition, server):
server.handle_request()
def main(args):
servinst = WizbitServer()
server = SimpleXMLRPCServer.SimpleXMLRPCServer(("", 0))
server.register_instance(servinst)
server.register_introspection_functions()
gobject.io_add_watch (server.fileno(), gobject.IO_IN, server_callback, server)
gobject.io_add_watch (server.fileno(), gobject.IO_HUP | gobject.IO_ERR, server_socket_error)
sp = ServicePublisher("Wizbit", "_wizbit._tcp", server.server_address[1])
sb = ServiceBrowser("_wizbit._tcp")
global main_loop
main_loop = gobject.MainLoop()
try:
main_loop.run()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
sys.exit(main(sys.argv))
| Python | 0 |
5258c7d70796a03361ad865a15fd3896bb7a95f1 | Fix tests | pypeman/tests/test_nodes.py | pypeman/tests/test_nodes.py | import unittest
import asyncio
import logging
class FakeChannel():
def __init__(self):
self.logger = logging.getLogger()
self.uuid = 'fakeChannel'
class NodesTests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
#asyncio.set_event_loop(None)
def test_log_node(self):
""" if Log() node is functionnal """
from pypeman.nodes import Log
from pypeman import message
n = Log()
n.channel = FakeChannel()
m = message.Message()
@asyncio.coroutine
def go():
ret = yield from n.handle(m)
return ret
self.loop.run_until_complete(go())
def test_json_to_python_node(self):
""" if JsonToPython() node is functionnal """
from pypeman.nodes import JsonToPython
from pypeman import message
n = JsonToPython()
n.channel = FakeChannel()
m = message.Message()
m.payload = '{"test":2}'
@asyncio.coroutine
def go():
ret = yield from n.handle(m)
return ret
self.loop.run_until_complete(go())
| import unittest
import asyncio
class FakeChannel():
def __init__(self):
self.uuid = 'fakeChannel'
class NodesTests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
#asyncio.set_event_loop(None)
def test_log_node(self):
""" if Log() node is functionnal """
from pypeman.nodes import Log
from pypeman import message
n = Log()
n.channel = FakeChannel()
m = message.Message()
@asyncio.coroutine
def go():
ret = yield from n.handle(m)
return ret
self.loop.run_until_complete(go())
def test_json_to_python_node(self):
""" if JsonToPython() node is functionnal """
from pypeman.nodes import JsonToPython
from pypeman import message
n = JsonToPython()
n.channel = FakeChannel()
m = message.Message()
m.payload = '{"test":2}'
@asyncio.coroutine
def go():
ret = yield from n.handle(m)
return ret
self.loop.run_until_complete(go())
| Python | 0.000003 |
175cfe45aba554d1544be3ee71bdb8a7b499d879 | add radius in request | wtm/views.py | wtm/views.py | import urllib2
from lxml import etree
from deform import Form
from pyramid.view import view_config
from wtm.schemas.home import HomeSchema
@view_config(route_name='home', renderer='templates/home.pt')
def home(request):
"""
home page
"""
homeForm = Form(HomeSchema(), buttons=('submit',), action=request.route_path('addContent'))
return {'form': homeForm.render()}
@view_config(route_name='addContent', renderer='json')
def addContent(request):
baseURL = 'http://www.overpass-api.de/api/interpreter'
data = 'node(around:%s.0,%s,%s)["amenity"="cafe"];out;' % (request.POST['dist'],
request.POST['lat'],
request.POST['lon'])
print data
url = urllib2.Request(baseURL, data)
xmlData = urllib2.urlopen(url).read()
xml = etree.fromstring(xmlData)
for node in xml.xpath('node/tag[@k="name"]'):
print node.get('v')
return ''
| import urllib2
from lxml import etree
from deform import Form
from pyramid.view import view_config
from wtm.schemas.home import HomeSchema
@view_config(route_name='home', renderer='templates/home.pt')
def home(request):
"""
home page
"""
homeForm = Form(HomeSchema(), buttons=('submit',), action=request.route_path('addContent'))
return {'form': homeForm.render()}
@view_config(route_name='addContent', renderer='json')
def addContent(request):
baseURL = 'http://www.overpass-api.de/api/interpreter'
data = 'node(around:250.0,%s,%s)["amenity"="cafe"];out;' % (request.POST['lat'], request.POST['lon'])
print data
url = urllib2.Request(baseURL, data)
xmlData = urllib2.urlopen(url).read()
xml = etree.fromstring(xmlData)
for node in xml.xpath('node/tag[@k="name"]'):
print node.get('v')
return ''
| Python | 0 |
2313a796842cbe65563a62fe12edec06c4112531 | Add YEARS_PEY_DAY. | pyrate/core/ifgconstants.py | pyrate/core/ifgconstants.py | # This Python module is part of the PyRate software package.
#
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains a collection of constants used in
various components of the PyRate software
"""
# lookup keys for the metadata fields in PyRate GeoTIFF files
PYRATE_NCOLS = 'NCOLS'
PYRATE_NROWS = 'NROWS'
PYRATE_X_STEP = 'X_STEP'
PYRATE_Y_STEP = 'Y_STEP'
PYRATE_LAT = 'LAT'
PYRATE_LONG = 'LONG'
MASTER_DATE = 'MASTER_DATE'
MASTER_TIME = 'MASTER_TIME'
SLAVE_DATE = 'SLAVE_DATE'
SLAVE_TIME = 'SLAVE_TIME'
EPOCH_DATE = 'EPOCH_DATE'
PYRATE_DATUM = 'DATUM'
PYRATE_TIME_SPAN = 'TIME_SPAN_YEAR'
PYRATE_WAVELENGTH_METRES = 'WAVELENGTH_METRES'
PYRATE_INCIDENCE_DEGREES = 'INCIDENCE_DEGREES'
PYRATE_INSAR_PROCESSOR = 'INSAR_PROCESSOR'
PYRATE_WEATHER_ERROR = 'WEATHER_ERROR'
PYRATE_APS_ERROR = 'APS_ERROR'
PYRATE_MAXVAR = 'CVD_MAXVAR'
PYRATE_ALPHA = 'CVD_ALPHA'
COHERENCE = 'COHERENCE_MASKED_MULTILOOKED_IFG'
MULTILOOKED = 'MULTILOOKED_IFG'
ORIG = 'ORIGINAL_IFG'
DEM = 'ORIGINAL_DEM'
MLOOKED_DEM = 'MULTILOOKED_DEM'
INCIDENCE = 'INCIDENCE_ANGLE_MAP'
MLOOKED_INC = 'MULTILOOKED_INCIDENCE_ANGLE_MAP'
INCR = 'INCREMENTAL_TIME_SLICE'
CUML = 'CUMULATIVE_TIME_SLICE'
LINRATE = 'LINEAR_RATE_MAP'
LINERROR = 'LINEAR_RATE_ERROR_MAP'
LINSAMP = 'LINEAR_RATE_SAMPLES'
PYRATE_ORBITAL_ERROR = 'ORBITAL_ERROR'
ORB_REMOVED = 'REMOVED'
APS_REMOVED = 'REMOVED'
PYRATE_REF_PHASE = 'REFERENCE_PHASE'
REF_PHASE_REMOVED = 'REMOVED'
NAN_STATUS = 'NAN_STATUS'
NAN_CONVERTED = 'CONVERTED'
DATA_TYPE = 'DATA_TYPE'
DATA_UNITS = 'DATA_UNITS'
DAYS_PER_YEAR = 365.25 # span of year, not a calendar year
YEARS_PER_DAY = 1 / DAY_PER_YEAR
SPEED_OF_LIGHT_METRES_PER_SECOND = 3e8
MM_PER_METRE = 1000
| # This Python module is part of the PyRate software package.
#
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains a collection of constants used in
various components of the PyRate software
"""
# lookup keys for the metadata fields in PyRate GeoTIFF files
PYRATE_NCOLS = 'NCOLS'
PYRATE_NROWS = 'NROWS'
PYRATE_X_STEP = 'X_STEP'
PYRATE_Y_STEP = 'Y_STEP'
PYRATE_LAT = 'LAT'
PYRATE_LONG = 'LONG'
MASTER_DATE = 'MASTER_DATE'
MASTER_TIME = 'MASTER_TIME'
SLAVE_DATE = 'SLAVE_DATE'
SLAVE_TIME = 'SLAVE_TIME'
EPOCH_DATE = 'EPOCH_DATE'
PYRATE_DATUM = 'DATUM'
PYRATE_TIME_SPAN = 'TIME_SPAN_YEAR'
PYRATE_WAVELENGTH_METRES = 'WAVELENGTH_METRES'
PYRATE_INCIDENCE_DEGREES = 'INCIDENCE_DEGREES'
PYRATE_INSAR_PROCESSOR = 'INSAR_PROCESSOR'
PYRATE_WEATHER_ERROR = 'WEATHER_ERROR'
PYRATE_APS_ERROR = 'APS_ERROR'
PYRATE_MAXVAR = 'CVD_MAXVAR'
PYRATE_ALPHA = 'CVD_ALPHA'
COHERENCE = 'COHERENCE_MASKED_MULTILOOKED_IFG'
MULTILOOKED = 'MULTILOOKED_IFG'
ORIG = 'ORIGINAL_IFG'
DEM = 'ORIGINAL_DEM'
MLOOKED_DEM = 'MULTILOOKED_DEM'
INCIDENCE = 'INCIDENCE_ANGLE_MAP'
MLOOKED_INC = 'MULTILOOKED_INCIDENCE_ANGLE_MAP'
INCR = 'INCREMENTAL_TIME_SLICE'
CUML = 'CUMULATIVE_TIME_SLICE'
LINRATE = 'LINEAR_RATE_MAP'
LINERROR = 'LINEAR_RATE_ERROR_MAP'
LINSAMP = 'LINEAR_RATE_SAMPLES'
PYRATE_ORBITAL_ERROR = 'ORBITAL_ERROR'
ORB_REMOVED = 'REMOVED'
APS_REMOVED = 'REMOVED'
PYRATE_REF_PHASE = 'REFERENCE_PHASE'
REF_PHASE_REMOVED = 'REMOVED'
NAN_STATUS = 'NAN_STATUS'
NAN_CONVERTED = 'CONVERTED'
DATA_TYPE = 'DATA_TYPE'
DATA_UNITS = 'DATA_UNITS'
DAYS_PER_YEAR = 365.25 # span of year, not a calendar year
SPEED_OF_LIGHT_METRES_PER_SECOND = 3e8
MM_PER_METRE = 1000
| Python | 0.999045 |
d43cf2adeb5bc5e5546dbf58532bfc283fc94ea8 | fix sort order of combined citation information | python/citation_vim/item.py | python/citation_vim/item.py | # -*- coding:utf-8 -*-
import collections
from citation_vim.utils import compat_str, is_current
class Item(object):
"""
Intermediary object between bibtex/zotero and unite source output.
"""
def combine(self):
pairs = collections.OrderedDict([
('Key', self.key),
('Title', self.title),
('Author(s)', self.author),
('Date', self.date),
('Tags', self.tags),
('Collections', ', '.join(self.collections)),
('Publication', self.publication),
('Issue', self.issue),
('Volume', self.volume),
('Pages', self.pages),
('Publisher', self.publisher),
('Language', self.language),
('Abstract', self.abstract),
('Notes', self.notes),
('File(s)', self.file),
('URL', self.url),
('DOI', self.doi),
('ISBN', self.isbn)
])
self.combined = u"Available citation information:\n"
for key, value in pairs.items():
if value:
self.combined += " " + key + " : " + compat_str(value) + "\n"
| # -*- coding:utf-8 -*-
from citation_vim.utils import compat_str, is_current
class Item(object):
"""
Intermediary object between bibtex/zotero and unite source output.
"""
def combine(self):
pairs = {
'Key': self.key,
'Title': self.title,
'Author(s)': self.author,
'Date': self.date,
'Tags': self.tags,
'Collections': ', '.join(self.collections),
'Publication': self.publication,
'Issue': self.issue,
'Volume': self.volume,
'Pages': self.pages,
'Publisher': self.publisher,
'Language': self.language,
'Abstract': self.abstract,
'Notes': self.notes,
'File(s)': self.file,
'URL': self.url,
'DOI': self.doi,
'ISBN': self.isbn}
self.combined = u"Available citation information:\n"
for key, value in pairs.items():
if value:
self.combined += " " + key + " : " + compat_str(value) + "\n"
| Python | 0.000497 |
7c787829b9c894f5c0de6dd50d6144e423d70f5c | uses ancient M2Crypto | python/cvmfs/certificate.py | python/cvmfs/certificate.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by René Meusel
This file is part of the CernVM File System auxiliary tools.
"""
import M2Crypto
from M2Crypto import EVP, X509, m2, util
from distutils.version import LooseVersion, StrictVersion
from _common import CompressedObject
class Certificate(CompressedObject):
""" Wraps an X.509 certificate object as stored in CVMFS repositories """
def __init__(self, certificate_file):
CompressedObject.__init__(self, certificate_file)
cert = X509.load_cert_string(self.get_uncompressed_file().read())
self.openssl_certificate = cert
def __str__(self):
return "<Certificate " + self.get_fingerprint() + ">"
def __repr__(self):
return self.__str__()
def get_openssl_certificate(self):
""" return the certificate as M2Crypto.X509 object """
return self.openssl_certificate
def _get_fingerprint(self, algorithm='sha1'):
""" Workaround for RHEL5 with ancient version of M2Crypto """
if LooseVersion(M2Crypto.version) < StrictVersion("0.17"):
der = self.openssl_certificate.as_der()
md = EVP.MessageDigest(algorithm)
md.update(der)
digest = md.final()
return hex(util.octx_to_num(digest))[2:-1].upper()
else:
return self.openssl_certificate.get_fingerprint()
def _check_signature(self, pubkey, signature):
""" Workaround for RHEL5 with ancient version of M2Crypto """
if LooseVersion(M2Crypto.version) < StrictVersion("0.18"):
return m2.verify_final(pubkey.ctx, signature, pubkey.pkey)
else:
return pubkey.verify_final(signature)
def get_fingerprint(self, algorithm='sha1'):
""" returns the fingerprint of the X509 certificate """
fp = self._get_fingerprint()
return ':'.join([ x + y for x, y in zip(fp[0::2], fp[1::2]) ])
def verify(self, signature, message):
""" verify a given signature to an expected 'message' string """
pubkey = self.openssl_certificate.get_pubkey()
pubkey.reset_context(md='sha1')
pubkey.verify_init()
pubkey.verify_update(message)
return self._check_signature(pubkey, signature)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by René Meusel
This file is part of the CernVM File System auxiliary tools.
"""
from M2Crypto import X509
from _common import CompressedObject
class Certificate(CompressedObject):
""" Wraps an X.509 certificate object as stored in CVMFS repositories """
def __init__(self, certificate_file):
CompressedObject.__init__(self, certificate_file)
cert = X509.load_cert_string(self.get_uncompressed_file().read())
self.openssl_certificate = cert
def __str__(self):
return "<Certificate " + self.get_fingerprint() + ">"
def __repr__(self):
return self.__str__()
def get_openssl_certificate(self):
""" return the certificate as M2Crypto.X509 object """
return self.openssl_certificate
def get_fingerprint(self, algorithm='sha1'):
""" returns the fingerprint of the X509 certificate """
fp = self.openssl_certificate.get_fingerprint(algorithm)
return ':'.join([ x + y for x, y in zip(fp[0::2], fp[1::2]) ])
def verify(self, signature, message):
""" verify a given signature to an expected 'message' string """
pubkey = self.openssl_certificate.get_pubkey()
pubkey.reset_context(md='sha1')
pubkey.verify_init()
pubkey.verify_update(message)
return pubkey.verify_final(signature)
| Python | 0.999944 |
92762566c0e80e24d8954b9a4b2f7d148a3c89da | Use google-hosted jquery for admin page | python/ecep/portal/admin.py | python/ecep/portal/admin.py | # Copyright (c) 2012 Azavea, Inc.
# See LICENSE in the project root for copying permission
from portal.models import Location
from django.contrib.gis import admin
from django import forms
from portal.widgets import MapWidget
from django.contrib.gis.geos import Point
import re
from django.conf import settings
class LocationForm(forms.ModelForm):
"""Form subclass for location model form to use custom widget for google map
and a custom clean method to properly handle points passed in as strings
"""
geom = forms.CharField(label="Geocoded Point", widget=MapWidget())
def get_point(self, geom_string):
"""Takes a geom_string from cleaned_data and converts it to a point
object. If unable to convert, raises a validation error.
Arguments:
- `geom_string`: string returned by the 'geom' input from the LocationForm
Takes the form of 'POINT (<LNG> <LAT>)'
"""
try:
split_geom_string = re.findall(r'([-.\w]+)', geom_string)
lng = float(split_geom_string[1])
lat = float(split_geom_string[2])
return Point(lng, lat)
except (IndexError, ValueError):
raise forms.ValidationError("Invalid point specified for location")
def clean(self):
"""
Need to create a Point object from string returned by form because
of the way the map fills in the geocoded location form
"""
self.cleaned_data = super(LocationForm, self).clean()
try:
self.cleaned_data['geom'] = self.get_point(self.cleaned_data['geom'])
return self.cleaned_data
except forms.ValidationError:
# Need to pass a dummy point if invalid, or we get a 500 error
# This point does not get saved, nothing happens to it
self.cleaned_data['geom'] = Point(0, 0)
raise forms.ValidationError("Invalid point specified for location")
class Meta:
model = Location
class LocationAdmin(admin.OSMGeoAdmin):
class Media:
css = { 'all': ('css/admin-map.css',)}
js = ('http://maps.googleapis.com/maps/api/js?key=%s&sensor=false&language=%s' % (settings.GOOGLE_MAPS_KEY, settings.LANGUAGE_CODE), 'js/admin-map.js', "//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js")
list_display = ('site_name', 'address', 'zip', 'phone', 'id', )
list_filter = ('is_hs', 'is_ehs', 'accept_ccap', 'is_cps_based', 'is_community_based',
'is_age_lt_3', 'is_age_gt_3', 'is_full_day', 'is_full_week', 'is_full_year',
'is_part_day', 'is_part_week', 'is_school_year', 'is_home_visiting')
search_fields = ['site_name', 'address', 'zip', 'language_1', 'language_2', 'language_3']
readonly_fields = ['neighborhood']
form = LocationForm
fieldsets = [
(None, {'fields': ['site_name', 'neighborhood']}),
('Address', {'fields': [('address', 'city'), ('state', 'zip'), 'geom']}),
('Contact', {'fields': ['phone', 'url']}),
('Hours/Duration', {'fields': [('is_full_day', 'is_part_day'),
('is_full_week', 'is_part_week'),
('is_school_year', 'is_full_year')]}),
('Flags', {'fields': [('is_age_lt_3', 'is_age_gt_3'),
('is_community_based', 'is_cps_based'),
('is_hs', 'is_ehs'), 'accept_ccap']}),
('Other', {'fields': [('ages', 'prg_hours', 'accred'),
('language_1', 'language_2', 'language_3'),
'q_stmt']}),
]
admin.site.register(Location, LocationAdmin)
| # Copyright (c) 2012 Azavea, Inc.
# See LICENSE in the project root for copying permission
from portal.models import Location
from django.contrib.gis import admin
from django import forms
from portal.widgets import MapWidget
from django.contrib.gis.geos import Point
import re
from django.conf import settings
class LocationForm(forms.ModelForm):
"""Form subclass for location model form to use custom widget for google map
and a custom clean method to properly handle points passed in as strings
"""
geom = forms.CharField(label="Geocoded Point", widget=MapWidget())
def get_point(self, geom_string):
"""Takes a geom_string from cleaned_data and converts it to a point
object. If unable to convert, raises a validation error.
Arguments:
- `geom_string`: string returned by the 'geom' input from the LocationForm
Takes the form of 'POINT (<LNG> <LAT>)'
"""
try:
split_geom_string = re.findall(r'([-.\w]+)', geom_string)
lng = float(split_geom_string[1])
lat = float(split_geom_string[2])
return Point(lng, lat)
except (IndexError, ValueError):
raise forms.ValidationError("Invalid point specified for location")
def clean(self):
"""
Need to create a Point object from string returned by form because
of the way the map fills in the geocoded location form
"""
self.cleaned_data = super(LocationForm, self).clean()
try:
self.cleaned_data['geom'] = self.get_point(self.cleaned_data['geom'])
return self.cleaned_data
except forms.ValidationError:
# Need to pass a dummy point if invalid, or we get a 500 error
# This point does not get saved, nothing happens to it
self.cleaned_data['geom'] = Point(0, 0)
raise forms.ValidationError("Invalid point specified for location")
class Meta:
model = Location
class LocationAdmin(admin.OSMGeoAdmin):
class Media:
css = { 'all': ('css/admin-map.css',)}
js = ('http://maps.googleapis.com/maps/api/js?key=%s&sensor=false&language=%s' % (settings.GOOGLE_MAPS_KEY, settings.LANGUAGE_CODE), 'js/admin-map.js', 'js/jquery.js')
list_display = ('site_name', 'address', 'zip', 'phone', 'id', )
list_filter = ('is_hs', 'is_ehs', 'accept_ccap', 'is_cps_based', 'is_community_based',
'is_age_lt_3', 'is_age_gt_3', 'is_full_day', 'is_full_week', 'is_full_year',
'is_part_day', 'is_part_week', 'is_school_year', 'is_home_visiting')
search_fields = ['site_name', 'address', 'zip', 'language_1', 'language_2', 'language_3']
readonly_fields = ['neighborhood']
form = LocationForm
fieldsets = [
(None, {'fields': ['site_name', 'neighborhood']}),
('Address', {'fields': [('address', 'city'), ('state', 'zip'), 'geom']}),
('Contact', {'fields': ['phone', 'url']}),
('Hours/Duration', {'fields': [('is_full_day', 'is_part_day'),
('is_full_week', 'is_part_week'),
('is_school_year', 'is_full_year')]}),
('Flags', {'fields': [('is_age_lt_3', 'is_age_gt_3'),
('is_community_based', 'is_cps_based'),
('is_hs', 'is_ehs'), 'accept_ccap']}),
('Other', {'fields': [('ages', 'prg_hours', 'accred'),
('language_1', 'language_2', 'language_3'),
'q_stmt']}),
]
admin.site.register(Location, LocationAdmin)
| Python | 0 |
7fa8db8b52f6d066dc369912b0f9b227d71fa939 | Fix wex.value docsring | wex/value.py | wex/value.py | """ Extracted data values are represented with tab-separated fields.
The right-most field on each line is the value, all preceding fields
are labels that describe the value.
The labels and the value are all JSON encoded.
So for example, a value 9.99 with a labels ``product`` and ``price`` would
look like::
"product"\t"price"\t9.99\n
And we could decode this line with the following Python snippet:
.. code-block:: pycon
>>> import json
>>> line = '"product"\\t"price"\\t9.99\\n'
>>> [json.loads(s) for s in line.split('\\t')]
[u'product', u'price', 9.99]
Using tab-delimiters is convenient for downstream processing using Unix
command line tools such as :command:`cut` and :command:`grep`.
"""
import sys
from types import GeneratorType
from json import JSONEncoder
from functools import partial
from operator import itemgetter
from six import PY2, text_type
from six.moves import map
import logging; logger = logging.getLogger(__name__)
TAB = '\t'
NL = '\n'
if PY2:
JSONEncoder = partial(JSONEncoder, encoding='UTF-8')
json_encode = JSONEncoder(
skipkeys=False,
check_circular=True,
allow_nan=True,
indent=None,
separators=(',', ':'),
default=None,
sort_keys=True,
# may need to make this an argument at some point,
# but for now let's assume UTF-8 is ok on the output.
ensure_ascii=False,
).encode
class Value(tuple):
exit_on_exc = False
debug_on_exc = False
value = property(itemgetter(-1))
labels = property(itemgetter(slice(0, -1)))
def __new__(cls, value=(None,)):
if not isinstance(value, tuple):
value = (value,)
return super(Value, cls).__new__(cls, value)
def text(self):
""" Returns the text this value as a labelled JSON line. """
encoded = []
for field in self:
try:
encoded.append(json_encode(field))
except TypeError:
encoded.append('#' + text_type(repr(self.value)) + '!')
return TAB.join(encoded) + NL
def label(self, *labels):
""" Adds zero or more labels to this value. """
return self.__class__(tuple(map(text_type, labels)) + self)
def yield_values(extract, *args, **kw):
""" Yields ``Value`` objects extracted using ``extract``. """
exc_info = ()
try:
res = extract(*args, **kw)
if type(res) is GeneratorType:
for val in res:
yield Value(val)
else:
yield Value(res)
except Exception as exc:
exc_info = sys.exc_info()
yield Value(exc)
if any(exc_info) and (Value.exit_on_exc or Value.debug_on_exc):
if Value.debug_on_exc:
import pdb
pdb.post_mortem(exc_info[2])
else:
raise exc_info[0], exc_info[1], exc_info[2]
| """ Extracted data values are represented with tab-separated fields.
The right-most field on each line is the value, all preceding fields
are labels that describe the value.
The labels and the value are all JSON encoded.
So for example, a value 9.99 with a labels ``product`` and ``price`` would
look like::
"product"\t"price"\t9.99\n
And we could decode this line with the following Python snippet:
.. code-block:: pycon
>>> import json
>>> line = '"product"\\t"price"\\t9.99\\n'
>>> [json.loads(s) for s in line.split('\t')]
[u'product', u'price', 9.99]
Using tab-delimiters is convenient for downstream processing using Unix
command line tools such as :command:`cut` and :command:`grep`.
"""
import sys
from types import GeneratorType
from json import JSONEncoder
from functools import partial
from operator import itemgetter
from six import PY2, text_type
from six.moves import map
import logging; logger = logging.getLogger(__name__)
TAB = '\t'
NL = '\n'
if PY2:
JSONEncoder = partial(JSONEncoder, encoding='UTF-8')
json_encode = JSONEncoder(
skipkeys=False,
check_circular=True,
allow_nan=True,
indent=None,
separators=(',', ':'),
default=None,
sort_keys=True,
# may need to make this an argument at some point,
# but for now let's assume UTF-8 is ok on the output.
ensure_ascii=False,
).encode
class Value(tuple):
exit_on_exc = False
debug_on_exc = False
value = property(itemgetter(-1))
labels = property(itemgetter(slice(0, -1)))
def __new__(cls, value=(None,)):
if not isinstance(value, tuple):
value = (value,)
return super(Value, cls).__new__(cls, value)
def text(self):
""" Returns the text this value as a labelled JSON line. """
encoded = []
for field in self:
try:
encoded.append(json_encode(field))
except TypeError:
encoded.append('#' + text_type(repr(self.value)) + '!')
return TAB.join(encoded) + NL
def label(self, *labels):
""" Adds zero or more labels to this value. """
return self.__class__(tuple(map(text_type, labels)) + self)
def yield_values(extract, *args, **kw):
""" Yields ``Value`` objects extracted using ``extract``. """
exc_info = ()
try:
res = extract(*args, **kw)
if type(res) is GeneratorType:
for val in res:
yield Value(val)
else:
yield Value(res)
except Exception as exc:
exc_info = sys.exc_info()
yield Value(exc)
if any(exc_info) and (Value.exit_on_exc or Value.debug_on_exc):
if Value.debug_on_exc:
import pdb
pdb.post_mortem(exc_info[2])
else:
raise exc_info[0], exc_info[1], exc_info[2]
| Python | 0.000017 |
fb786e6fa254bf9b041b58ae3ba524257892bea8 | Make payloads larger for tests. | timelines.py | timelines.py | from sentry.utils.runner import configure
configure()
import contextlib
import functools
import logging
import random
import sys
import time
import uuid
from sentry.app import timelines
from sentry.timelines.redis import Record
logging.basicConfig(level=logging.DEBUG)
@contextlib.contextmanager
def timer(preamble):
start = time.time()
yield
print '{0} in {1} ms.'.format(preamble, (time.time() - start) * 1000)
# Load a bunch of records.
n_timelines = int(sys.argv[1])
n_records = int(sys.argv[2])
payload = ' ' * 12000
calls = []
with timer('Generated {0} records to be loaded into {1} timelines'.format(n_records, n_timelines)):
for i in xrange(0, n_records):
p = random.randint(1, n_timelines)
record = Record(uuid.uuid1().hex, payload, time.time())
calls.append(functools.partial(timelines.add, 'projects/{0}'.format(p), record))
with timer('Loaded {0} records'.format(len(calls))):
for call in calls:
call()
# Move them into the "ready" state.
ready = set()
with timer('Scheduled timelines for digestion'):
for chunk in timelines.schedule(time.time()):
for timeline, timestamp in chunk:
ready.add(timeline)
# Run them through the digestion process.
with timer('Digested {0} timelines'.format(len(ready))):
for timeline in ready:
with timelines.digest(timeline) as records:
i = 0
# Iterate through the records to ensure that all data is deserialized.
for i, record in enumerate(records, 1):
pass
# Run the scheduler again (using a future cutoff time to accomodate for backoff.)
ready.clear()
with timer('Scheduled timelines for digestion'):
for chunk in timelines.schedule(time.time() + timelines.backoff(1)):
for timeline, timestamp in chunk:
ready.add(timeline)
# Run them through the digestion process again (this should result in all of
# the items being taken out of the schedule.)
with timer('Digested {0} timelines'.format(len(ready))):
for timeline in ready:
with timelines.digest(timeline) as records:
i = 0
for i, record in enumerate(records, 1):
pass
# Check to make sure we're not leaking any data.
with timelines.cluster.all() as client:
result = client.keys('*')
for host, value in result.value.iteritems():
assert not value
| from sentry.utils.runner import configure
configure()
import contextlib
import functools
import logging
import random
import sys
import time
import uuid
from sentry.app import timelines
from sentry.timelines.redis import Record
logging.basicConfig(level=logging.DEBUG)
@contextlib.contextmanager
def timer(preamble):
start = time.time()
yield
print '{0} in {1} ms.'.format(preamble, (time.time() - start) * 1000)
# Load a bunch of records.
n_timelines = int(sys.argv[1])
n_records = int(sys.argv[2])
calls = []
with timer('Generated {0} records to be loaded into {1} timelines'.format(n_records, n_timelines)):
for i in xrange(0, n_records):
p = random.randint(1, n_timelines)
record = Record(uuid.uuid1().hex, 'payload', time.time())
calls.append(functools.partial(timelines.add, 'projects/{0}'.format(p), record))
with timer('Loaded {0} records'.format(len(calls))):
for call in calls:
call()
# Move them into the "ready" state.
ready = set()
with timer('Scheduled timelines for digestion'):
for chunk in timelines.schedule(time.time()):
for timeline, timestamp in chunk:
ready.add(timeline)
# Run them through the digestion process.
with timer('Digested {0} timelines'.format(len(ready))):
for timeline in ready:
with timelines.digest(timeline) as records:
i = 0
# Iterate through the records to ensure that all data is deserialized.
for i, record in enumerate(records, 1):
pass
# Run the scheduler again (using a future cutoff time to accomodate for backoff.)
ready.clear()
with timer('Scheduled timelines for digestion'):
for chunk in timelines.schedule(time.time() + timelines.backoff(1)):
for timeline, timestamp in chunk:
ready.add(timeline)
# Run them through the digestion process again (this should result in all of
# the items being taken out of the schedule.)
with timer('Digested {0} timelines'.format(len(ready))):
for timeline in ready:
with timelines.digest(timeline) as records:
i = 0
for i, record in enumerate(records, 1):
pass
# Check to make sure we're not leaking any data.
with timelines.cluster.all() as client:
result = client.keys('*')
for host, value in result.value.iteritems():
assert not value
| Python | 0.000001 |
4e3ebcf98e2bfb2cea1f92b66e5205194744482a | add level 11 | pythonchallenge/level_11.py | pythonchallenge/level_11.py | import unittest
import urllib
import requests
import logging
import re
import urllib
import os
import os.path
import Image
import ImageDraw
from StringIO import StringIO
# Default is warning, it's to suppress requests INFO log
logging.basicConfig(format='%(message)s')
def solution():
url = 'http://www.pythonchallenge.com/pc/return/cave.jpg'
r = requests.get(url, auth=('huge', 'file'))
image_file = Image.open(StringIO(r.content))
new_image = Image.new('RGB', (640, 480), 'black')
new_image_stroke = ImageDraw.Draw(new_image)
for y in xrange(480):
for x in xrange(640):
if y % 2 == 0 and x % 2 == 0 or y % 2 == 1 and x % 2 == 1:
new_image.putpixel((x, y), image_file.getpixel((x, y)))
new_image.save('cave_edited.jpg')
return 'evil'
class SolutionTest(unittest.TestCase):
def setUp(self):
self.prefix = "http://www.pythonchallenge.com/pc/return/"
self.suffix = ".html"
def tearDown(self):
os.remove('cave_edited.jpg')
def test_solution(self):
actual = solution()
expected = 'evil'
cred = ('huge', 'file')
self.assertEquals(actual, expected)
origin_url = ''.join([self.prefix, 'evil', self.suffix])
try:
r = requests.get(origin_url, auth=cred)
except:
raise
self.assertTrue(r.ok)
next_entry = [re.sub(r'(.*)URL=(.*)\.html\"\>', r'\2', line)
for line in r.iter_lines() if re.match(r'.*URL.*', line)]
r.close()
if len(next_entry) != 0:
r = requests.get(
''.join([self.prefix, next_entry[0], self.suffix], auth=expected))
logging.warn('Level 12 is %s with %s' % (r.url, cred))
else:
logging.warn('Level 12 is %s with %s' % (origin_url, cred))
if __name__ == "__main__":
unittest.main(failfast=True)
| import unittest
import urllib
import requests
import logging
import re
import urllib
import os
import os.path
import Image
import ImageDraw
# Default is warning, it's to suppress requests INFO log
logging.basicConfig(format='%(message)s')
def solution():
url = 'http://www.pythonchallenge.com/pc/return/cave.jpg'
urllib.urlretrieve(url, 'cave.jpg')
image_file = Image.open('cave.jpg')
new_image = Image.new('RGB', (640, 480), 'black')
new_image_stroke = ImageDraw.Draw(new_image)
for y in range(480):
for x in range(640):
if y % 2 == 0 and x % 2 == 0 or y % 2 == 1 and x % 2 == 1:
new_image.putpixel((x, y), image_file.getpixel(x, y))
new_image.save('cave_edited.jpg')
return 'evil'
class SolutionTest(unittest.TestCase):
def setUp(self):
self.prefix = "http://www.pythonchallenge.com/pc/return/"
self.suffix = ".html"
def test_solution(self):
actual = solution()
expected = 'evil'
cred = ('huge', 'file')
self.assertEquals(actual, expected)
origin_url = ''.join([self.prefix, 'evil', self.suffix])
try:
r = requests.get(origin_url, auth=cred)
except:
raise
self.assertTrue(r.ok)
next_entry = [re.sub(r'(.*)URL=(.*)\.html\"\>', r'\2', line)
for line in r.iter_lines() if re.match(r'.*URL.*', line)]
r.close()
if len(next_entry) != 0:
r = requests.get(
''.join([self.prefix, next_entry[0], self.suffix], auth=expected))
logging.warn('Level 12 is %s with %s' % (r.url, cred))
else:
logging.warn('Level 12 is %s with %s' % (origin_url, cred))
if __name__ == "__main__":
unittest.main(failfast=True)
| Python | 0.000496 |
fb5ad293c34387b1ab7b7b7df3aed3942fdd9282 | Add default to max_places in proposal form | src/webapp/activities/forms.py | src/webapp/activities/forms.py | # -*- encoding: utf-8 -*-
from django import forms
class ActivitySubscribeForm(forms.Form):
id = forms.IntegerField(
min_value = 0, required=True,
widget = forms.HiddenInput,
)
title = forms.CharField(
max_length=100, required=True,
widget = forms.HiddenInput,
)
class ProposalForm(forms.Form):
title = forms.CharField(
max_length=100, required=True,
)
subtitle = forms.CharField(
required = False,
widget = forms.Textarea,
)
duration = forms.CharField(
max_length=50, required=True,
)
max_places = forms.IntegerField(
min_value = 0, required=True, initial = 0,
)
show_owners = forms.BooleanField(
initial = False, required = False,
)
requires_inscription = forms.BooleanField(
initial = False, required = False,
)
owners = forms.CharField(
required = False,
widget = forms.Textarea,
)
organizers = forms.CharField(
required = False,
widget = forms.Textarea,
)
text = forms.CharField(
required = False,
widget = forms.Textarea,
)
logistics = forms.CharField(
required = False,
widget = forms.Textarea,
)
notes_organization = forms.CharField(
required = False,
widget = forms.Textarea,
)
| # -*- encoding: utf-8 -*-
from django import forms
class ActivitySubscribeForm(forms.Form):
id = forms.IntegerField(
min_value = 0, required=True,
widget = forms.HiddenInput,
)
title = forms.CharField(
max_length=100, required=True,
widget = forms.HiddenInput,
)
class ProposalForm(forms.Form):
title = forms.CharField(
max_length=100, required=True,
)
subtitle = forms.CharField(
required = False,
widget = forms.Textarea,
)
duration = forms.CharField(
max_length=50, required=True,
)
max_places = forms.IntegerField(
min_value = 0, required=True,
)
show_owners = forms.BooleanField(
initial = False, required = False,
)
requires_inscription = forms.BooleanField(
initial = False, required = False,
)
owners = forms.CharField(
required = False,
widget = forms.Textarea,
)
organizers = forms.CharField(
required = False,
widget = forms.Textarea,
)
text = forms.CharField(
required = False,
widget = forms.Textarea,
)
logistics = forms.CharField(
required = False,
widget = forms.Textarea,
)
notes_organization = forms.CharField(
required = False,
widget = forms.Textarea,
)
| Python | 0.000001 |
ac629293c43841817ca81073a28b683a6ec2a2ec | fix webshots support | src/webilder/webshots/utils.py | src/webilder/webshots/utils.py | import httplib
import urllib, urllib2
import cookielib
import re
import wbz
class WBZLoginException(Exception):
pass
class LeechPremiumOnlyPhotoError(Exception):
pass
class LeechHighQualityForPremiumOnlyError(Exception):
pass
def get_cookie(user, password):
"""Returns a webshots daily cookie given a user and a password."""
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
response = opener.open(
'http://www.webshots.com/login',
urllib.urlencode({'done': '',
'username': user,
'password': password,
'lbReferer': 'http://www.webshots.com/',
'action': 'lb'}))
r = response.read().lower()
if '"c":-1' in r:
raise WBZLoginException, 'Incorrect username or password.'
for cookie in cj:
if cookie.name=='daily':
return cookie.value
else:
raise WBZLoginException, "Cookie not found!"
def get_download_list(config):
if not config.get('webshots.enabled'):
return []
IMAGE_REGEX = r'<a href="(/pro/photo/([0-9]+)\?path=/archive)".*?<p title.*?>(.*?)</p>.*?<a href="(/entry.*?)" class="hiResLink"'
page = urllib.urlopen(
'http://www.webshots.com/pro/category/archive?sort=newest').read()
photos = re.findall(IMAGE_REGEX, page, re.DOTALL)
l = []
for image_link, photo, title, high_res_link in photos:
l.append({
'name': 'webshots_d%s.jpg' % photo,
'title': title,
'data': {
'photo': photo,
'image_link': image_link,
'high_res_link': high_res_link
}
});
return l
def get_photo_stream(config, photo):
cookie = config.get('webshots.cookie')
if not cookie:
cookie = get_cookie(
config.get('webshots.username'),
config.get('webshots.password'))
config.set('webshots.cookie', cookie)
config.save_config()
headers = {'Cookie':
'daily='+config.get('webshots.cookie')+';desktop-client=unknown;site-visits=1',
}
url = 'http://www.webshots.com' + photo['data']['high_res_link'].replace(
'res=high',
'res=%s' % config.get('webshots.quality'))
opener = urllib.FancyURLopener()
opener.addheader('Cookie',headers['Cookie'])
resp = opener.open(url)
if 'text/html' in resp.info().getheader('content-type'):
r = resp.read()
match = re.search(r'click <a href="(.*?)">here</a>', r)
if not match:
raise ValueError, "Unable to download photo %s" % photo['name']
url = match.groups()[0]
if not url.startswith('http://p.webshots.net/'):
raise ValueError, "Unable to download photo %s" % photo['name']
req = urllib2.Request(url, '', headers)
resp = urllib2.urlopen(req)
return resp
def process_photo(config, photo, f):
img = wbz.open(f, 'r')
metadata = img.get_metadata()
metadata['url'] = photo['data']['image_link']
data = img.get_image_data()
return data, metadata
| import httplib
import urllib, urllib2
import cookielib
import re
import wbz
class WBZLoginException(Exception):
pass
class LeechPremiumOnlyPhotoError(Exception):
pass
class LeechHighQualityForPremiumOnlyError(Exception):
pass
def get_cookie(user, password):
"""Returns a webshots daily cookie given a user and a password."""
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
response = opener.open(
'http://daily.webshots.com/login',
urllib.urlencode({'username': user,
'password': password,
'done': 'http://www.webshots.com/login'}))
r = response.read().lower()
if 'username or password' in r or 'username and password' in r:
raise WBZLoginException, 'Incorrect username or password.'
for cookie in cj:
if cookie.name=='daily':
return cookie.value
else:
raise WBZLoginException, "Cookie not found!"
def get_download_list(config):
if not config.get('webshots.enabled'):
return []
DAILYPIC_RE = r'(http://www.webshots.com/g/d.*/.*/([0-9]*).html)'
PHOTO_DESCRIPTION = r'alt="([^"]+)" src="http://p.webshots.com/ProThumbs/[0-9]+/%s_wallpaper150.jpg.*\n.*<em(.*)\n'
page = urllib.urlopen('http://www.webshots.com').read()
photos = re.findall(DAILYPIC_RE, page)
l = []
for image_link, photo in photos:
match = re.search(PHOTO_DESCRIPTION % photo, page)
if match:
title, nextline = match.groups()
is_premium = 'Premium Only' in nextline
else:
title, is_premium = '', False
l.append({
'name': 'webshots_d%s.jpg' % photo,
'title': title,
'data': {
'photo': photo,
'image_link': image_link,
'is_premium': is_premium,
}
});
return l
def get_photo_stream(config, photo):
cookie = config.get('webshots.cookie')
if not cookie:
cookie = get_cookie(
config.get('webshots.username'),
config.get('webshots.password'))
config.set('webshots.cookie', cookie)
config.save_config()
args = urllib.urlencode({
'res' : config.get('webshots.quality'),
'targetmode' : 'daily',
'photos' : photo['data']['photo']})
headers = {'Cookie':
'daily='+config.get('webshots.cookie'),
}
opener = urllib.FancyURLopener()
opener.addheader('Cookie',headers['Cookie'])
resp = opener.open('http://www.webshots.com/scripts/PhotoDownload.fcgi?'+args)
if 'text/html' in resp.info().getheader('content-type'):
r = resp.read()
if 'r/Premium/Popup/Exclusive' in r:
raise LeechPremiumOnlyPhotoError, "Only Webshots premium members can download this photo."
if ('r/Premium/Popup/Wide' in r) or ('r/Premium/Popup/High' in r):
raise LeechHighQualityForPremiumOnlyError, "Only Webshots Premium members can download highest quality or wide photos."
match = re.search("document.location.href='([^']+)'", r)
if match:
req = urllib2.Request('http://www.webshots.com' +
match.groups()[0], '', headers)
resp = urllib2.urlopen(req)
else:
raise ValueError, "Unable to download photo %s" % photo['name']
return resp
def process_photo(config, photo, f):
img = wbz.open(f, 'r')
metadata = img.get_metadata()
metadata['url'] = photo['data']['image_link']
data = img.get_image_data()
return data, metadata
| Python | 0 |
3b5f322d8fe76251b322b2d81cecf6abbee5e4bd | rename python class method | python/dllib/src/bigdl/dllib/feature/image/imagePreprocessing.py | python/dllib/src/bigdl/dllib/feature/image/imagePreprocessing.py | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import *
from zoo.feature.common import Preprocessing
if sys.version >= '3':
long = int
unicode = str
class Resize(Preprocessing):
"""
image resize
"""
def __init__(self, resizeH, resizeW, bigdl_type="float"):
super(Resize, self).__init__(bigdl_type, resizeH, resizeW)
class ChannelNormalizer(Preprocessing):
"""
image norm
"""
def __init__(self, meanR, meanG, meanB, stdR, stdG, stdB, bigdl_type="float"):
super(ChannelNormalizer, self).__init__(bigdl_type, meanR, meanG, meanB, stdR, stdG, stdB)
class MatToTensor(Preprocessing):
"""
MatToTensor
"""
def __init__(self, bigdl_type="float"):
super(MatToTensor, self).__init__(bigdl_type)
class CenterCrop(Preprocessing):
"""
CenterCrop
"""
def __init__(self, cropWidth, cropHeight, bigdl_type="float"):
super(CenterCrop, self).__init__(bigdl_type, cropWidth, cropHeight)
| #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import *
from zoo.feature.common import Preprocessing
if sys.version >= '3':
long = int
unicode = str
class Resize(Preprocessing):
"""
image resize
"""
def __init__(self, resizeH, resizeW, bigdl_type="float"):
super(Resize, self).__init__(bigdl_type, resizeH, resizeW)
class ChannelNormalizer(Preprocessing):
"""
image norm
"""
def __init__(self, meanR, meanG, meanB, stdR, stdG, stdB, bigdl_type="float"):
super(ChannelNormalizer, self).__init__(bigdl_type, meanR, meanG, meanB, stdR, stdG, stdB)
class MatToTensor(Preprocessing):
"""
MatToTensor
"""
def __init__(self, bigdl_type="float"):
super(MatToTensor, self).__init__(bigdl_type)
class CenterCrop(Preprocessing):
"""
CenterCrop
"""
def __init__(self, cropWidth, cropHeight, bigdl_type="float"):
super(CenterCrop, self).__init__(bigdl_type, cropWidth, cropHeight) | Python | 0.99888 |
6a84b885be67e8a9f424c2b36f50e8fe9347dbc9 | Remove duplicate constant in ipmi.py | rackattack/physical/ipmi.py | rackattack/physical/ipmi.py | import subprocess
import time
import logging
import multiprocessing.pool
class IPMI:
IPMITOOL_FILENAME = "ipmitool"
_CONCURRENCY = 4
_pool = None
def __init__(self, hostname, username, password):
self._hostname = hostname
self._username = username
self._password = password
if IPMI._pool is None:
IPMI._pool = multiprocessing.pool.ThreadPool(self._CONCURRENCY)
def off(self):
IPMI._pool.apply_async(self._powerCommand, args=("off",))
def powerCycle(self):
IPMI._pool.apply_async(self._powerCycle)
def _powerCycle(self):
self._powerCommand("off")
self._powerCommand("on")
def _powerCommand(self, command):
NUMBER_OF_RETRIES = 10
cmdLine = [
self.IPMITOOL_FILENAME, "power", command,
"-H", str(self._hostname), "-U", self._username, "-P", self._password]
for i in xrange(NUMBER_OF_RETRIES - 1):
try:
return subprocess.check_output(cmdLine, stderr=subprocess.STDOUT, close_fds=True)
except:
time.sleep(0.1)
try:
return subprocess.check_output(cmdLine, stderr=subprocess.STDOUT, close_fds=True)
except subprocess.CalledProcessError as e:
logging.error("Output: %(output)s", dict(output=e.output))
raise
| import subprocess
import time
import logging
import multiprocessing.pool
class IPMI:
IPMITOOL_FILENAME = "ipmitool"
_CONCURRENCY = 4
IPMITOOL_FILENAME = "ipmitool"
_pool = None
def __init__(self, hostname, username, password):
self._hostname = hostname
self._username = username
self._password = password
if IPMI._pool is None:
IPMI._pool = multiprocessing.pool.ThreadPool(self._CONCURRENCY)
def off(self):
IPMI._pool.apply_async(self._powerCommand, args=("off",))
def powerCycle(self):
IPMI._pool.apply_async(self._powerCycle)
def _powerCycle(self):
self._powerCommand("off")
self._powerCommand("on")
def _powerCommand(self, command):
NUMBER_OF_RETRIES = 10
cmdLine = [
self.IPMITOOL_FILENAME, "power", command,
"-H", str(self._hostname), "-U", self._username, "-P", self._password]
for i in xrange(NUMBER_OF_RETRIES - 1):
try:
return subprocess.check_output(cmdLine, stderr=subprocess.STDOUT, close_fds=True)
except:
time.sleep(0.1)
try:
return subprocess.check_output(cmdLine, stderr=subprocess.STDOUT, close_fds=True)
except subprocess.CalledProcessError as e:
logging.error("Output: %(output)s", dict(output=e.output))
raise
| Python | 0.002215 |
356a7c4d83a5289e7b30a07b0f76829e274b7481 | Fix Eventlet transport on Python 3 | raven/transport/eventlet.py | raven/transport/eventlet.py | """
raven.transport.eventlet
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sys
from raven.transport.http import HTTPTransport
try:
import eventlet
try:
from eventlet.green import urllib2 as eventlet_urllib2
except ImportError:
from eventlet.green.urllib import request as eventlet_urllib2
has_eventlet = True
except:
has_eventlet = False
class EventletHTTPTransport(HTTPTransport):
scheme = ['eventlet+http', 'eventlet+https']
def __init__(self, parsed_url, pool_size=100, **kwargs):
if not has_eventlet:
raise ImportError('EventletHTTPTransport requires eventlet.')
super(EventletHTTPTransport, self).__init__(parsed_url, **kwargs)
# remove the eventlet+ from the protocol, as it is not a real protocol
self._url = self._url.split('+', 1)[-1]
def _send_payload(self, payload):
req = eventlet_urllib2.Request(self._url, headers=payload[1])
try:
if sys.version_info < (2, 6):
response = eventlet_urllib2.urlopen(req, payload[0]).read()
else:
response = eventlet_urllib2.urlopen(req, payload[0],
self.timeout).read()
return response
except Exception as err:
return err
def send(self, data, headers):
"""
Spawn an async request to a remote webserver.
"""
eventlet.spawn(self._send_payload, (data, headers))
| """
raven.transport.eventlet
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sys
from raven.transport.http import HTTPTransport
try:
import eventlet
from eventlet.green import urllib2 as eventlet_urllib2
has_eventlet = True
except:
has_eventlet = False
class EventletHTTPTransport(HTTPTransport):
scheme = ['eventlet+http', 'eventlet+https']
def __init__(self, parsed_url, pool_size=100, **kwargs):
if not has_eventlet:
raise ImportError('EventletHTTPTransport requires eventlet.')
super(EventletHTTPTransport, self).__init__(parsed_url, **kwargs)
# remove the eventlet+ from the protocol, as it is not a real protocol
self._url = self._url.split('+', 1)[-1]
def _send_payload(self, payload):
req = eventlet_urllib2.Request(self._url, headers=payload[1])
try:
if sys.version_info < (2, 6):
response = eventlet_urllib2.urlopen(req, payload[0]).read()
else:
response = eventlet_urllib2.urlopen(req, payload[0],
self.timeout).read()
return response
except Exception as err:
return err
def send(self, data, headers):
"""
Spawn an async request to a remote webserver.
"""
eventlet.spawn(self._send_payload, (data, headers))
| Python | 0.000002 |
b30befbf39009ed566dbb7ff725de05bad2be990 | Add link to permissions management doc for ExportTables. (#520) | rdr_client/export_tables.py | rdr_client/export_tables.py | # Exports the entire contents of database tables to Unicode CSV files stored in GCS.
# Used instead of Cloud SQL export because it handles newlines and null characters properly.
#
# Documentation of permissions management:
# https://docs.google.com/document/d/1vKiu2zcSy97DQTIuSezr030kTyeDthome9XzNy98B6M
#
# Usage: ./run_client.sh --project <PROJECT> --account <ACCOUNT> \
# --service_account exporter@<PROJECT>.iam.gserviceaccount.com export_tables.py \
# --database rdr --tables code,participant --directory test_directory
#
# "directory" indicates a directory inside the GCS bucket to write the files to
#
# If "rdr" is chosen for the database, the data will be written to <ENVIRONMENT>-rdr-export;
# If "cdm" or "voc" are chosen, the data will be written to <ENVIRONMENT>-cdm.
import logging
from client import Client
from main_util import get_parser, configure_logging
def export_tables(client):
table_names = client.args.tables.split(',')
logging.info('Exporting %s from %s to %s' % (table_names, client.args.database,
client.args.directory))
request_body = {'database': client.args.database,
'tables': table_names,
'directory': client.args.directory}
response = client.request_json('ExportTables', 'POST', request_body)
logging.info('Data is being exported to: %s' % response['destination'])
if __name__ == '__main__':
configure_logging()
parser = get_parser()
parser.add_argument('--database', help='The database to export data from', required=True)
parser.add_argument('--tables', help='A comma-separated list of tables to export',
required=True)
parser.add_argument('--directory',
help='A directory to write CSV output to inside the GCS bucket',
required=True)
export_tables(Client(parser=parser, base_path='offline'))
| # Exports the entire contents of database tables to Unicode CSV files stored in GCS.
# Used instead of Cloud SQL export because it handles newlines and null characters properly.
#
# Usage: ./run_client.sh --project <PROJECT> --account <ACCOUNT> \
# --service_account exporter@<PROJECT>.iam.gserviceaccount.com export_tables.py \
# --database rdr --tables code,participant --directory test_directory
#
# "directory" indicates a directory inside the GCS bucket to write the files to
#
# If "rdr" is chosen for the database, the data will be written to <ENVIRONMENT>-rdr-export;
# If "cdm" or "voc" are chosen, the data will be written to <ENVIRONMENT>-cdm.
import logging
from client import Client
from main_util import get_parser, configure_logging
def export_tables(client):
table_names = client.args.tables.split(',')
logging.info('Exporting %s from %s to %s' % (table_names, client.args.database,
client.args.directory))
request_body = {'database': client.args.database,
'tables': table_names,
'directory': client.args.directory}
response = client.request_json('ExportTables', 'POST', request_body)
logging.info('Data is being exported to: %s' % response['destination'])
if __name__ == '__main__':
configure_logging()
parser = get_parser()
parser.add_argument('--database', help='The database to export data from', required=True)
parser.add_argument('--tables', help='A comma-separated list of tables to export',
required=True)
parser.add_argument('--directory',
help='A directory to write CSV output to inside the GCS bucket',
required=True)
export_tables(Client(parser=parser, base_path='offline'))
| Python | 0 |
f210ef3e6b4122c75b4df9eee6be6ee4ac81efa4 | Remove a useless table from the db | www/scripts/generate_db.py | www/scripts/generate_db.py | #!/usr/bin/python
# This script has to generate the sqlite database
#
# Requirements (import from):
# - sqlite3
#
# Syntax:
# ./generate_db.py
import sqlite3
import sys
from os import path
SCRIPT_PATH = path.dirname(__file__)
DEFAULT_DB = path.join(SCRIPT_PATH, "../mymoney.db")
def generate_tables(db=DEFAULT_DB):
conn = sqlite3.connect(db)
with conn:
c = conn.cursor()
# Drop tables if they exist
c.execute('''DROP TABLE IF EXISTS node''')
c.execute('''DROP TABLE IF EXISTS expense''')
c.execute('''DROP TABLE IF EXISTS node_expense''')
# Create tables
c.execute('''CREATE TABLE IF NOT EXISTS node (
id INTEGER PRIMARY KEY,
parent_id INTEGER,
title TEXT NOT NULL,
FOREIGN KEY(parent_id) REFERENCES node(id))''')
c.execute('''CREATE TABLE IF NOT EXISTS expense (
id INTEGER PRIMARY KEY,
title TEXT NOT NULL,
date INTEGER NOT NULL,
value REAL NOT NULL)''')
c.execute('''CREATE TABLE IF NOT EXISTS node_expense (
expense_id INTEGER,
node_id INTEGER,
PRIMARY KEY(expense_id, node_id),
FOREIGN KEY(expense_id) REFERENCES expense(id),
FOREIGN KEY(node_id) REFERENCES node(id))''')
# Commit the changes
conn.commit()
if __name__ == '__main__':
generate_tables(DEFAULT_DB)
| #!/usr/bin/python
# This script has to generate the sqlite database
#
# Requirements (import from):
# - sqlite3
#
# Syntax:
# ./generate_db.py
import sqlite3
import sys
from os import path
SCRIPT_PATH = path.dirname(__file__)
DEFAULT_DB = path.join(SCRIPT_PATH, "../mymoney.db")
def generate_tables(db=DEFAULT_DB):
conn = sqlite3.connect(db)
with conn:
c = conn.cursor()
# Drop tables if they exist
c.execute('''DROP TABLE IF EXISTS node''')
c.execute('''DROP TABLE IF EXISTS node_hierarchy''')
c.execute('''DROP TABLE IF EXISTS expense''')
c.execute('''DROP TABLE IF EXISTS node_expense''')
# Create tables
c.execute('''CREATE TABLE IF NOT EXISTS node (
id INTEGER PRIMARY KEY,
title TEXT NOT NULL)''')
c.execute('''CREATE TABLE IF NOT EXISTS node_hierarchy (
id INTEGER PRIMARY KEY,
parent_id INTEGER,
child_id INTEGER,
FOREIGN KEY(parent_id) REFERENCES node(id),
FOREIGN KEY(child_id) REFERENCES node(id),
UNIQUE(child_id))''')
c.execute('''CREATE TABLE IF NOT EXISTS expense (
id INTEGER PRIMARY KEY,
title TEXT NOT NULL,
date INTEGER NOT NULL,
value REAL NOT NULL)''')
c.execute('''CREATE TABLE IF NOT EXISTS node_expense (
expense_id INTEGER,
node_id INTEGER,
PRIMARY KEY(expense_id, node_id),
FOREIGN KEY(expense_id) REFERENCES expense(id),
FOREIGN KEY(node_id) REFERENCES node(id))''')
# Commit the changes
conn.commit()
if __name__ == '__main__':
generate_tables(DEFAULT_DB)
| Python | 0.000001 |
4c88b3f5daa1e8e147dedfa4a103216565469f93 | Fix timezone-naive datetime being compared to aware datetime. | zerver/lib/logging_util.py | zerver/lib/logging_util.py | from __future__ import absolute_import
from django.utils import timezone
import hashlib
import logging
import traceback
from datetime import datetime, timedelta
from zerver.lib.str_utils import force_bytes
# Adapted http://djangosnippets.org/snippets/2242/ by user s29 (October 25, 2010)
class _RateLimitFilter(object):
last_error = datetime.min.replace(tzinfo=timezone.utc)
def filter(self, record):
# type: (logging.LogRecord) -> bool
from django.conf import settings
from django.core.cache import cache
# Track duplicate errors
duplicate = False
rate = getattr(settings, '%s_LIMIT' % self.__class__.__name__.upper(),
600) # seconds
if rate > 0:
# Test if the cache works
try:
cache.set('RLF_TEST_KEY', 1, 1)
use_cache = cache.get('RLF_TEST_KEY') == 1
except Exception:
use_cache = False
if use_cache:
if record.exc_info is not None:
tb = force_bytes('\n'.join(traceback.format_exception(*record.exc_info)))
else:
tb = force_bytes(str(record))
key = self.__class__.__name__.upper() + hashlib.sha1(tb).hexdigest()
duplicate = cache.get(key) == 1
if not duplicate:
cache.set(key, 1, rate)
else:
min_date = timezone.now() - timedelta(seconds=rate)
duplicate = (self.last_error >= min_date)
if not duplicate:
self.last_error = timezone.now()
return not duplicate
class ZulipLimiter(_RateLimitFilter):
pass
class EmailLimiter(_RateLimitFilter):
pass
class ReturnTrue(logging.Filter):
def filter(self, record):
# type: (logging.LogRecord) -> bool
return True
class RequireReallyDeployed(logging.Filter):
def filter(self, record):
# type: (logging.LogRecord) -> bool
from django.conf import settings
return settings.PRODUCTION
def skip_200_and_304(record):
# type: (logging.LogRecord) -> bool
# Apparently, `status_code` is added by Django and is not an actual
# attribute of LogRecord; as a result, mypy throws an error if we
# access the `status_code` attribute directly.
if getattr(record, 'status_code') in [200, 304]:
return False
return True
def skip_site_packages_logs(record):
# type: (logging.LogRecord) -> bool
# This skips the log records that are generated from libraries
# installed in site packages.
# Workaround for https://code.djangoproject.com/ticket/26886
if 'site-packages' in record.pathname:
return False
return True
| from __future__ import absolute_import
from django.utils import timezone
import hashlib
import logging
import traceback
from datetime import datetime, timedelta
from zerver.lib.str_utils import force_bytes
# Adapted http://djangosnippets.org/snippets/2242/ by user s29 (October 25, 2010)
class _RateLimitFilter(object):
last_error = datetime.min
def filter(self, record):
# type: (logging.LogRecord) -> bool
from django.conf import settings
from django.core.cache import cache
# Track duplicate errors
duplicate = False
rate = getattr(settings, '%s_LIMIT' % self.__class__.__name__.upper(),
600) # seconds
if rate > 0:
# Test if the cache works
try:
cache.set('RLF_TEST_KEY', 1, 1)
use_cache = cache.get('RLF_TEST_KEY') == 1
except Exception:
use_cache = False
if use_cache:
if record.exc_info is not None:
tb = force_bytes('\n'.join(traceback.format_exception(*record.exc_info)))
else:
tb = force_bytes(str(record))
key = self.__class__.__name__.upper() + hashlib.sha1(tb).hexdigest()
duplicate = cache.get(key) == 1
if not duplicate:
cache.set(key, 1, rate)
else:
min_date = timezone.now() - timedelta(seconds=rate)
duplicate = (self.last_error >= min_date)
if not duplicate:
self.last_error = timezone.now()
return not duplicate
class ZulipLimiter(_RateLimitFilter):
pass
class EmailLimiter(_RateLimitFilter):
pass
class ReturnTrue(logging.Filter):
def filter(self, record):
# type: (logging.LogRecord) -> bool
return True
class RequireReallyDeployed(logging.Filter):
def filter(self, record):
# type: (logging.LogRecord) -> bool
from django.conf import settings
return settings.PRODUCTION
def skip_200_and_304(record):
# type: (logging.LogRecord) -> bool
# Apparently, `status_code` is added by Django and is not an actual
# attribute of LogRecord; as a result, mypy throws an error if we
# access the `status_code` attribute directly.
if getattr(record, 'status_code') in [200, 304]:
return False
return True
def skip_site_packages_logs(record):
# type: (logging.LogRecord) -> bool
# This skips the log records that are generated from libraries
# installed in site packages.
# Workaround for https://code.djangoproject.com/ticket/26886
if 'site-packages' in record.pathname:
return False
return True
| Python | 0 |
c02036f26bfd1eb6b1fed2dc10c73c91e97dae0b | Update __init__.py | tendrl/node_agent/objects/cluster_message/__init__.py | tendrl/node_agent/objects/cluster_message/__init__.py | from tendrl.commons import etcdobj
from tendrl.commons.message import Message as message
from tendrl.commons import objects
class ClusterMessage(objects.BaseObject, message):
internal = True
def __init__(self, **cluster_message):
self._defs = {}
message.__init__(self, **cluster_message)
objects.BaseObject.__init__(self)
self.value = 'clusters/%s/Messages/%s'
self._etcd_cls = _ClusterMessageEtcd
class _ClusterMessageEtcd(etcdobj.EtcdObj):
"""Cluster message object, lazily updated
"""
__name__ = 'clusters/%s/Messages/%s'
_tendrl_cls = ClusterMessage
def render(self):
self.__name__ = self.__name__ % (
self.cluster_id, self.message_id
)
return super(_ClusterMessageEtcd, self).render()
| from tendrl.commons import etcdobj
from tendrl.commons.message import Message as message
from tendrl.commons import objects
class ClusterMessage(message, objects.BaseObject):
internal = True
def __init__(self, **cluster_message):
self._defs = {}
super(ClusterMessage, self).__init__(**cluster_message)
self.value = 'clusters/%s/Messages/%s'
self._etcd_cls = _ClusterMessageEtcd
class _ClusterMessageEtcd(etcdobj.EtcdObj):
"""Cluster message object, lazily updated
"""
__name__ = 'clusters/%s/Messages/%s'
_tendrl_cls = ClusterMessage
def render(self):
self.__name__ = self.__name__ % (
self.cluster_id, self.message_id
)
return super(_ClusterMessageEtcd, self).render()
| Python | 0.000072 |
551dddbb80d512ec49d8a422b52c24e98c97b38c | Add waiting for new data to parse | tsparser/main.py | tsparser/main.py | from time import sleep
from tsparser import config
from tsparser.parser import BaseParser, ParseException
from tsparser.parser.gps import GPSParser
from tsparser.parser.imu import IMUParser
from tsparser.sender import Sender
def parse(input_file=None):
"""
Parse the file specified as input.
:param input_file: file to read input from. If None, then pipe specified
in config is used
:type input_file: file
"""
Sender(daemon=True).start()
if input_file is None:
input_file = open(config.PIPE_NAME, 'r')
parsers = _get_parsers()
while True:
line = input_file.readline()
if not line:
sleep(0.01)
continue
_parse_line(parsers, line)
def _get_parsers():
return [
IMUParser(),
GPSParser()
]
def _parse_line(parsers, line):
values = line.split(',')
BaseParser.timestamp = values.pop().strip()
for parser in parsers:
if parser.parse(line, *values):
break
else:
raise ParseException('Output line was not parsed by any parser: {}'
.format(line))
| from tsparser import config
from tsparser.parser import BaseParser, ParseException
from tsparser.parser.gps import GPSParser
from tsparser.parser.imu import IMUParser
from tsparser.sender import Sender
def parse(input_file=None):
"""
Parse the file specified as input.
:param input_file: file to read input from. If None, then pipe specified
in config is used
:type input_file: file
"""
Sender(daemon=True).start()
if input_file is None:
input_file = open(config.PIPE_NAME, 'r')
parsers = _get_parsers()
while True:
line = input_file.readline()
if not line:
continue
_parse_line(parsers, line)
def _get_parsers():
return [
IMUParser(),
GPSParser()
]
def _parse_line(parsers, line):
values = line.split(',')
BaseParser.timestamp = values.pop().strip()
for parser in parsers:
if parser.parse(line, *values):
break
else:
raise ParseException('Output line was not parsed by any parser: {}'
.format(line))
| Python | 0 |
d2a0c928b9cdb693ca75731e1ae2cefb4c7ae722 | fix Episode JSON export | tvd/core/json.py | tvd/core/json.py | #!/usr/bin/env python
# encoding: utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2013-2014 CNRS (Hervé BREDIN -- http://herve.niderb.fr/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import unicode_literals
import simplejson as json
import pyannote.core.json
TVD_JSON = 'tvd'
def object_hook(d):
"""
Usage
-----
>>> with open('file.json', 'r') as f:
... json.load(f, object_hook=object_hook)
"""
from episode import Episode
if TVD_JSON in d:
if d[TVD_JSON] == 'Episode':
return Episode.from_json(d)
d = pyannote.core.json.object_hook(d)
return d
def load(path):
with open(path, 'r') as f:
data = json.load(f, encoding='utf-8', object_hook=object_hook)
return data
def dump(data, path):
with open(path, 'w') as f:
json.dump(data, f, encoding='utf-8', for_json=True)
| #!/usr/bin/env python
# encoding: utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2013-2014 CNRS (Hervé BREDIN -- http://herve.niderb.fr/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import unicode_literals
import simplejson as json
import pyannote.core.json
TVD_JSON = 'tvd'
def object_hook(d):
"""
Usage
-----
>>> with open('file.json', 'r') as f:
... json.load(f, object_hook=object_hook)
"""
from episode import Episode
if TVD_JSON in d:
if d[TVD_JSON] == 'episode':
return Episode.from_json(d)
d = pyannote.core.json.object_hook(d)
return d
def load(path):
with open(path, 'r') as f:
data = json.load(f, encoding='utf-8', object_hook=object_hook)
return data
def dump(data, path):
with open(path, 'w') as f:
json.dump(data, f, encoding='utf-8', for_json=True)
| Python | 0.000008 |
cd4da2e0fbed7bbadd4b110f45b7356795075aeb | add min_level to Logger | twiggy/Logger.py | twiggy/Logger.py | from Message import Message
import Levels
class Logger(object):
__slots__ = ['_fields', 'emitters', 'min_level']
def __init__(self, fields = None, emitters = None, min_level = Levels.DEBUG):
self._fields = fields if fields is not None else {}
self.emitters = emitters if emitters is not None else {}
self.min_level = min_level
def fields(self, **kwargs):
new_fields = self._fields.copy().update(**kwargs)
return self.__class__(new_fields, self.emitters, self.min_level)
def name(self, name):
return self.fields(name=name)
def struct(self, **kwargs):
self.fields(**kwargs).info()
def _emit(self, level, format_spec = '', *args, **kwargs):
if level < self.min_level: return
msg = Message(level, format_spec, self._fields.copy(), *args, **kwargs)
for emitter in self.emitters.itervalues():
if emitter.min_level >= msg.level:
# XXX add appropriate error trapping & logging; watch for recursion
emitter.emit(msg)
def debug(self, *args, **kwargs):
self._emit(Levels.DEBUG, *args, **kwargs)
def info(self, *args, **kwargs):
self._emit(Levels.INFO, *args, **kwargs)
def warning(self, *args, **kwargs):
self._emit(Levels.WARNING, *args, **kwargs)
def error(self, *args, **kwargs):
self._emit(Levels.ERROR, *args, **kwargs)
def critical(self, *args, **kwargs):
self._emit(Levels.CRITICAL, *args, **kwargs) | from Message import Message
import Levels
class Logger(object):
__slots__ = ['_fields', 'emitters']
def __init__(self, fields = None, emitters = None):
self._fields = fields if fields is not None else {}
self.emitters = emitters if emitters is not None else {}
def fields(self, **kwargs):
new_fields = self._fields.copy().update(**kwargs)
return self.__class__(new_fields, self.emitters)
def name(self, name):
return self.fields(name=name)
def struct(self, **kwargs):
self.fields(**kwargs).info()
def _emit(self, level, format_spec = '', *args, **kwargs):
msg = Message(level, format_spec, self._fields.copy(), *args, **kwargs)
for emitter in self.emitters.itervalues():
if emitter.min_level >= msg.level:
# XXX add appropriate error trapping & logging; watch for recursion
emitter.emit(msg)
def debug(self, *args, **kwargs):
self._emit(Levels.DEBUG, *args, **kwargs)
def info(self, *args, **kwargs):
self._emit(Levels.INFO, *args, **kwargs)
def warning(self, *args, **kwargs):
self._emit(Levels.WARNING, *args, **kwargs)
def error(self, *args, **kwargs):
self._emit(Levels.ERROR, *args, **kwargs)
def critical(self, *args, **kwargs):
self._emit(Levels.CRITICAL, *args, **kwargs) | Python | 0.000001 |
a50c7c32f28d6f6e0ba369fc91f67f90edda7a66 | Add a processing function to the server to simplify end of burst | txircd/server.py | txircd/server.py | from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.internet.task import LoopingCall
from twisted.words.protocols.irc import IRC
class IRCServer(IRC):
def __init__(self, ircd, ip, received):
self.ircd = ircd
self.serverID = None
self.name = None
self.description = None
self.ip = ip
self.remoteServers = {}
self.nextClosest = self.ircd.serverID
self.cache = {}
self.bursted = None
self.disconnectedDeferred = Deferred()
self.receivedConnection = received
self._pinger = LoopingCall(self._ping)
self._registrationTimeoutTimer = reactor.callLater(self.ircd.config.getWithDefault("server_registration_timeout", 10), self._timeoutRegistration)
def handleCommand(self, command, prefix, params):
if command not in self.ircd.serverCommands:
self.disconnect("Unknown command {}".format(command)) # If we receive a command we don't recognize, abort immediately to avoid a desync
return
if self.bursted is False:
if "burst_queue" not in self.cache:
self.cache["burst_queue"] = []
self.cache["burst_queue"].append((command, prefix, params))
return
handlers = self.ircd.serverCommands[command]
data = None
for handler in handlers:
data = handler[0].parseParams(self, params, prefix, {})
if data is not None:
break
if data is None:
self.disconnect("Failed to parse command {} from {} with parameters '{}'".format(command, prefix, " ".join(params))) # If we receive a command we can't parse, also abort immediately
return
for handler in handlers:
if handler[0].execute(self, data):
break
else:
self.disconnect("Couldn't process command {} from {} with parameters '{}'".format(command, prefix, " ".join(params))) # Also abort connection if we can't process a command
return
def endBurst(self):
self.bursted = True
for command, prefix, params in self.cache["burst_queue"]:
self.handleCommand(command, prefix, params)
del self.cache["burst_queue"]
def connectionLost(self, reason):
if self.serverID in self.ircd.servers:
self.disconnect("Connection reset")
self.disconnectedDeferred.callback(None)
def disconnect(self, reason):
self.ircd.runActionStandard("serverquit", self, reason)
del self.ircd.servers[self.serverID]
del self.ircd.serverNames[self.name]
netsplitQuitMsg = "{} {}".format(self.ircd.servers[self.nextClosest].name if self.nextClosest in self.ircd.servers else self.ircd.name, self.name)
allUsers = self.ircd.users.values()
for user in allUsers:
if user.uuid[:3] == self.serverID or user.uuid[:3] in self.remoteServers:
user.disconnect(netsplitQuitMsg)
self._endConnection()
def _endConnection(self):
self.transport.loseConnection()
def _timeoutRegistration(self):
if self.serverID and self.name:
self._pinger.start(self.ircd.config.getWithDefault("server_ping_frequency", 60))
return
self.disconnect("Registration timeout")
def _ping(self):
self.ircd.runActionStandard("pingserver", self)
def register():
if not self.serverID:
return
if not self.name:
return
self.ircd.servers[self.serverID] = self
self.ircd.serverNames[self.name] = self.serverID
self.ircd.runActionStandard("serverconnect", self)
class RemoteServer(IRCServer):
def __init__(self, ircd, ip):
IRCServer.__init__(self, ircd, ip, True)
self._registrationTimeoutTimer.cancel()
def sendMessage(self, command, *params, **kw):
target = self
while target.nextClosest != self.ircd.serverID:
target = self.ircd.servers[target.nextClosest]
target.sendMessage(command, *params, **kw)
def _endConnection(self):
pass | from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.internet.task import LoopingCall
from twisted.words.protocols.irc import IRC
class IRCServer(IRC):
def __init__(self, ircd, ip, received):
self.ircd = ircd
self.serverID = None
self.name = None
self.description = None
self.ip = ip
self.remoteServers = {}
self.nextClosest = self.ircd.serverID
self.cache = {}
self.bursted = None
self.disconnectedDeferred = Deferred()
self.receivedConnection = received
self._pinger = LoopingCall(self._ping)
self._registrationTimeoutTimer = reactor.callLater(self.ircd.config.getWithDefault("server_registration_timeout", 10), self._timeoutRegistration)
def handleCommand(self, command, prefix, params):
if command not in self.ircd.serverCommands:
self.disconnect("Unknown command {}".format(command)) # If we receive a command we don't recognize, abort immediately to avoid a desync
return
if self.bursted is False:
if "burst_queue" not in self.cache:
self.cache["burst_queue"] = []
self.cache["burst_queue"].append((command, prefix, params))
return
handlers = self.ircd.serverCommands[command]
data = None
for handler in handlers:
data = handler[0].parseParams(self, params, prefix, {})
if data is not None:
break
if data is None:
self.disconnect("Failed to parse command {} from {} with parameters '{}'".format(command, prefix, " ".join(params))) # If we receive a command we can't parse, also abort immediately
return
for handler in handlers:
if handler[0].execute(self, data):
break
else:
self.disconnect("Couldn't process command {} from {} with parameters '{}'".format(command, prefix, " ".join(params))) # Also abort connection if we can't process a command
return
def connectionLost(self, reason):
if self.serverID in self.ircd.servers:
self.disconnect("Connection reset")
self.disconnectedDeferred.callback(None)
def disconnect(self, reason):
self.ircd.runActionStandard("serverquit", self, reason)
del self.ircd.servers[self.serverID]
del self.ircd.serverNames[self.name]
netsplitQuitMsg = "{} {}".format(self.ircd.servers[self.nextClosest].name if self.nextClosest in self.ircd.servers else self.ircd.name, self.name)
allUsers = self.ircd.users.values()
for user in allUsers:
if user.uuid[:3] == self.serverID or user.uuid[:3] in self.remoteServers:
user.disconnect(netsplitQuitMsg)
self._endConnection()
def _endConnection(self):
self.transport.loseConnection()
def _timeoutRegistration(self):
if self.serverID and self.name:
self._pinger.start(self.ircd.config.getWithDefault("server_ping_frequency", 60))
return
self.disconnect("Registration timeout")
def _ping(self):
self.ircd.runActionStandard("pingserver", self)
def register():
if not self.serverID:
return
if not self.name:
return
self.ircd.servers[self.serverID] = self
self.ircd.serverNames[self.name] = self.serverID
self.ircd.runActionStandard("serverconnect", self)
class RemoteServer(IRCServer):
def __init__(self, ircd, ip):
IRCServer.__init__(self, ircd, ip, True)
self._registrationTimeoutTimer.cancel()
def sendMessage(self, command, *params, **kw):
target = self
while target.nextClosest != self.ircd.serverID:
target = self.ircd.servers[target.nextClosest]
target.sendMessage(command, *params, **kw)
def _endConnection(self):
pass | Python | 0.000001 |
04f2c9005a04559a48ad0919b840d709c0f4eeaa | Update version. | neupy/__init__.py | neupy/__init__.py | """
NeuPy is the Artificial Neural Network library implemented in Python.
"""
__version__ = '0.1.1'
| """
NeuPy is the Artificial Neural Network library implemented in Python.
"""
__version__ = '0.1.1a'
| Python | 0 |
b2859bfde66d7d91f98e3cfb61e205c1d2f5dbfe | Make CommentFactory use fuzzy attrs | hackernews_scraper/test/factories.py | hackernews_scraper/test/factories.py | from datetime import datetime, timedelta
import factory
from factory.fuzzy import FuzzyText, FuzzyInteger
import time
class ItemFactory(factory.Factory):
FACTORY_FOR = dict
objectID = 21
created_at_i = 42
title = "Test item"
class CommentFactory(factory.Factory):
FACTORY_FOR = dict
@factory.sequence
def created_at(n):
return (datetime.now() - timedelta(minutes=n)).isoformat()
@factory.sequence
def created_at_i(n):
return time.time() - n
title = FuzzyText(length=20)
url = "www.google.com"
comment_text = FuzzyText(length=300)
story_id = 42
story_title = "Bear kills man"
story_url = "www.bing.com"
author = FuzzyText(length=10)
points = FuzzyInteger(100)
objectID = FuzzyInteger(100)
parent_id = FuzzyInteger(100)
class StoryFactory(factory.Factory):
FACTORY_FOR = dict
created_at_i = 42
title = "Test story"
url = "www.google.com"
author = "yourdad"
points = 42
story_text = "Fuzzy wuzzy had no hair"
story_id = 42
class ResponseFactory(factory.Factory):
FACTORY_FOR = dict
nbPages = 1
hits = [ItemFactory(), ItemFactory()]
nbHits = factory.LazyAttribute(lambda x: x.nbPages * len(x.hits))
hitsPerPage = factory.LazyAttribute(lambda x: len(x.hits))
| import factory
class ItemFactory(factory.Factory):
FACTORY_FOR = dict
objectID = 21
created_at_i = 42
title = "Test item"
class CommentFactory(factory.Factory):
FACTORY_FOR = dict
created_at = "2014-04-03T10:17:28.000Z"
title = "Test comment"
url = "www.google.com"
comment_text = "Fuzzy wuzzy was a bear"
story_id = 42
story_title = "Bear kills man"
story_url = "www.bing.com"
author = "yourmom"
points = 42
created_at_i = 42
objectID = 42
parent_id = 42
class StoryFactory(factory.Factory):
FACTORY_FOR = dict
created_at = "2014-04-03T10:17:28.000Z"
created_at_i = 42
title = "Test story"
url = "www.google.com"
author = "yourdad"
points = 42
story_text = "Fuzzy wuzzy had no hair"
story_id = 42
class ResponseFactory(factory.Factory):
FACTORY_FOR = dict
nbPages = 1
hits = [ItemFactory(), ItemFactory()]
nbHits = factory.LazyAttribute(lambda x: x.nbPages * len(x.hits))
hitsPerPage = factory.LazyAttribute(lambda x: len(x.hits))
| Python | 0 |
8039e38ae806bc3aecfa2cb9824ebfd1c9fdc10e | Revert "Potential fix for cell caching issue" | powershell_kernel/powershell_proxy.py | powershell_kernel/powershell_proxy.py | import threading
try:
import queue
except ImportError:
import Queue as queue
from threading import Timer
from time import sleep
class ReplReader(threading.Thread):
def __init__(self, repl):
super(ReplReader, self).__init__()
self.repl = repl
self.daemon = True
self.queue = queue.Queue()
self.start()
def run(self):
r = self.repl
q = self.queue
while True:
result = r.read()
q.put(result)
if result is None:
break
class ReplProxy(object):
def __init__(self, repl):
self._repl = repl
self._repl_reader = ReplReader(repl)
# this is a hack to detect when we stop processing this input
self.send_input('function prompt() {"^"}')
self.stop_flag = False
self.output = ''
self.timer = Timer(0.1, self.update_view_loop)
self.timer.start()
# get preambula and eveluation of the prompt
self.get_output()
self.output_prefix_stripped = True
self.expected_output_prefix = ''
self.expected_output_len = 0
def get_output(self):
while not self.stop_flag:
sleep(0.05)
out = self.output
self.output = ''
self.stop_flag = False
return out
def send_input(self, input):
# TODO: we should block here until we return output for previous command, should we?
# for multiline statements we should send 1 extra new line
# https://stackoverflow.com/questions/13229066/how-to-end-a-multi-line-command-in-powershell
if '\n' in input:
input += '\n'
self.expected_output_prefix = input.replace('\n', '\n>> ') + '\n'
self.expected_output_len = len(self.expected_output_prefix)
self.output_prefix_stripped = False
self._repl.write(input + '\n')
def handle_repl_output(self):
"""Returns new data from Repl and bool indicating if Repl is still
working"""
if self.stop_flag:
return True
try:
while True:
packet = self._repl_reader.queue.get_nowait()
if packet is None:
return False
self.write(packet)
except queue.Empty:
return True
def update_view_loop(self):
is_still_working = self.handle_repl_output()
if is_still_working:
self.timer = Timer(0.1, self.update_view_loop)
self.timer.start()
else:
self.write("\n***Repl Killed***\n""")
def write(self, packet):
# this is a hack to detect when we stop processing this input
if packet == '^':
self.stop_flag = True
return
self.output += packet
if not self.output_prefix_stripped and len(self.output) >= self.expected_output_len:
if self.output[:self.expected_output_len] != self.expected_output_prefix:
print("Unexpected prefix: %r : Expected %r" % (
self.output[:self.expected_output_len], self.expected_output_prefix
))
else:
self.output_prefix_stripped = True
self.output = self.output[self.expected_output_len:]
| import threading
try:
import queue
except ImportError:
import Queue as queue
from threading import Timer
from time import sleep
class ReplReader(threading.Thread):
def __init__(self, repl):
super(ReplReader, self).__init__()
self.repl = repl
self.daemon = True
self.queue = queue.Queue()
self.start()
def run(self):
r = self.repl
q = self.queue
while True:
result = r.read()
q.put(result)
if result is None:
break
class ReplProxy(object):
def __init__(self, repl):
self._repl = repl
self.expected_carets = 1
self._repl_reader = ReplReader(repl)
# this is a hack to detect when we stop processing this input
self.send_input('function prompt() {"^"}')
self.stop_flag = False
self.output = ''
self.timer = Timer(0.1, self.update_view_loop)
self.timer.start()
# get preambula and eveluation of the prompt
self.get_output()
self.output_prefix_stripped = True
self.expected_output_prefix = ''
self.expected_output_len = 0
def get_output(self):
while not self.stop_flag:
sleep(0.05)
out = self.output
self.output = ''
self.stop_flag = False
return out
def send_input(self, input):
# TODO: we should block here until we return output for previous command, should we?
# for multiline statements we should send 1 extra new line
# https://stackoverflow.com/questions/13229066/how-to-end-a-multi-line-command-in-powershell
if '\n' in input:
input += '\n'
self.expected_carets = input.count('\n')
self.expected_output_prefix = input.replace('\n', '\n>> ') + '\n'
self.expected_output_len = len(self.expected_output_prefix)
self.output_prefix_stripped = False
self._repl.write(input + '\n')
def handle_repl_output(self):
"""Returns new data from Repl and bool indicating if Repl is still
working"""
if self.stop_flag:
return True
try:
while True:
packet = self._repl_reader.queue.get_nowait()
if packet is None:
return False
self.write(packet)
except queue.Empty:
return True
def update_view_loop(self):
is_still_working = self.handle_repl_output()
if is_still_working:
self.timer = Timer(0.1, self.update_view_loop)
self.timer.start()
else:
self.write("\n***Repl Killed***\n""")
def write(self, packet):
# this is a hack to detect when we stop processing this input
if packet == '^':
self.expected_carets -= 1
if self.expected_carets < 1:
self.stop_flag = True
return
self.output += packet
if not self.output_prefix_stripped and len(self.output) >= self.expected_output_len:
if self.output[:self.expected_output_len] != self.expected_output_prefix:
print("Unexpected prefix: %r : Expected %r" % (
self.output[:self.expected_output_len], self.expected_output_prefix
))
else:
self.output_prefix_stripped = True
self.output = self.output[self.expected_output_len:]
| Python | 0 |
5e1e0ba1dca301eb597fb319c68280f7ee761037 | Add twopeasandtheirpod and simplyrecipes to __init__ | recipe_scrapers/__init__.py | recipe_scrapers/__init__.py | import re
from .allrecipes import AllRecipes
from .simplyrecipes import SimplyRecipes
from .twopeasandtheirpod import TwoPeasAndTheirPod
SCRAPERS = {
AllRecipes.host(): AllRecipes,
SimplyRecipes.host(): SimplyRecipes,
TwoPeasAndTheirPod.host(): TwoPeasAndTheirPod,
}
def url_path_to_dict(path):
pattern = (r'^'
r'((?P<schema>.+?)://)?'
r'((?P<user>.+?)(:(?P<password>.*?))?@)?'
r'(?P<host>.*?)'
r'(:(?P<port>\d+?))?'
r'(?P<path>/.*?)?'
r'(?P<query>[?].*?)?'
r'$'
)
regex = re.compile(pattern)
matches = regex.match(path)
url_dict = matches.groupdict() if matches is not None else None
return url_dict
def scrap_me(url_path):
return SCRAPERS[url_path_to_dict(url_path)['host']](url_path)
__all__ = ['scrap_me']
| import re
from .allrecipes import AllRecipes
SCRAPERS = {
AllRecipes.host(): AllRecipes,
}
def url_path_to_dict(path):
pattern = (r'^'
r'((?P<schema>.+?)://)?'
r'((?P<user>.+?)(:(?P<password>.*?))?@)?'
r'(?P<host>.*?)'
r'(:(?P<port>\d+?))?'
r'(?P<path>/.*?)?'
r'(?P<query>[?].*?)?'
r'$'
)
regex = re.compile(pattern)
matches = regex.match(path)
url_dict = matches.groupdict() if matches is not None else None
return url_dict
def scrap_me(url_path):
return SCRAPERS[url_path_to_dict(url_path)['host']](url_path)
__all__ = ['scrap_me']
| Python | 0.000004 |
5fd70e01f648da6dfc994bfe0e5c666c69fa9e45 | return None (null) in preference to empty string when recipe yield is unavailable | recipe_scrapers/vegolosi.py | recipe_scrapers/vegolosi.py | from ._abstract import AbstractScraper
from ._utils import get_minutes, get_yields, normalize_string
class Vegolosi(AbstractScraper):
@classmethod
def host(cls):
return "vegolosi.it"
def title(self):
return self.soup.find("h1").get_text().strip()
def preparation_time(self):
possible_time_info_elements = self.soup.findAll(
"span", {"class": "tasty-recipes-prep-time"}
)
return sum([get_minutes(element) for element in possible_time_info_elements])
def cooking_time(self):
possible_time_info_elements = self.soup.findAll(
"span", {"class": "tasty-recipes-cook-time"}
)
return sum([get_minutes(element) for element in possible_time_info_elements])
def total_time(self):
possible_time_info_elements = self.soup.findAll(
"span", {"class": "tasty-recipes-total-time"}
)
return sum([get_minutes(element) for element in possible_time_info_elements])
def yields(self):
possible_yields_info_elements = self.soup.findAll(
"span", {"class": "tasty-recipes-yield"}
)
for element in possible_yields_info_elements:
if "persone" in element.get_text():
return get_yields(element)
def ingredients(self):
ingredients = self.soup.select(".tasty-recipe-ingredients > ul > li")
if not ingredients:
ingredients = self.soup.findAll("li", {"class": "ingredient"})
return [normalize_string(ingredient.get_text()) for ingredient in ingredients]
def instructions(self):
instructions = self.soup.findAll("div", {"class": "tasty-recipe-instructions"})
return "\n".join(
[normalize_string(instruction.get_text()) for instruction in instructions]
)
def ratings(self):
return round(
float(
self.soup.find("div", {"class": "tasty-recipe-rating rating_panel"})
.get("data-content-rate")
.replace(",", ".")
),
2,
)
| from ._abstract import AbstractScraper
from ._utils import get_minutes, get_yields, normalize_string
class Vegolosi(AbstractScraper):
@classmethod
def host(cls):
return "vegolosi.it"
def title(self):
return self.soup.find("h1").get_text().strip()
def preparation_time(self):
possible_time_info_elements = self.soup.findAll(
"span", {"class": "tasty-recipes-prep-time"}
)
return sum([get_minutes(element) for element in possible_time_info_elements])
def cooking_time(self):
possible_time_info_elements = self.soup.findAll(
"span", {"class": "tasty-recipes-cook-time"}
)
return sum([get_minutes(element) for element in possible_time_info_elements])
def total_time(self):
possible_time_info_elements = self.soup.findAll(
"span", {"class": "tasty-recipes-total-time"}
)
return sum([get_minutes(element) for element in possible_time_info_elements])
def yields(self):
possible_yields_info_elements = self.soup.findAll(
"span", {"class": "tasty-recipes-yield"}
)
for element in possible_yields_info_elements:
if "persone" in element.get_text():
return get_yields(element)
return ""
def ingredients(self):
ingredients = self.soup.select(".tasty-recipe-ingredients > ul > li")
if not ingredients:
ingredients = self.soup.findAll("li", {"class": "ingredient"})
return [normalize_string(ingredient.get_text()) for ingredient in ingredients]
def instructions(self):
instructions = self.soup.findAll("div", {"class": "tasty-recipe-instructions"})
return "\n".join(
[normalize_string(instruction.get_text()) for instruction in instructions]
)
def ratings(self):
return round(
float(
self.soup.find("div", {"class": "tasty-recipe-rating rating_panel"})
.get("data-content-rate")
.replace(",", ".")
),
2,
)
| Python | 0.000089 |
c88b7d5fa934e25ae426d8b918d6eb8de414682d | Add missing _ssl constant. Close PyCQA/pylint#2629 | astroid/brain/brain_ssl.py | astroid/brain/brain_ssl.py | # Copyright (c) 2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016 Ceridwen <ceridwenv@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Astroid hooks for the ssl library."""
from astroid import MANAGER, register_module_extender
from astroid.builder import AstroidBuilder
from astroid import nodes
from astroid import parse
def ssl_transform():
return parse(
"""
from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
from _ssl import _SSLContext, MemoryBIO
from _ssl import (
SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError,
SSLSyscallError, SSLEOFError,
)
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj
from _ssl import RAND_status, RAND_add, RAND_bytes, RAND_pseudo_bytes
try:
from _ssl import RAND_egd
except ImportError:
# LibreSSL does not provide RAND_egd
pass
from _ssl import (OP_ALL, OP_CIPHER_SERVER_PREFERENCE,
OP_NO_COMPRESSION, OP_NO_SSLv2, OP_NO_SSLv3,
OP_NO_TLSv1, OP_NO_TLSv1_1, OP_NO_TLSv1_2,
OP_SINGLE_DH_USE, OP_SINGLE_ECDH_USE)
from _ssl import (ALERT_DESCRIPTION_ACCESS_DENIED, ALERT_DESCRIPTION_BAD_CERTIFICATE,
ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE,
ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE,
ALERT_DESCRIPTION_BAD_RECORD_MAC,
ALERT_DESCRIPTION_CERTIFICATE_EXPIRED,
ALERT_DESCRIPTION_CERTIFICATE_REVOKED,
ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN,
ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE,
ALERT_DESCRIPTION_CLOSE_NOTIFY, ALERT_DESCRIPTION_DECODE_ERROR,
ALERT_DESCRIPTION_DECOMPRESSION_FAILURE,
ALERT_DESCRIPTION_DECRYPT_ERROR,
ALERT_DESCRIPTION_HANDSHAKE_FAILURE,
ALERT_DESCRIPTION_ILLEGAL_PARAMETER,
ALERT_DESCRIPTION_INSUFFICIENT_SECURITY,
ALERT_DESCRIPTION_INTERNAL_ERROR,
ALERT_DESCRIPTION_NO_RENEGOTIATION,
ALERT_DESCRIPTION_PROTOCOL_VERSION,
ALERT_DESCRIPTION_RECORD_OVERFLOW,
ALERT_DESCRIPTION_UNEXPECTED_MESSAGE,
ALERT_DESCRIPTION_UNKNOWN_CA,
ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY,
ALERT_DESCRIPTION_UNRECOGNIZED_NAME,
ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE,
ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION,
ALERT_DESCRIPTION_USER_CANCELLED)
from _ssl import (SSL_ERROR_EOF, SSL_ERROR_INVALID_ERROR_CODE, SSL_ERROR_SSL,
SSL_ERROR_SYSCALL, SSL_ERROR_WANT_CONNECT, SSL_ERROR_WANT_READ,
SSL_ERROR_WANT_WRITE, SSL_ERROR_WANT_X509_LOOKUP, SSL_ERROR_ZERO_RETURN)
from _ssl import VERIFY_CRL_CHECK_CHAIN, VERIFY_CRL_CHECK_LEAF, VERIFY_DEFAULT, VERIFY_X509_STRICT
from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN
from _ssl import _OPENSSL_API_VERSION
from _ssl import PROTOCOL_SSLv23, PROTOCOL_TLSv1, PROTOCOL_TLSv1_1, PROTOCOL_TLSv1_2
from _ssl import PROTOCOL_TLS
"""
)
register_module_extender(MANAGER, "ssl", ssl_transform)
| # Copyright (c) 2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016 Ceridwen <ceridwenv@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Astroid hooks for the ssl library."""
from astroid import MANAGER, register_module_extender
from astroid.builder import AstroidBuilder
from astroid import nodes
from astroid import parse
def ssl_transform():
return parse(
"""
from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
from _ssl import _SSLContext, MemoryBIO
from _ssl import (
SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError,
SSLSyscallError, SSLEOFError,
)
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj
from _ssl import RAND_status, RAND_add, RAND_bytes, RAND_pseudo_bytes
try:
from _ssl import RAND_egd
except ImportError:
# LibreSSL does not provide RAND_egd
pass
from _ssl import (OP_ALL, OP_CIPHER_SERVER_PREFERENCE,
OP_NO_COMPRESSION, OP_NO_SSLv2, OP_NO_SSLv3,
OP_NO_TLSv1, OP_NO_TLSv1_1, OP_NO_TLSv1_2,
OP_SINGLE_DH_USE, OP_SINGLE_ECDH_USE)
from _ssl import (ALERT_DESCRIPTION_ACCESS_DENIED, ALERT_DESCRIPTION_BAD_CERTIFICATE,
ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE,
ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE,
ALERT_DESCRIPTION_BAD_RECORD_MAC,
ALERT_DESCRIPTION_CERTIFICATE_EXPIRED,
ALERT_DESCRIPTION_CERTIFICATE_REVOKED,
ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN,
ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE,
ALERT_DESCRIPTION_CLOSE_NOTIFY, ALERT_DESCRIPTION_DECODE_ERROR,
ALERT_DESCRIPTION_DECOMPRESSION_FAILURE,
ALERT_DESCRIPTION_DECRYPT_ERROR,
ALERT_DESCRIPTION_HANDSHAKE_FAILURE,
ALERT_DESCRIPTION_ILLEGAL_PARAMETER,
ALERT_DESCRIPTION_INSUFFICIENT_SECURITY,
ALERT_DESCRIPTION_INTERNAL_ERROR,
ALERT_DESCRIPTION_NO_RENEGOTIATION,
ALERT_DESCRIPTION_PROTOCOL_VERSION,
ALERT_DESCRIPTION_RECORD_OVERFLOW,
ALERT_DESCRIPTION_UNEXPECTED_MESSAGE,
ALERT_DESCRIPTION_UNKNOWN_CA,
ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY,
ALERT_DESCRIPTION_UNRECOGNIZED_NAME,
ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE,
ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION,
ALERT_DESCRIPTION_USER_CANCELLED)
from _ssl import (SSL_ERROR_EOF, SSL_ERROR_INVALID_ERROR_CODE, SSL_ERROR_SSL,
SSL_ERROR_SYSCALL, SSL_ERROR_WANT_CONNECT, SSL_ERROR_WANT_READ,
SSL_ERROR_WANT_WRITE, SSL_ERROR_WANT_X509_LOOKUP, SSL_ERROR_ZERO_RETURN)
from _ssl import VERIFY_CRL_CHECK_CHAIN, VERIFY_CRL_CHECK_LEAF, VERIFY_DEFAULT, VERIFY_X509_STRICT
from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN
from _ssl import _OPENSSL_API_VERSION
from _ssl import PROTOCOL_SSLv23, PROTOCOL_TLSv1, PROTOCOL_TLSv1_1, PROTOCOL_TLSv1_2
"""
)
register_module_extender(MANAGER, "ssl", ssl_transform)
| Python | 0.000006 |
631270eeafad8fd6b20973673f6d6e8b733e9029 | enable email | quant/tool/email_box.py | quant/tool/email_box.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from email.mime.text import MIMEText
from quant import config
import smtplib
mail_to = ["aiai373824745_wy@163.com"]
mail_host = "smtp.163.com"
mail_user = "aiai373824745_wy@163.com"
'''163邮箱smtp生成的密码'''
mail_pass = config.EMAIL_PASSWORD_163
mail_subject = 'logging'
def send_mail(content):
# pass
me = "QuantBot" + "<" + mail_user + ">"
msg = MIMEText(_text=content, _subtype='plain', _charset='utf-8')
msg['Subject'] = mail_subject
msg['From'] = me
msg['To'] = ";".join(mail_to)
try:
server = smtplib.SMTP()
server.connect(mail_host)
server.login(mail_user, mail_pass)
server.sendmail(me, mail_to, msg.as_string())
server.close()
return True
except Exception as e:
print (e)
return False
if __name__ == '__main__':
# for test
send_mail('content')
| #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from email.mime.text import MIMEText
from quant import config
import smtplib
mail_to = ["aiai373824745_wy@163.com"]
mail_host = "smtp.163.com"
mail_user = "aiai373824745_wy@163.com"
'''163邮箱smtp生成的密码'''
mail_pass = config.EMAIL_PASSWORD_163
mail_subject = 'logging'
def send_mail(content):
pass
# me = "QuantBot" + "<" + mail_user + ">"
# msg = MIMEText(_text=content, _subtype='plain', _charset='utf-8')
# msg['Subject'] = mail_subject
# msg['From'] = me
# msg['To'] = ";".join(mail_to)
# try:
# server = smtplib.SMTP()
# server.connect(mail_host)
# server.login(mail_user, mail_pass)
# server.sendmail(me, mail_to, msg.as_string())
# server.close()
# return True
# except Exception as e:
# print (e)
# return False
if __name__ == '__main__':
# for test
send_mail('content')
| Python | 0.00004 |
0caec903579e4cf3f22ea3e5ea1df3ecd8ad0fe3 | remove nigthly test hgemm_asm | test/nightly.py | test/nightly.py | #
# These nightly tests are slow but have good coverage. Fast tests with less coverage are in pre_checkin.py.
#
# To execute this test file, apt-get install python-pytest, then
# PYTHONPATH=. py.test -v test/nightly.py
#
# To run test directly, with complete output:
# mkdir build && cd build
# python ../Tensile/Tensile.py ../Tensile/Configs/test_hgemm_defaults.yaml ./
#
import Tensile.Tensile as Tensile
# defaults
def test_hgemm_defaults(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_hgemm_defaults.yaml"), tmpdir.strpath])
def test_sgemm_defaults(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_sgemm_defaults.yaml"), tmpdir.strpath])
def test_dgemm_defaults(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_dgemm_defaults.yaml"), tmpdir.strpath])
# thorough tests
def test_hgemm(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_hgemm.yaml"), tmpdir.strpath])
def test_sgemm(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_sgemm.yaml"), tmpdir.strpath])
# vectors
def test_hgemm_vectors(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_hgemm_vectors.yaml"), tmpdir.strpath])
def test_sgemm_vectors(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_sgemm_vectors.yaml"), tmpdir.strpath])
# tensor convolution
def test_tensor_convolution(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_convolution.yaml"), tmpdir.strpath])
# tensor contractions
def test_tensor_contraction(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_tensor_contraction.yaml"), tmpdir.strpath])
# assembly
def test_sgemm_asm(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_sgemm_asm.yaml"), tmpdir.strpath])
def test_dgemm_asm(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_dgemm_asm.yaml"), tmpdir.strpath])
| #
# These nightly tests are slow but have good coverage. Fast tests with less coverage are in pre_checkin.py.
#
# To execute this test file, apt-get install python-pytest, then
# PYTHONPATH=. py.test -v test/nightly.py
#
# To run test directly, with complete output:
# mkdir build && cd build
# python ../Tensile/Tensile.py ../Tensile/Configs/test_hgemm_defaults.yaml ./
#
import Tensile.Tensile as Tensile
# defaults
def test_hgemm_defaults(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_hgemm_defaults.yaml"), tmpdir.strpath])
def test_sgemm_defaults(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_sgemm_defaults.yaml"), tmpdir.strpath])
def test_dgemm_defaults(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_dgemm_defaults.yaml"), tmpdir.strpath])
# thorough tests
def test_hgemm(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_hgemm.yaml"), tmpdir.strpath])
def test_sgemm(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_sgemm.yaml"), tmpdir.strpath])
# vectors
def test_hgemm_vectors(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_hgemm_vectors.yaml"), tmpdir.strpath])
def test_sgemm_vectors(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_sgemm_vectors.yaml"), tmpdir.strpath])
# tensor convolution
def test_tensor_convolution(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_convolution.yaml"), tmpdir.strpath])
# tensor contractions
def test_tensor_contraction(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_tensor_contraction.yaml"), tmpdir.strpath])
# assembly
def test_hgemm_asm(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_hgemm_asm.yaml"), tmpdir.strpath])
def test_sgemm_asm(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_sgemm_asm.yaml"), tmpdir.strpath])
def test_dgemm_asm(tmpdir):
Tensile.Tensile([Tensile.TensileConfigPath("test_dgemm_asm.yaml"), tmpdir.strpath])
| Python | 0.016036 |
931cfb8025f45535b3bb839ebfa5191074a76b15 | Fix log capture on py3 | test/service.py | test/service.py | import logging
import re
import select
import subprocess
import threading
import time
__all__ = [
'ExternalService',
'SpawnedService',
]
class ExternalService(object):
def __init__(self, host, port):
logging.info("Using already running service at %s:%d", host, port)
self.host = host
self.port = port
def open(self):
pass
def close(self):
pass
class SpawnedService(threading.Thread):
def __init__(self, args=None, env=None):
threading.Thread.__init__(self)
if args is None:
raise TypeError("args parameter is required")
self.args = args
self.env = env
self.captured_stdout = []
self.captured_stderr = []
self.should_die = threading.Event()
def run(self):
self.run_with_handles()
def run_with_handles(self):
self.child = subprocess.Popen(
self.args,
env=self.env,
bufsize=1,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
alive = True
while True:
(rds, _, _) = select.select([self.child.stdout, self.child.stderr], [], [], 1)
if self.child.stdout in rds:
line = self.child.stdout.readline()
self.captured_stdout.append(line.decode('utf-8'))
if self.child.stderr in rds:
line = self.child.stderr.readline()
self.captured_stderr.append(line)
if self.should_die.is_set():
self.child.terminate()
alive = False
poll_results = self.child.poll()
if poll_results is not None:
if not alive:
break
else:
self.dump_logs()
raise RuntimeError("Subprocess has died. Aborting. (args=%s)" % ' '.join(str(x) for x in self.args))
def dump_logs(self):
logging.critical('stderr')
for line in self.captured_stderr:
logging.critical(line.rstrip())
logging.critical('stdout')
for line in self.captured_stdout:
logging.critical(line.rstrip())
def wait_for(self, pattern, timeout=30):
t1 = time.time()
while True:
t2 = time.time()
if t2 - t1 >= timeout:
try:
self.child.kill()
except:
logging.exception("Received exception when killing child process")
self.dump_logs()
raise RuntimeError("Waiting for %r timed out after %d seconds" % (pattern, timeout))
if re.search(pattern, '\n'.join(self.captured_stdout), re.IGNORECASE) is not None:
logging.info("Found pattern %r in %d seconds via stdout", pattern, (t2 - t1))
return
if re.search(pattern, '\n'.join(self.captured_stderr), re.IGNORECASE) is not None:
logging.info("Found pattern %r in %d seconds via stderr", pattern, (t2 - t1))
return
time.sleep(0.1)
def start(self):
threading.Thread.start(self)
def stop(self):
self.should_die.set()
self.join()
| import logging
import re
import select
import subprocess
import threading
import time
__all__ = [
'ExternalService',
'SpawnedService',
]
class ExternalService(object):
def __init__(self, host, port):
logging.info("Using already running service at %s:%d", host, port)
self.host = host
self.port = port
def open(self):
pass
def close(self):
pass
class SpawnedService(threading.Thread):
def __init__(self, args=None, env=None):
threading.Thread.__init__(self)
if args is None:
raise TypeError("args parameter is required")
self.args = args
self.env = env
self.captured_stdout = []
self.captured_stderr = []
self.should_die = threading.Event()
def run(self):
self.run_with_handles()
def run_with_handles(self):
self.child = subprocess.Popen(
self.args,
env=self.env,
bufsize=1,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
alive = True
while True:
(rds, _, _) = select.select([self.child.stdout, self.child.stderr], [], [], 1)
if self.child.stdout in rds:
line = self.child.stdout.readline()
self.captured_stdout.append(line)
if self.child.stderr in rds:
line = self.child.stderr.readline()
self.captured_stderr.append(line)
if self.should_die.is_set():
self.child.terminate()
alive = False
poll_results = self.child.poll()
if poll_results is not None:
if not alive:
break
else:
self.dump_logs()
raise RuntimeError("Subprocess has died. Aborting. (args=%s)" % ' '.join(str(x) for x in self.args))
def dump_logs(self):
logging.critical('stderr')
for line in self.captured_stderr:
logging.critical(line.rstrip())
logging.critical('stdout')
for line in self.captured_stdout:
logging.critical(line.rstrip())
def wait_for(self, pattern, timeout=30):
t1 = time.time()
while True:
t2 = time.time()
if t2 - t1 >= timeout:
try:
self.child.kill()
except:
logging.exception("Received exception when killing child process")
self.dump_logs()
raise RuntimeError("Waiting for %r timed out after %d seconds" % (pattern, timeout))
if re.search(pattern, '\n'.join(self.captured_stdout), re.IGNORECASE) is not None:
logging.info("Found pattern %r in %d seconds via stdout", pattern, (t2 - t1))
return
if re.search(pattern, '\n'.join(self.captured_stderr), re.IGNORECASE) is not None:
logging.info("Found pattern %r in %d seconds via stderr", pattern, (t2 - t1))
return
time.sleep(0.1)
def start(self):
threading.Thread.start(self)
def stop(self):
self.should_die.set()
self.join()
| Python | 0 |
94fc7881052fea4e7d83f35e41fab4f5ed108f34 | fix styling | spectate/utils.py | spectate/utils.py | class Sentinel:
__slots__ = "_name"
def __init__(self, name):
self._name = name
def __repr__(self):
return self._name # pragma: no cover
| from collections.abc import Mapping
class Sentinel:
__slots__ = "_name"
def __init__(self, name):
self._name = name
def __repr__(self):
return self._name # pragma: no cover
| Python | 0.000001 |
800ef1d1305f125695073732f4b6155d6f0cb445 | Update rasa/cli/interactive.py | rasa/cli/interactive.py | rasa/cli/interactive.py | import argparse
import os
from typing import List, Text
import rasa.cli.train as train
from rasa.cli.arguments import interactive as arguments
from rasa import data, model
# noinspection PyProtectedMember
from rasa.cli.utils import get_validated_path, print_error
from rasa.constants import (
DEFAULT_DATA_PATH,
DEFAULT_MODELS_PATH,
DEFAULT_ENDPOINTS_PATH,
)
from rasa.model import get_latest_model
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
interactive_parser = subparsers.add_parser(
"interactive",
conflict_handler="resolve",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Starts an interactive learning session to create new training data for a "
"Rasa model by chatting.",
)
interactive_parser.set_defaults(func=interactive)
interactive_parser.add_argument(
"--e2e", action="store_true", help="Save story files in e2e format. In this format user messages will be included in the stories."
)
interactive_subparsers = interactive_parser.add_subparsers()
interactive_core_parser = interactive_subparsers.add_parser(
"core",
conflict_handler="resolve",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Starts an interactive learning session model to create new training data "
"for a Rasa Core model by chatting. Uses the 'RegexInterpreter', i.e. "
"`/<intent>` input format.",
)
interactive_core_parser.set_defaults(func=interactive_core)
arguments.set_interactive_arguments(interactive_parser)
arguments.set_interactive_core_arguments(interactive_core_parser)
def interactive(args: argparse.Namespace):
args.fixed_model_name = None
args.store_uncompressed = False
if args.model is None:
check_training_data(args)
zipped_model = train.train(args)
else:
zipped_model = get_provided_model(args.model)
perform_interactive_learning(args, zipped_model)
def interactive_core(args: argparse.Namespace):
args.fixed_model_name = None
args.store_uncompressed = False
if args.model is None:
zipped_model = train.train_core(args)
else:
zipped_model = get_provided_model(args.model)
perform_interactive_learning(args, zipped_model)
def perform_interactive_learning(args, zipped_model):
from rasa.core.train import do_interactive_learning
if zipped_model and os.path.exists(zipped_model):
args.model = zipped_model
with model.unpack_model(zipped_model) as model_path:
args.core, args.nlu = model.get_model_subdirectories(model_path)
stories_directory = data.get_core_directory(args.data)
args.endpoints = get_validated_path(
args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
do_interactive_learning(args, stories_directory)
else:
print_error(
"Interactive learning process cannot be started as no initial model was "
"found. Use 'rasa train' to train a model."
)
def get_provided_model(arg_model: Text):
model_path = get_validated_path(arg_model, "model", DEFAULT_MODELS_PATH)
if os.path.isdir(model_path):
model_path = get_latest_model(model_path)
return model_path
def check_training_data(args):
training_files = [
get_validated_path(f, "data", DEFAULT_DATA_PATH, none_is_valid=True)
for f in args.data
]
story_files, nlu_files = data.get_core_nlu_files(training_files)
if not story_files or not nlu_files:
print_error(
"Cannot train initial Rasa model. Please provide NLU and Core data "
"using the '--data' argument."
)
exit(1)
| import argparse
import os
from typing import List, Text
import rasa.cli.train as train
from rasa.cli.arguments import interactive as arguments
from rasa import data, model
# noinspection PyProtectedMember
from rasa.cli.utils import get_validated_path, print_error
from rasa.constants import (
DEFAULT_DATA_PATH,
DEFAULT_MODELS_PATH,
DEFAULT_ENDPOINTS_PATH,
)
from rasa.model import get_latest_model
def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
interactive_parser = subparsers.add_parser(
"interactive",
conflict_handler="resolve",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Starts an interactive learning session to create new training data for a "
"Rasa model by chatting.",
)
interactive_parser.set_defaults(func=interactive)
interactive_parser.add_argument(
"--e2e", action="store_true", help="save file in e2e format"
)
interactive_subparsers = interactive_parser.add_subparsers()
interactive_core_parser = interactive_subparsers.add_parser(
"core",
conflict_handler="resolve",
parents=parents,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Starts an interactive learning session model to create new training data "
"for a Rasa Core model by chatting. Uses the 'RegexInterpreter', i.e. "
"`/<intent>` input format.",
)
interactive_core_parser.set_defaults(func=interactive_core)
arguments.set_interactive_arguments(interactive_parser)
arguments.set_interactive_core_arguments(interactive_core_parser)
def interactive(args: argparse.Namespace):
args.fixed_model_name = None
args.store_uncompressed = False
if args.model is None:
check_training_data(args)
zipped_model = train.train(args)
else:
zipped_model = get_provided_model(args.model)
perform_interactive_learning(args, zipped_model)
def interactive_core(args: argparse.Namespace):
args.fixed_model_name = None
args.store_uncompressed = False
if args.model is None:
zipped_model = train.train_core(args)
else:
zipped_model = get_provided_model(args.model)
perform_interactive_learning(args, zipped_model)
def perform_interactive_learning(args, zipped_model):
from rasa.core.train import do_interactive_learning
if zipped_model and os.path.exists(zipped_model):
args.model = zipped_model
with model.unpack_model(zipped_model) as model_path:
args.core, args.nlu = model.get_model_subdirectories(model_path)
stories_directory = data.get_core_directory(args.data)
args.endpoints = get_validated_path(
args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
do_interactive_learning(args, stories_directory)
else:
print_error(
"Interactive learning process cannot be started as no initial model was "
"found. Use 'rasa train' to train a model."
)
def get_provided_model(arg_model: Text):
model_path = get_validated_path(arg_model, "model", DEFAULT_MODELS_PATH)
if os.path.isdir(model_path):
model_path = get_latest_model(model_path)
return model_path
def check_training_data(args):
training_files = [
get_validated_path(f, "data", DEFAULT_DATA_PATH, none_is_valid=True)
for f in args.data
]
story_files, nlu_files = data.get_core_nlu_files(training_files)
if not story_files or not nlu_files:
print_error(
"Cannot train initial Rasa model. Please provide NLU and Core data "
"using the '--data' argument."
)
exit(1)
| Python | 0 |
7329757e1ad30e327c1ae823a8302c79482d6b9c | Update BUILD_OSS to 4632 | src/data/version/mozc_version_template.bzl | src/data/version/mozc_version_template.bzl | # Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR = 2
MINOR = 26
# BUILD number used for the OSS version.
BUILD_OSS = 4632
# Number to be increased. This value may be replaced by other tools.
BUILD = BUILD_OSS
# Represent the platform and release channel.
REVISION = 100
REVISION_MACOS = REVISION + 1
# This version represents the version of Mozc IME engine (converter, predictor,
# etc.). This version info is included both in the Mozc server and in the Mozc
# data set file so that the Mozc server can accept only the compatible version
# of data set file. The engine version must be incremented when:
# * POS matcher definition and/or conversion models were changed,
# * New data are added to the data set file, and/or
# * Any changes that loose data compatibility are made.
ENGINE_VERSION = 24
# This version is used to manage the data version and is included only in the
# data set file. DATA_VERSION can be incremented without updating
# ENGINE_VERSION as long as it's compatible with the engine.
# This version should be reset to 0 when ENGINE_VERSION is incremented.
DATA_VERSION = 10
| # Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR = 2
MINOR = 26
# BUILD number used for the OSS version.
BUILD_OSS = 4624
# Number to be increased. This value may be replaced by other tools.
BUILD = BUILD_OSS
# Represent the platform and release channel.
REVISION = 100
REVISION_MACOS = REVISION + 1
# This version represents the version of Mozc IME engine (converter, predictor,
# etc.). This version info is included both in the Mozc server and in the Mozc
# data set file so that the Mozc server can accept only the compatible version
# of data set file. The engine version must be incremented when:
# * POS matcher definition and/or conversion models were changed,
# * New data are added to the data set file, and/or
# * Any changes that loose data compatibility are made.
ENGINE_VERSION = 24
# This version is used to manage the data version and is included only in the
# data set file. DATA_VERSION can be incremented without updating
# ENGINE_VERSION as long as it's compatible with the engine.
# This version should be reset to 0 when ENGINE_VERSION is incremented.
DATA_VERSION = 10
| Python | 0 |
e243e907e58047e18c0a16e061f7aa718e3b5854 | Remove unavailable imports | statsmodels/compat/__init__.py | statsmodels/compat/__init__.py | from .python import ( # noqa:F401
PY3, PY37,
bytes, str, unicode, string_types,
asunicode, asbytes, asstr, asstr2,
range, zip, filter, map,
lrange, lzip, lmap, lfilter,
cStringIO, StringIO, BytesIO,
cPickle, pickle,
iteritems, iterkeys, itervalues,
urlopen, urljoin, urlencode, HTTPError, URLError,
reduce, long, unichr, zip_longest,
builtins,
getargspec,
next, get_class
)
__all__ = ['PY3', 'PY37', 'bytes', 'str', 'unicode', 'string_types',
'asunicode', 'asbytes', 'asstr', 'asstr2', 'range', 'zip',
'filter', 'map', 'lrange', 'lzip', 'lmap', 'lfilter', 'cStringIO',
'StringIO', 'BytesIO', 'cPickle', 'pickle', 'iteritems',
'iterkeys', 'itervalues', 'urlopen', 'urljoin', 'urlencode',
'HTTPError', 'URLError', 'reduce', 'long', 'unichr', 'zip_longest',
'builtins', 'getargspec', 'next', 'get_class']
| from .python import ( # noqa:F401
PY3, PY37,
bytes, str, unicode, string_types,
asunicode, asbytes, asstr, asstr2, asunicode_nested, asbytes_nested,
range, zip, filter, map,
lrange, lzip, lmap, lfilter,
cStringIO, StringIO, BytesIO,
cPickle, pickle,
iteritems, iterkeys, itervalues,
urlopen, urljoin, urlencode, HTTPError, URLError,
reduce, long, unichr, zip_longest,
strchar,
isfileobj,
open_latin1,
builtins,
getargspec,
input,
getexception,
advance_iterator, next,
callable,
get_function_name, get_class
)
| Python | 0 |
0629b30ade8b619697e8cc28d651904e742cd70e | Correct inst method names in system info, add Docker version (#36360) | homeassistant/helpers/system_info.py | homeassistant/helpers/system_info.py | """Helper to gather system info."""
import os
import platform
from typing import Dict
from homeassistant.const import __version__ as current_version
from homeassistant.loader import bind_hass
from homeassistant.util.package import is_virtual_env
from .typing import HomeAssistantType
@bind_hass
async def async_get_system_info(hass: HomeAssistantType) -> Dict:
"""Return info about the system."""
info_object = {
"installation_type": "Unknown",
"version": current_version,
"dev": "dev" in current_version,
"hassio": hass.components.hassio.is_hassio(),
"virtualenv": is_virtual_env(),
"python_version": platform.python_version(),
"docker": False,
"arch": platform.machine(),
"timezone": str(hass.config.time_zone),
"os_name": platform.system(),
"os_version": platform.release(),
}
if platform.system() == "Windows":
info_object["os_version"] = platform.win32_ver()[0]
elif platform.system() == "Darwin":
info_object["os_version"] = platform.mac_ver()[0]
elif platform.system() == "Linux":
info_object["docker"] = os.path.isfile("/.dockerenv")
# Determine installation type on current data
if info_object["docker"]:
info_object["installation_type"] = "Home Assistant Container"
elif is_virtual_env():
info_object["installation_type"] = "Home Assistant Core"
# Enrich with Supervisor information
if hass.components.hassio.is_hassio():
info = hass.components.hassio.get_info()
host = hass.components.hassio.get_host_info()
info_object["supervisor"] = info.get("supervisor")
info_object["host_os"] = host.get("operating_system")
info_object["chassis"] = host.get("chassis")
info_object["docker_version"] = info.get("docker")
if info.get("hassos") is not None:
info_object["installation_type"] = "Home Assistant"
else:
info_object["installation_type"] = "Home Assistant Supervised"
return info_object
| """Helper to gather system info."""
import os
import platform
from typing import Dict
from homeassistant.const import __version__ as current_version
from homeassistant.loader import bind_hass
from homeassistant.util.package import is_virtual_env
from .typing import HomeAssistantType
@bind_hass
async def async_get_system_info(hass: HomeAssistantType) -> Dict:
"""Return info about the system."""
info_object = {
"installation_type": "Unknown",
"version": current_version,
"dev": "dev" in current_version,
"hassio": hass.components.hassio.is_hassio(),
"virtualenv": is_virtual_env(),
"python_version": platform.python_version(),
"docker": False,
"arch": platform.machine(),
"timezone": str(hass.config.time_zone),
"os_name": platform.system(),
"os_version": platform.release(),
}
if platform.system() == "Windows":
info_object["os_version"] = platform.win32_ver()[0]
elif platform.system() == "Darwin":
info_object["os_version"] = platform.mac_ver()[0]
elif platform.system() == "Linux":
info_object["docker"] = os.path.isfile("/.dockerenv")
# Determine installation type on current data
if info_object["docker"]:
info_object["installation_type"] = "Home Assistant Core on Docker"
elif is_virtual_env():
info_object[
"installation_type"
] = "Home Assistant Core in a Python Virtual Environment"
# Enrich with Supervisor information
if hass.components.hassio.is_hassio():
info = hass.components.hassio.get_info()
host = hass.components.hassio.get_host_info()
info_object["supervisor"] = info.get("supervisor")
info_object["host_os"] = host.get("operating_system")
info_object["chassis"] = host.get("chassis")
if info.get("hassos") is not None:
info_object["installation_type"] = "Home Assistant"
else:
info_object["installation_type"] = "Home Assistant Supervised"
return info_object
| Python | 0 |
bc638d11be50f8480d1f103d3a25484c6ccb52b7 | clean code in disocvery_json_view.py and add comments | hs_core/views/discovery_json_view.py | hs_core/views/discovery_json_view.py | import simplejson as json
from django.http import HttpResponse
from haystack.generic_views import FacetedSearchView
from hs_core.discovery_form import DiscoveryForm
# View class for generating JSON data format from Haystack
# returned JSON objects array is used for building the map view
class DiscoveryJsonView(FacetedSearchView):
# set facet fields
facet_fields = ['author', 'subjects', 'resource_type', 'public', 'owners_names', 'discoverable']
# declare form class to use in this view
form_class = DiscoveryForm
# overwrite Haystack generic_view.py form_valid() function to generate JSON response
def form_valid(self, form):
# initialize an empty array for holding the result objects with coordinate values
coor_values = []
# get query set
self.queryset = form.search()
# When we have a GET request with search query, build our JSON objects array
if len(self.request.GET):
# iterate all the search results
for result in self.get_queryset():
# initialize a null JSON object
json_obj = {}
# assign title and url values to the object
json_obj['title'] = result.object.title
json_obj['get_absolute_url'] = result.object.get_absolute_url()
# iterate all the coverage values
for coverage in result.object.metadata.coverages.all():
# if coverage type is point, assign 'east' and 'north' coordinates to the object
if coverage.type == 'point':
json_obj['coverage_type'] = coverage.type
json_obj['east'] = coverage.value['east']
json_obj['north'] = coverage.value['north']
# elif coverage type is box, assign 'northlimit', 'eastlimit', 'southlimit' and 'westlimit' coordinates to the object
elif coverage.type == 'box':
json_obj['coverage_type'] = coverage.type
json_obj['northlimit'] = coverage.value['northlimit']
json_obj['eastlimit'] = coverage.value['eastlimit']
json_obj['southlimit'] = coverage.value['southlimit']
json_obj['westlimit'] = coverage.value['westlimit']
# else, skip
else:
continue
# encode object to JSON format
coor_obj = json.dumps(json_obj)
# add JSON object the results array
coor_values.append(coor_obj)
# encode the results results array to JSON array
the_data = json.dumps(coor_values)
# return JSON response
return HttpResponse(the_data, content_type='application/json')
| import simplejson as json
from django.http import HttpResponse
from haystack.query import SearchQuerySet
from django import forms
from haystack.forms import FacetedSearchForm
from haystack.generic_views import FacetedSearchView
from django.core import serializers
from hs_core.discovery_form import DiscoveryForm
class DiscoveryJsonView(FacetedSearchView):
facet_fields = ['author', 'subjects', 'resource_type', 'public', 'owners_names', 'discoverable']
form_class = DiscoveryForm
def form_valid(self, form):
coor_values = []
coordinate_dictionary = []
self.queryset = form.search()
if len(self.request.GET):
for result in self.get_queryset():
json_obj = {}
json_obj['title'] = result.object.title
json_obj['get_absolute_url'] = result.object.get_absolute_url()
for coverage in result.object.metadata.coverages.all():
if coverage.type == 'point':
json_obj['coverage_type'] = coverage.type
json_obj['east'] = coverage.value['east']
json_obj['north'] = coverage.value['north']
elif coverage.type == 'box':
json_obj['coverage_type'] = coverage.type
json_obj['northlimit'] = coverage.value['northlimit']
json_obj['eastlimit'] = coverage.value['eastlimit']
json_obj['southlimit'] = coverage.value['southlimit']
json_obj['westlimit'] = coverage.value['westlimit']
else:
continue
coor_obj = json.dumps(json_obj)
coor_values.append(coor_obj)
the_data = json.dumps(coor_values)
return HttpResponse(the_data, content_type='application/json')
| Python | 0 |
c1b19af7229d582f7bd474a05a679cf45e3c9bf8 | add proxy + fix import modules | tests/basics.py | tests/basics.py | # -*- coding: utf-8 -*-
"""
@author: Nicolas Rivet
test the connection to IG API
do some basic operations
"""
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'ig')))
import ig_service as igs
import ig_tools as igt
def main():
"""Main module for testing."""
#get config for demo API
proxy_user, proxy_password, api_key, username, password, account = \
igt.getconfig('demo')
#login demo API
service=igs.IGservice(username, password, api_key, account, 'demo', proxy_user, proxy_password)
log=service.login()
print('\n', 'login', '\n', log)
#get newest bidask
instrument='CS.D.EURUSD.CFD.IP'
bidask=service.get_bidask(instrument)
print('\n', 'get_bidask of EURUSD', '\n', bidask)
#get historical closes
resolution='MINUTE'
max_size=10
closes=service.get_closes(instrument, resolution, max_size)
print('\n', 'get_closes of EURUSD for the last 10 minutes', '\n', closes)
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
"""
@author: Nicolas Rivet
test the connection to IG API
do some basic operations
"""
from ig.ig_service import IGservice as igs
import ig.ig_tools as igt
def main():
"""Main module for testing."""
#get config for demo API
proxy_user, proxy_password, api_key, username, password, account = \
igt.getconfig('demo')
#login demo API
service=igs(username, password, api_key, account, 'demo')
log=igs.login(service)
print(log[0])
#get newest bidask
instrument='CS.D.EURUSD.CFD.IP'
bidask=igs.get_bidask(service, instrument)
print(bidask)
#get historical closes
resolution='MINUTE'
max_size=10
closes=igs.get_closes(service, instrument, resolution, max_size)
print(closes)
if __name__ == '__main__':
main() | Python | 0 |
d23a68d464c62cdefb76dbe5855110374680ae61 | Add coverage metrics to python code | regulations/settings/dev.py | regulations/settings/dev.py | from .base import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
STATICFILES_DIRS = (
root('static'),
)
OFFLINE_OUTPUT_DIR = '/tmp/'
INSTALLED_APPS += (
'django_nose',
)
NOSE_ARGS = [
'--with-coverage',
'--cover-package=regulations',
'--exclude-dir=regulations/uitests'
]
try:
from local_settings import *
except ImportError:
pass
| from .base import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
STATICFILES_DIRS = (
root('static'),
)
OFFLINE_OUTPUT_DIR = '/tmp/'
INSTALLED_APPS += (
'django_nose',
)
NOSE_ARGS = [
'--exclude-dir=regulations/uitests'
]
try:
from local_settings import *
except ImportError:
pass
| Python | 0.000004 |
6b5d4f43b5d22f70db6d08f8093f88785359a404 | Implement DirectoryTarget | streaming_form_data/targets.py | streaming_form_data/targets.py | import hashlib
from typing import Callable, Optional
class BaseTarget:
"""
Targets determine what to do with some input once the parser is done
processing it. Any new Target should inherit from this base class and
override the :code:`data_received` function.
Attributes:
multipart_filename: the name of the file advertised by the user,
extracted from the :code:`Content-Disposition` header. Please note
that this value comes directly from the user input and is not
sanitized, so be careful in using it directly.
multipart_content_type: MIME Content-Type of the file, extracted from
the :code:`Content-Type` HTTP header
"""
def __init__(self, validator: Optional[Callable] = None):
self.multipart_filename = None
self.multipart_content_type = None
self._started = False
self._finished = False
self._validator = validator
def _validate(self, chunk: bytes):
if self._validator:
self._validator(chunk)
def start(self):
self._started = True
self.on_start()
def on_start(self):
pass
def data_received(self, chunk: bytes):
self._validate(chunk)
self.on_data_received(chunk)
def on_data_received(self, chunk: bytes):
raise NotImplementedError()
def finish(self):
self.on_finish()
self._finished = True
def on_finish(self):
pass
class NullTarget(BaseTarget):
"""NullTarget ignores whatever input is passed in.
This is mostly useful for internal use and should (normally) not be
required by external users.
"""
def on_data_received(self, chunk: bytes):
pass
class ValueTarget(BaseTarget):
"""ValueTarget stores the input in an in-memory list of bytes.
This is useful in case you'd like to have the value contained in an
in-memory string.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._values = []
def on_data_received(self, chunk: bytes):
self._values.append(chunk)
@property
def value(self):
return b''.join(self._values)
class FileTarget(BaseTarget):
"""FileTarget writes (streams) the input to an on-disk file."""
def __init__(
self, filename: str, allow_overwrite: bool = True, *args, **kwargs
):
super().__init__(*args, **kwargs)
self.filename = filename
self._mode = 'wb' if allow_overwrite else 'xb'
self._fd = None
def on_start(self):
self._fd = open(self.filename, self._mode)
def on_data_received(self, chunk: bytes):
if self._fd:
self._fd.write(chunk)
def on_finish(self):
if self._fd:
self._fd.close()
class DirectoryTarget(BaseTarget):
"""DirectoryTarget writes (streams) the different input to an on-disk directory."""
def __init__(
self, directorypath: str, allow_overwrite: bool = True, *args, **kwargs
):
super().__init__(*args, **kwargs)
self.directorypath = directorypath
self._mode = 'wb' if allow_overwrite else 'xb'
self._fd = None
self.multipart_filenames = []
self.multipart_content_types = []
def on_start(self):
self._fd = open(self.directorypath.joinpath(self.multipart_filename), self._mode)
def on_data_received(self, chunk: bytes):
if self._fd:
self._fd.write(chunk)
def on_finish(self):
self.multipart_filenames.append(self.multipart_filename)
self.multipart_content_types.append(self.multipart_content_type)
if self._fd:
self._fd.close()
class SHA256Target(BaseTarget):
"""SHA256Target calculates the SHA256 hash of the given input."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._hash = hashlib.sha256()
def on_data_received(self, chunk: bytes):
self._hash.update(chunk)
@property
def value(self):
return self._hash.hexdigest()
| import hashlib
from typing import Callable, Optional
class BaseTarget:
"""
Targets determine what to do with some input once the parser is done
processing it. Any new Target should inherit from this base class and
override the :code:`data_received` function.
Attributes:
multipart_filename: the name of the file advertised by the user,
extracted from the :code:`Content-Disposition` header. Please note
that this value comes directly from the user input and is not
sanitized, so be careful in using it directly.
multipart_content_type: MIME Content-Type of the file, extracted from
the :code:`Content-Type` HTTP header
"""
def __init__(self, validator: Optional[Callable] = None):
self.multipart_filename = None
self.multipart_content_type = None
self._started = False
self._finished = False
self._validator = validator
def _validate(self, chunk: bytes):
if self._validator:
self._validator(chunk)
def start(self):
self._started = True
self.on_start()
def on_start(self):
pass
def data_received(self, chunk: bytes):
self._validate(chunk)
self.on_data_received(chunk)
def on_data_received(self, chunk: bytes):
raise NotImplementedError()
def finish(self):
self.on_finish()
self._finished = True
def on_finish(self):
pass
class NullTarget(BaseTarget):
"""NullTarget ignores whatever input is passed in.
This is mostly useful for internal use and should (normally) not be
required by external users.
"""
def on_data_received(self, chunk: bytes):
pass
class ValueTarget(BaseTarget):
"""ValueTarget stores the input in an in-memory list of bytes.
This is useful in case you'd like to have the value contained in an
in-memory string.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._values = []
def on_data_received(self, chunk: bytes):
self._values.append(chunk)
@property
def value(self):
return b''.join(self._values)
class FileTarget(BaseTarget):
"""FileTarget writes (streams) the input to an on-disk file."""
def __init__(
self, filename: str, allow_overwrite: bool = True, *args, **kwargs
):
super().__init__(*args, **kwargs)
self.filename = filename
self._mode = 'wb' if allow_overwrite else 'xb'
self._fd = None
def on_start(self):
self._fd = open(self.filename, self._mode)
def on_data_received(self, chunk: bytes):
if self._fd:
self._fd.write(chunk)
def on_finish(self):
if self._fd:
self._fd.close()
class SHA256Target(BaseTarget):
"""SHA256Target calculates the SHA256 hash of the given input."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._hash = hashlib.sha256()
def on_data_received(self, chunk: bytes):
self._hash.update(chunk)
@property
def value(self):
return self._hash.hexdigest()
| Python | 0 |
2bb57c932f3337fe5df24040523920f399a88fb2 | The function apply_template has been renamed to display. | trac/File.py | trac/File.py | # -*- coding: iso8859-1 -*-
#
# Copyright (C) 2003, 2004 Edgewall Software
# Copyright (C) 2003, 2004 Jonas Borgstrm <jonas@edgewall.com>
#
# Trac is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Trac is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Author: Jonas Borgstrm <jonas@edgewall.com>
import sys
import StringIO
from time import gmtime, strftime
from svn import fs, util, delta
from Module import Module
from util import dict_get_with_default
import perm
class File (Module):
CHUNK_SIZE = 4096
def render (self):
self.perm.assert_permission (perm.FILE_VIEW)
def get_mime_type (self, root, path):
"""
Try to use the mime-type stored in subversion. text/plain is default.
"""
type = fs.node_prop (root, path, util.SVN_PROP_MIME_TYPE, self.pool)
if not type:
type = 'text/plain'
return type
def display (self):
rev = dict_get_with_default(self.args, 'rev', None)
path = dict_get_with_default(self.args, 'path', '/')
if not rev:
rev = fs.youngest_rev(self.fs_ptr, self.pool)
else:
rev = int(rev)
root = fs.revision_root(self.fs_ptr, rev, self.pool)
mime_type = self.get_mime_type (root, path)
size = fs.file_length(root, path, self.pool)
date = fs.revision_prop(self.fs_ptr, rev,
util.SVN_PROP_REVISION_DATE, self.pool)
date_seconds = util.svn_time_from_cstring(date, self.pool) / 1000000
date = strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime(date_seconds))
sys.stdout.write('Last-Modified: %s\r\n' % date)
sys.stdout.write('Content-Length: %d\r\n' % size)
sys.stdout.write('Content-Type: %s\r\n\r\n' % mime_type)
file = fs.file_contents(root, path, self.pool)
while 1:
data = util.svn_stream_read(file, self.CHUNK_SIZE)
if not data:
break
sys.stdout.write(data)
| # -*- coding: iso8859-1 -*-
#
# Copyright (C) 2003, 2004 Edgewall Software
# Copyright (C) 2003, 2004 Jonas Borgstrm <jonas@edgewall.com>
#
# Trac is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Trac is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Author: Jonas Borgstrm <jonas@edgewall.com>
import sys
import StringIO
from time import gmtime, strftime
from svn import fs, util, delta
from Module import Module
from util import dict_get_with_default
import perm
class File (Module):
CHUNK_SIZE = 4096
def render (self):
self.perm.assert_permission (perm.FILE_VIEW)
def get_mime_type (self, root, path):
"""
Try to use the mime-type stored in subversion. text/plain is default.
"""
type = fs.node_prop (root, path, util.SVN_PROP_MIME_TYPE, self.pool)
if not type:
type = 'text/plain'
return type
def apply_template (self):
rev = dict_get_with_default(self.args, 'rev', None)
path = dict_get_with_default(self.args, 'path', '/')
if not rev:
rev = fs.youngest_rev(self.fs_ptr, self.pool)
else:
rev = int(rev)
root = fs.revision_root(self.fs_ptr, rev, self.pool)
mime_type = self.get_mime_type (root, path)
size = fs.file_length(root, path, self.pool)
date = fs.revision_prop(self.fs_ptr, rev,
util.SVN_PROP_REVISION_DATE, self.pool)
date_seconds = util.svn_time_from_cstring(date, self.pool) / 1000000
date = strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime(date_seconds))
sys.stdout.write('Last-Modified: %s\r\n' % date)
sys.stdout.write('Content-Length: %d\r\n' % size)
sys.stdout.write('Content-Type: %s\r\n\r\n' % mime_type)
file = fs.file_contents(root, path, self.pool)
while 1:
data = util.svn_stream_read(file, self.CHUNK_SIZE)
if not data:
break
sys.stdout.write(data)
| Python | 0.999875 |
8cc36a325e8bedb7894f31fe049aee1aef903811 | remove unused code | examples/glyphs/buttons_server.py | examples/glyphs/buttons_server.py | from __future__ import print_function
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.plotting import curdoc
from bokeh.models.widgets import (
VBox, Icon,
Button, Toggle, Dropdown,
CheckboxGroup, RadioGroup,
CheckboxButtonGroup, RadioButtonGroup,
)
from bokeh.client import push_session
def button_handler():
print("button_handler: click")
def toggle_handler(active):
print("toggle_handler: %s" % active)
def dropdown_handler(value):
print("dropdown_handler: %s" % value)
def split_handler(value):
print("split_handler: %s" % value)
def checkbox_group_handler(active):
print("checkbox_group_handler: %s" % active)
def radio_group_handler(active):
print("radio_group_handler: %s" % active)
def checkbox_button_group_handler(active):
print("checkbox_button_group_handler: %s" % active)
def radio_button_group_handler(active):
print("radio_button_group_handler: %s" % active)
button = Button(label="Push button", icon=Icon(name="check"), type="primary")
button.on_click(button_handler)
toggle = Toggle(label="Toggle button", type="success")
toggle.on_click(toggle_handler)
menu = [("Item 1", "item_1"), ("Item 2", "item_2"), None, ("Item 3", "item_3")]
dropdown = Dropdown(label="Dropdown button", type="warning", menu=menu)
dropdown.on_click(dropdown_handler)
menu = [("Item 1", "foo"), ("Item 2", "bar"), None, ("Item 3", "baz")]
split = Dropdown(label="Split button", type="danger", menu=menu, default_value="baz")
split.on_click(split_handler)
checkbox_group = CheckboxGroup(labels=["Option 1", "Option 2", "Option 3"], active=[0, 1])
checkbox_group.on_click(checkbox_group_handler)
radio_group = RadioGroup(labels=["Option 1", "Option 2", "Option 3"], active=0)
radio_group.on_click(radio_group_handler)
checkbox_button_group = CheckboxButtonGroup(labels=["Option 1", "Option 2", "Option 3"], active=[0, 1])
checkbox_button_group.on_click(checkbox_button_group_handler)
radio_button_group = RadioButtonGroup(labels=["Option 1", "Option 2", "Option 3"], active=0)
radio_button_group.on_click(radio_button_group_handler)
vbox = VBox(children=[button, toggle, dropdown, split, checkbox_group, radio_group, checkbox_button_group, radio_button_group])
document = Document()
document.add(vbox)
session = push_session(document)
session.show()
if __name__ == "__main__":
session.loop_until_closed()
| from __future__ import print_function
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.plotting import curdoc
from bokeh.models.widgets import (
VBox, Icon,
Button, Toggle, Dropdown,
CheckboxGroup, RadioGroup,
CheckboxButtonGroup, RadioButtonGroup,
)
from bokeh.models import Plot
from bokeh.client import push_session
def button_handler():
print("button_handler: click")
def toggle_handler(active):
print("toggle_handler: %s" % active)
def dropdown_handler(value):
print("dropdown_handler: %s" % value)
def split_handler(value):
print("split_handler: %s" % value)
def checkbox_group_handler(active):
print("checkbox_group_handler: %s" % active)
def radio_group_handler(active):
print("radio_group_handler: %s" % active)
def checkbox_button_group_handler(active):
print("checkbox_button_group_handler: %s" % active)
def radio_button_group_handler(active):
print("radio_button_group_handler: %s" % active)
button = Button(label="Push button", icon=Icon(name="check"), type="primary")
button.on_click(button_handler)
toggle = Toggle(label="Toggle button", type="success")
toggle.on_click(toggle_handler)
menu = [("Item 1", "item_1"), ("Item 2", "item_2"), None, ("Item 3", "item_3")]
dropdown = Dropdown(label="Dropdown button", type="warning", menu=menu)
dropdown.on_click(dropdown_handler)
menu = [("Item 1", "foo"), ("Item 2", "bar"), None, ("Item 3", "baz")]
split = Dropdown(label="Split button", type="danger", menu=menu, default_value="baz")
split.on_click(split_handler)
checkbox_group = CheckboxGroup(labels=["Option 1", "Option 2", "Option 3"], active=[0, 1])
checkbox_group.on_click(checkbox_group_handler)
radio_group = RadioGroup(labels=["Option 1", "Option 2", "Option 3"], active=0)
radio_group.on_click(radio_group_handler)
checkbox_button_group = CheckboxButtonGroup(labels=["Option 1", "Option 2", "Option 3"], active=[0, 1])
checkbox_button_group.on_click(checkbox_button_group_handler)
radio_button_group = RadioButtonGroup(labels=["Option 1", "Option 2", "Option 3"], active=0)
radio_button_group.on_click(radio_button_group_handler)
vbox = VBox(children=[button, toggle, dropdown, split, checkbox_group, radio_group, checkbox_button_group, radio_button_group])
document = Document()
document.add(vbox)
session = push_session(document)
session.show()
if __name__ == "__main__":
session.loop_until_closed()
| Python | 0.000017 |
5a582564e3fcf97aba0e9595807e1cdecb408210 | add data integration to sync sequence | frappe/model/sync.py | frappe/model/sync.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
"""
Sync's doctype and docfields from txt files to database
perms will get synced only if none exist
"""
import frappe
import os
from frappe.modules.import_file import import_file_by_path
from frappe.modules.patch_handler import block_user
from frappe.utils import update_progress_bar
def sync_all(force=0, verbose=False, reset_permissions=False):
block_user(True)
for app in frappe.get_installed_apps():
sync_for(app, force, verbose=verbose, reset_permissions=reset_permissions)
block_user(False)
frappe.clear_cache()
def sync_for(app_name, force=0, sync_everything = False, verbose=False, reset_permissions=False):
files = []
if app_name == "frappe":
# these need to go first at time of install
for d in (("core", "docfield"),
("core", "docperm"),
("core", "has_role"),
("core", "doctype"),
("core", "user"),
("core", "role"),
("custom", "custom_field"),
("custom", "property_setter"),
("website", "web_form"),
("website", "web_form_field"),
("website", "portal_menu_item"),
("data_migration", "data_migration_mapping_detail"),
("data_migration", "data_migration_mapping"),
("data_migration", "data_migration_plan_mapping"),
("data_migration", "data_migration_plan")):
files.append(os.path.join(frappe.get_app_path("frappe"), d[0],
"doctype", d[1], d[1] + ".json"))
for module_name in frappe.local.app_modules.get(app_name) or []:
folder = os.path.dirname(frappe.get_module(app_name + "." + module_name).__file__)
get_doc_files(files, folder, force, sync_everything, verbose=verbose)
l = len(files)
if l:
for i, doc_path in enumerate(files):
import_file_by_path(doc_path, force=force, ignore_version=True,
reset_permissions=reset_permissions, for_sync=True)
#print module_name + ' | ' + doctype + ' | ' + name
frappe.db.commit()
# show progress bar
update_progress_bar("Updating DocTypes for {0}".format(app_name), i, l)
# print each progress bar on new line
print()
def get_doc_files(files, start_path, force=0, sync_everything = False, verbose=False):
"""walk and sync all doctypes and pages"""
# load in sequence - warning for devs
document_types = ['doctype', 'page', 'report', 'print_format',
'website_theme', 'web_form', 'email_alert', 'print_style',
'data_migration_mapping', 'data_migration_plan']
for doctype in document_types:
doctype_path = os.path.join(start_path, doctype)
if os.path.exists(doctype_path):
for docname in os.listdir(doctype_path):
if os.path.isdir(os.path.join(doctype_path, docname)):
doc_path = os.path.join(doctype_path, docname, docname) + ".json"
if os.path.exists(doc_path):
if not doc_path in files:
files.append(doc_path)
| # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
"""
Sync's doctype and docfields from txt files to database
perms will get synced only if none exist
"""
import frappe
import os
from frappe.modules.import_file import import_file_by_path
from frappe.modules.patch_handler import block_user
from frappe.utils import update_progress_bar
def sync_all(force=0, verbose=False, reset_permissions=False):
block_user(True)
for app in frappe.get_installed_apps():
sync_for(app, force, verbose=verbose, reset_permissions=reset_permissions)
block_user(False)
frappe.clear_cache()
def sync_for(app_name, force=0, sync_everything = False, verbose=False, reset_permissions=False):
files = []
if app_name == "frappe":
# these need to go first at time of install
for d in (("core", "docfield"), ("core", "docperm"), ("core", "has_role"), ("core", "doctype"),
("core", "user"), ("core", "role"), ("custom", "custom_field"),
("custom", "property_setter"), ("website", "web_form"),
("website", "web_form_field"), ("website", "portal_menu_item")):
files.append(os.path.join(frappe.get_app_path("frappe"), d[0],
"doctype", d[1], d[1] + ".json"))
for module_name in frappe.local.app_modules.get(app_name) or []:
folder = os.path.dirname(frappe.get_module(app_name + "." + module_name).__file__)
get_doc_files(files, folder, force, sync_everything, verbose=verbose)
l = len(files)
if l:
for i, doc_path in enumerate(files):
import_file_by_path(doc_path, force=force, ignore_version=True,
reset_permissions=reset_permissions, for_sync=True)
#print module_name + ' | ' + doctype + ' | ' + name
frappe.db.commit()
# show progress bar
update_progress_bar("Updating DocTypes for {0}".format(app_name), i, l)
# print each progress bar on new line
print()
def get_doc_files(files, start_path, force=0, sync_everything = False, verbose=False):
"""walk and sync all doctypes and pages"""
# load in sequence - warning for devs
document_types = ['doctype', 'page', 'report', 'print_format',
'website_theme', 'web_form', 'email_alert', 'print_style',
'data_migration_mapping', 'data_migration_plan']
for doctype in document_types:
doctype_path = os.path.join(start_path, doctype)
if os.path.exists(doctype_path):
for docname in os.listdir(doctype_path):
if os.path.isdir(os.path.join(doctype_path, docname)):
doc_path = os.path.join(doctype_path, docname, docname) + ".json"
if os.path.exists(doc_path):
if not doc_path in files:
files.append(doc_path)
| Python | 0 |
3a85eff683f9d717958d06faca71c2fb7aaa8394 | fix encoding issues by decoding html before Souping it | trunc/web.py | trunc/web.py | # -*- coding: utf-8 -*-
"""
*********
trunc.web
*********
This module provides classes for accessing web pages."""
from __future__ import absolute_import, print_function
import codecs
import time
from bs4 import BeautifulSoup as Soup
from urllib import FancyURLopener
from .util import fibonacci_number
class MyOpener(FancyURLopener):
"""A FancyURLopener object with a custom User-Agent field.
The ``MyOpener.version`` class attribute contains the User-Agent field.
Use ``MyOpener.set_version()`` to change this attribute.
"""
version = ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) "
"AppleWebKit/600.5.17 (KHTML, like Gecko) "
"Version/8.0.5 Safari/600.5.17")
def set_version(self, new_version):
"""Define a new User-Agent field for the MyOpener class.
:param new_version: desired User-Agent field
:type new_version: ``str``
"""
MyOpener.version = new_version
class Webpage(object):
"""Generic webpage with attributes."""
def __init__(self, address, delay=1, encoding='windows-1251'):
"""Initialize the Webpage object.
:param address: url of the webpage
:param delay: ideal delay interval, in seconds, between page loads
(default is ``1``)
:param encoding: encoding of the webpage
"""
self.address = address
self.opener = MyOpener()
self.delay = delay
self.encoding = encoding
def page(self):
"""Open the webpage.
If there's an error opening the page (i.e., if the Corpus throttles
the scraper), wait and retry in successively longer intervals (which
increase according to the Fibonacci sequence) until the page loads
successfully.
:rtype: ``<'instance'>``
"""
attempt = 1
page_not_loaded = True
while page_not_loaded:
try:
time.sleep(self.delay)
self.page = self.opener.open(self.address)
page_not_loaded = False
except IOError as e:
print("\nIOError: {}\nat {}".format(e, self.address))
time.sleep(fibonacci_number(attempt))
attempt += 1
return self.page
def html(self, encoding=None):
"""Return contents of the Webpage as html."""
if encoding is None:
encoding = self.encoding
return self.page().read().decode()
def soup(self):
"""Return contents of the Webpage as a BeautifulSoup object."""
return Soup(self.html())
| # -*- coding: utf-8 -*-
"""
*********
trunc.web
*********
This module provides classes for accessing web pages."""
from __future__ import absolute_import, print_function
import time
from bs4 import BeautifulSoup as Soup
from urllib import FancyURLopener
from .util import fibonacci_number
class MyOpener(FancyURLopener):
"""A FancyURLopener object with a custom User-Agent field.
The ``MyOpener.version`` class attribute contains the User-Agent field.
Use ``MyOpener.set_version()`` to change this attribute.
"""
version = ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) "
"AppleWebKit/600.5.17 (KHTML, like Gecko) "
"Version/8.0.5 Safari/600.5.17")
def set_version(self, new_version):
"""Define a new User-Agent field for the MyOpener class.
:param new_version: desired User-Agent field
:type new_version: ``str``
"""
MyOpener.version = new_version
class Webpage(object):
"""Generic webpage with attributes."""
def __init__(self, address, delay=1):
"""Initialize the Webpage object.
:param address: url of the webpage
:param delay: ideal delay interval, in seconds, between page loads
(default is ``1``)
"""
self.address = address
self.opener = MyOpener()
self.delay = delay
def open(self):
"""Open the webpage.
If there's an error opening the page (i.e., if the Corpus throttles
the scraper), wait and retry in successively longer intervals (which
increase according to the Fibonacci sequence) until the page loads
successfully.
:rtype: ``<'instance'>``
"""
attempt = 1
page_not_loaded = True
while page_not_loaded:
try:
time.sleep(self.delay)
self.page = self.opener.open(self.address)
page_not_loaded = False
except IOError as e:
print("\nIOError: {}\nat {}".format(e, self.address))
time.sleep(fibonacci_number(attempt))
attempt += 1
return self.page
def html(self):
"""Return contents of the Webpage as html."""
return self.open().read()
def soup(self):
"""Return contents of the Webpage as a BeautifulSoup object."""
return Soup(self.html())
| Python | 0.000002 |
93cab6327aef7386dba6f293a22099272af6af10 | create resouce only if not exist | src/infrastructure/annotations/requires.py | src/infrastructure/annotations/requires.py | '''
Created on Jun 19, 2013
@author: mpastern
'''
from src.resource.resourcemanager import ResourceManager
from src.errors.resourcemanagernotfounderror import ResourceManagerNotFoundError
class resources(object):
def __init__(self, params):
self.params = params
def __call__(self, original_func):
decorator_self = self
def wrappee(*args, **kwargs):
for resource in decorator_self.params:
rm = (resource.__name__ + 'ResourceManager')
rm_class = ResourceManager.getResourceManager(rm)
if rm_class:
rm_instance = rm_class()
if not rm_instance.get(get_only=True):
rm_instance.add(**kwargs)
# TODO: use **kwargs for private init
else:
raise ResourceManagerNotFoundError(rm)
return original_func(*args, **kwargs)
return wrappee
| '''
Created on Jun 19, 2013
@author: mpastern
'''
from src.resource.resourcemanager import ResourceManager
from src.errors.resourcemanagernotfounderror import ResourceManagerNotFoundError
class resources(object):
def __init__(self, params):
self.params = params
def __call__(self, original_func):
decorator_self = self
def wrappee(*args, **kwargs):
for resource in decorator_self.params:
rm = (resource.__name__ + 'ResourceManager')
rm_class = ResourceManager.getResourceManager(rm)
if rm_class:
rm_instance = rm_class()
# if not rm_instance.get():
# rm_instance.add()
rm_instance.add(**kwargs)
# TODO: use **kwargs for private init
else:
raise ResourceManagerNotFoundError(rm)
return original_func(*args, **kwargs)
return wrappee
| Python | 0 |
71b4c326e18ce7e3d0b6aaab5203b3a403a85810 | Update solution_2.py | Problem033/Python/solution_2.py | Problem033/Python/solution_2.py | import math
frac=1.0
for b in range(1,10):
for a in range(1,b):
for c in range(1,10):
if (a*10+b)/(b*10+c)==a/c:
frac*=(a/c)
print(math.ceil(1/frac))
| import math
frac=1.0
for b in range(1,10):
for a in range(1,b):
for c in range(1,10):
if (a*10+b)/(b*10+c)==a/c:
frac*=(a/c)
print(math.ceil(1/frac))
| Python | 0.000002 |
6d7e597ce216093d52ecdcb7db5c087dc6040bb1 | Fix initiation of settings object | fullcalendar/conf.py | fullcalendar/conf.py | from datetime import timedelta
from django.conf import settings as django_settings
default = {
'FULLCALENDAR_FIRST_WEEKDAY': 0,
'FULLCALENDAR_OCCURRENCE_DURATION': timedelta(hours=1),
'FULLCALENDAR_SITE_COLORS': {}
}
settings = type('SettingsDummy', (), default)
for key, value in default.items():
setattr(settings, key,
getattr(django_settings, key, value))
| from datetime import timedelta
from django.conf import settings as django_settings
default = {
'FULLCALENDAR_FIRST_WEEKDAY': 0,
'FULLCALENDAR_OCCURRENCE_DURATION': timedelta(hours=1),
'FULLCALENDAR_SITE_COLORS': {}
}
settings = object()
for key, value in default.items():
setattr(settings, key,
getattr(django_settings, key, value))
| Python | 0.000004 |
52e614f811fb9dfcd0dde46de43f13731a3717a5 | Reformat doc string for txStatHat.__init__ | txstathat.py | txstathat.py | # -*- coding: utf-8 -*-
"""StatHat bindings"""
from __future__ import division, print_function, unicode_literals
import urllib
from twisted.web.client import getPage
try:
from OpenSSL import SSL # noqa
have_ssl = True
except:
have_ssl = False
API_URI = b'http{}://api.stathat.com/ez'.format(b's' if have_ssl else b'')
class txStatHat(object):
"""An API wrapper for StatHat.com."""
def __init__(self, ezkey):
"""Initialize a txStatHat instance.
Does no network activity.
:param ezkey: your API key, i.e. your e-mail address by default.
"""
self.default_args = {'ezkey': ezkey}
def _make_call(self, args):
"""Build postdata using ezkey and supplied dict *args* and post it."""
post_dict = self.default_args.copy()
post_dict.update(args)
d = getPage(
API_URI,
method=b'POST',
postdata=urllib.urlencode(post_dict),
headers={
b'Content-Type': b'application/x-www-form-urlencoded'
},
)
return d
def count(self, stat, count=1):
"""Add *count* to *stat*.
:param stat: a StatHat counter stat
:param count: the value to add to the counter. 1 by default.
:type count: integer
:rtype: twisted.internet.defer.Deferred
"""
return self._make_call({'stat': stat, 'count': unicode(count)})
def value(self, stat, value):
"""Submit *value* to *stat*.
:param stat: a StatHat value stat
:param value: the value to submit
:type value: float or decimal.Decimal
:rtype: twisted.internet.defer.Deferred
"""
return self._make_call({'stat': stat, 'value': unicode(value)})
| # -*- coding: utf-8 -*-
"""StatHat bindings"""
from __future__ import division, print_function, unicode_literals
import urllib
from twisted.web.client import getPage
try:
from OpenSSL import SSL # noqa
have_ssl = True
except:
have_ssl = False
API_URI = b'http{}://api.stathat.com/ez'.format(b's' if have_ssl else b'')
class txStatHat(object):
"""An API wrapper for StatHat.com."""
def __init__(self, ezkey):
"""Initialize a txStatHat instance.
*ezkey* is you API key, i.e. your e-mail address by default. Does no
network activity.
"""
self.default_args = {'ezkey': ezkey}
def _make_call(self, args):
"""Build postdata using ezkey and supplied dict *args* and post it."""
post_dict = self.default_args.copy()
post_dict.update(args)
d = getPage(
API_URI,
method=b'POST',
postdata=urllib.urlencode(post_dict),
headers={
b'Content-Type': b'application/x-www-form-urlencoded'
},
)
return d
def count(self, stat, count=1):
"""Add *count* to *stat*.
:param stat: a StatHat counter stat
:param count: the value to add to the counter. 1 by default.
:type count: integer
:rtype: twisted.internet.defer.Deferred
"""
return self._make_call({'stat': stat, 'count': unicode(count)})
def value(self, stat, value):
"""Submit *value* to *stat*.
:param stat: a StatHat value stat
:param value: the value to submit
:type value: float or decimal.Decimal
:rtype: twisted.internet.defer.Deferred
"""
return self._make_call({'stat': stat, 'value': unicode(value)})
| Python | 0 |
74d5f5c1fe49d0f574a923fc490cd064f3cd52c5 | allow specifying random state | galstreams/random.py | galstreams/random.py |
import astropy.coordinates as coord
import astropy.units as u
import numpy as np
__all__ = ['get_uniform_spherical_angles', 'get_uniform_sphere']
@u.quantity_input(lon_lim=u.deg, lat_lim=u.deg)
def get_uniform_spherical_angles(size=1,
lon_lim=[0., 360]*u.deg,
lat_lim=[-90., 90]*u.deg,
random_state=None):
"""Generate uniform random positions on the sphere
Parameters
----------
size : int
The number of points to generate.
lon_lim : `~astropy.units.Quantity` (optional)
The longitude limits to generate as an astropy Angle object or Quantity
with angular units.
lat_lim : `~astropy.units.Quantity` (optional)
The latitude limits to generate as an astropy Angle object or Quantity
with angular units.
random_state : `numpy.random.RandomState` (optional)
A numpy random state object used to control the random number generator
and seed.
Returns
-------
representation : `~astropy.coordinates.UnitSphericalRepresentation`
An astropy unit spherical representation object containing the random
spherical positions.
"""
if random_state is None:
random_state = np.random
lon = np.random.uniform(lon_lim[0].value,
lon_lim[1].value,
size) * lon_lim.unit
K = np.sin(lat_lim[1]) - np.sin(lat_lim[0])
arg = K * random_state.uniform(size=size) + np.sin(lat_lim[0])
lat = np.arcsin(arg)
return coord.UnitSphericalRepresentation(lon, lat)
@u.quantity_input(lon_lim=u.deg, lat_lim=u.deg, dist_lim=[u.one, u.pc])
def get_uniform_sphere(size,
lon_lim=[0., 360]*u.deg,
lat_lim=[-90., 90]*u.deg,
dist_lim=[0, 1.]*u.one,
random_state=None):
"""Generate uniform random positions inside a spherical volume.
i.e. this can be used to generate points uniformly distributed through a
spherical annulus by specifying the distance limits.
Parameters
----------
size : int
The number of points to generate.
lon_lim : `~astropy.units.Quantity`
The longitude limits to generate as an astropy Angle object or Quantity
with angular units.
lat_lim : `~astropy.units.Quantity`
The latitude limits to generate as an astropy Angle object or Quantity
with angular units.
dist_lim : `~astropy.units.Quantity`
The distance limits to generate as an astropy Quantity, either
dimensionless or with length units.
random_state : `numpy.random.RandomState` (optional)
A numpy random state object used to control the random number generator
and seed.
Returns
-------
representation : `~astropy.coordinates.SphericalRepresentation`
An astropy spherical representation object containing the random
spherical positions.
"""
if random_state is None:
random_state = np.random
rep = get_uniform_spherical_angles(size=size,
lon_lim=lon_lim,
lat_lim=lat_lim,
random_state=random_state)
# R distributed as R^2
r = np.cbrt(random_state.uniform(dist_lim[0].value**3,
dist_lim[1].value**3,
size=size)) * dist_lim.unit
return coord.SphericalRepresentation(lon=rep.lon,
lat=rep.lat,
distance=r)
|
import astropy.coordinates as coord
import astropy.units as u
import numpy as np
__all__ = ['get_uniform_spherical_angles', 'get_uniform_sphere']
@u.quantity_input(lon_lim=u.deg, lat_lim=u.deg)
def get_uniform_spherical_angles(size=1,
lon_lim=[0., 360]*u.deg,
lat_lim=[-90., 90]*u.deg):
"""Generate uniform random positions on the sphere
Parameters
----------
size : int
The number of points to generate.
lon_lim : `~astropy.units.Quantity`
The longitude limits to generate as an astropy Angle object or Quantity
with angular units.
lat_lim : `~astropy.units.Quantity`
The latitude limits to generate as an astropy Angle object or Quantity
with angular units.
Returns
-------
representation : `~astropy.coordinates.UnitSphericalRepresentation`
An astropy unit spherical representation object containing the random
spherical positions.
"""
lon = np.random.uniform(lon_lim[0].value,
lon_lim[1].value,
size) * lon_lim.unit
K = np.sin(lat_lim[1]) - np.sin(lat_lim[0])
arg = K * np.random.uniform(size=size) + np.sin(lat_lim[0])
lat = np.arcsin(arg)
return coord.UnitSphericalRepresentation(lon, lat)
@u.quantity_input(lon_lim=u.deg, lat_lim=u.deg, dist_lim=[u.one, u.pc])
def get_uniform_sphere(size,
lon_lim=[0., 360]*u.deg,
lat_lim=[-90., 90]*u.deg,
dist_lim=[0, 1.]*u.one):
"""Generate uniform random positions inside a spherical volume.
i.e. this can be used to generate points uniformly distributed through a
spherical annulus by specifying the distance limits.
Parameters
----------
size : int
The number of points to generate.
lon_lim : `~astropy.units.Quantity`
The longitude limits to generate as an astropy Angle object or Quantity
with angular units.
lat_lim : `~astropy.units.Quantity`
The latitude limits to generate as an astropy Angle object or Quantity
with angular units.
dist_lim : `~astropy.units.Quantity`
The distance limits to generate as an astropy Quantity, either
dimensionless or with length units.
Returns
-------
representation : `~astropy.coordinates.SphericalRepresentation`
An astropy spherical representation object containing the random
spherical positions.
"""
# R distributed as R^2
r = np.cbrt(np.random.uniform(dist_lim[0].value**3,
dist_lim[1].value**3,
size=size)) * dist_lim.unit
rep = get_uniform_spherical_angles(size=size,
lon_lim=lon_lim,
lat_lim=lat_lim)
return coord.SphericalRepresentation(lon=rep.lon,
lat=rep.lat,
distance=r)
| Python | 0.000001 |
c7a4bfdeb8e20d4469dac85370f640cb944db0d9 | Remove python2 support | src/chattymarkov/database/__init__.py | src/chattymarkov/database/__init__.py | """Chattymarkov database submodule.
This submodule gathers all the supported database formats.
"""
from .databases import JSONFileDatabase, MemoryDatabase, RedisDatabase
class ChattymarkovDatabaseError(Exception):
"""Base exception class for chattymarkov.database related errors."""
class UnknownDatabasePrefixError(ChattymarkovDatabaseError):
"""Exception class for unknown database prefixes errors."""
class InvalidConnectionStringError(ChattymarkovDatabaseError):
"""Exception class for invalid connection string error."""
_DATABASE_PREFIXES = {}
def database(prefix):
"""Wrap a function responsible for building a database."""
def wrapper(func):
"""Register `func` in the global `_DATABASE_PREFIXES` hash."""
_DATABASE_PREFIXES[prefix] = func
return func
return wrapper
def get_database_builder(prefix):
"""Get the function associated to `prefix` to instanciate a database.
This function is a simple interface around the `_DATABASE_PREFIXES` hash.
Args:
prefix (str): the prefix's database function.
Raises:
UnknownDatabasePrefixError: the prefix is not recognized.
Returns:
function: the function assiociated to the `prefix`.
"""
if prefix not in _DATABASE_PREFIXES:
raise UnknownDatabasePrefixError(
"Database prefix '{}' is unknown.".format(prefix))
return _DATABASE_PREFIXES[prefix]
def _get_connection_params(resource):
"""Extract connection and params from `resource`."""
args = resource.split(';')
if len(args) > 1:
return args[0], args[1:]
else:
return args[0], []
@database('redis')
def build_redis_database(resource):
"""Build a `RedisDatabase` instance to communicate with a redis server.
Args:
resource (str): a string that represents connection information.
Returns:
RedisDatabase: instance to communicate with the redis server.
"""
whitelist = {'password', 'db'}
extra_params = {}
connection, params = _get_connection_params(resource)
# Parse additional parameters, if any
if len(params) > 0:
for param in params:
key, equal, value = param.partition('=')
if key in whitelist:
extra_params[key] = value
if connection.startswith('/'):
# UNIX socket connection
return RedisDatabase(unix_socket_path=connection,
**extra_params)
else:
# TCP socket connection
host, colon, port = connection.partition(':')
if host != '' and colon == ':' and port.isnumeric():
return RedisDatabase(host=host, port=int(port),
**extra_params)
@database('memory')
def build_memory_database(resource):
"""Build a `MemoryDatabase` instance.
Args:
resource (str): path to the memory location. It has actually no sense
at that time. Should be "memory://" anyway.
Returns:
MemoryDatabase: an instance of MemoryDatabase that handles a
connection to the desired database.
"""
return MemoryDatabase()
@database('json')
def build_json_database(resource):
"""Build a `JSONFileDatabase` instance.
Args:
resource (str): path to the JSON file representing the database. If
the file is not empty, it will be loaded. In every cases, upon
instance destruction, the database will be stored in the specified
file.
Returns:
JSONFileDatabase: an instance of JSONFileDatabase that handles a
connection to the desired database.
"""
return JSONFileDatabase(resource)
def build_database_connection(connect_string):
"""Build a database connection based on `connect_string`.
Args:
connect_string (str): connection string for the database connection.
Raises:
InvalidConnectionStringError: raised when the `connect_string` is
invalid.
UnknownDatabasePrefixError: raised when the database prefix is
unknown.
Returns:
AbstractDatabase: an instance of AbstractDatabase that handle a
connection to the desired database.
"""
prefix, colon_slash_slash, resource = connect_string.partition('://')
if colon_slash_slash != '':
builder = get_database_builder(prefix)
return builder(resource)
else:
raise InvalidConnectionStringError(
"Invalid connection string '{}'. Must be of the form "
"prefix://[resource[;param1=value1;param2=value2...]]".format(
prefix))
| """Chattymarkov database submodule.
This submodule gathers all the supported database formats.
"""
import six
from .databases import JSONFileDatabase, MemoryDatabase, RedisDatabase
class ChattymarkovDatabaseError(Exception):
"""Base exception class for chattymarkov.database related errors."""
class UnknownDatabasePrefixError(ChattymarkovDatabaseError):
"""Exception class for unknown database prefixes errors."""
class InvalidConnectionStringError(ChattymarkovDatabaseError):
"""Exception class for invalid connection string error."""
_DATABASE_PREFIXES = {}
def database(prefix):
"""Wrap a function responsible for building a database."""
def wrapper(func):
"""Register `func` in the global `_DATABASE_PREFIXES` hash."""
_DATABASE_PREFIXES[prefix] = func
return func
return wrapper
def get_database_builder(prefix):
"""Get the function associated to `prefix` to instanciate a database.
This function is a simple interface around the `_DATABASE_PREFIXES` hash.
Args:
prefix (str): the prefix's database function.
Raises:
UnknownDatabasePrefixError: the prefix is not recognized.
Returns:
function: the function assiociated to the `prefix`.
"""
if prefix not in _DATABASE_PREFIXES:
raise UnknownDatabasePrefixError(
"Database prefix '{}' is unknown.".format(prefix))
return _DATABASE_PREFIXES[prefix]
def _get_connection_params(resource):
"""Extract connection and params from `resource`."""
args = resource.split(';')
if len(args) > 1:
return args[0], args[1:]
else:
return args[0], []
@database('redis')
def build_redis_database(resource):
"""Build a `RedisDatabase` instance to communicate with a redis server.
Args:
resource (str): a string that represents connection information.
Returns:
RedisDatabase: instance to communicate with the redis server.
"""
whitelist = {'password', 'db'}
extra_params = {}
connection, params = _get_connection_params(resource)
# Parse additional parameters, if any
if len(params) > 0:
for param in params:
key, equal, value = param.partition('=')
if key in whitelist:
extra_params[key] = value
if connection.startswith('/'):
# UNIX socket connection
return RedisDatabase(unix_socket_path=connection,
**extra_params)
else:
# TCP socket connection
host, colon, port = connection.partition(':')
if six.PY2:
port = unicode(port) # noqa
if host != '' and colon == ':' and port.isnumeric():
return RedisDatabase(host=host, port=int(port),
**extra_params)
@database('memory')
def build_memory_database(resource):
"""Build a `MemoryDatabase` instance.
Args:
resource (str): path to the memory location. It has actually no sense
at that time. Should be "memory://" anyway.
Returns:
MemoryDatabase: an instance of MemoryDatabase that handles a
connection to the desired database.
"""
return MemoryDatabase()
@database('json')
def build_json_database(resource):
"""Build a `JSONFileDatabase` instance.
Args:
resource (str): path to the JSON file representing the database. If
the file is not empty, it will be loaded. In every cases, upon
instance destruction, the database will be stored in the specified
file.
Returns:
JSONFileDatabase: an instance of JSONFileDatabase that handles a
connection to the desired database.
"""
return JSONFileDatabase(resource)
def build_database_connection(connect_string):
"""Build a database connection based on `connect_string`.
Args:
connect_string (str): connection string for the database connection.
Raises:
InvalidConnectionStringError: raised when the `connect_string` is
invalid.
UnknownDatabasePrefixError: raised when the database prefix is
unknown.
Returns:
AbstractDatabase: an instance of AbstractDatabase that handle a
connection to the desired database.
"""
prefix, colon_slash_slash, resource = connect_string.partition('://')
if colon_slash_slash != '':
builder = get_database_builder(prefix)
return builder(resource)
else:
raise InvalidConnectionStringError(
"Invalid connection string '{}'. Must be of the form "
"prefix://[resource[;param1=value1;param2=value2...]]".format(
prefix))
| Python | 0.000011 |
b2b123b15f178e81737127a4dda399a31ebb5240 | Update Dice_Probability.py | Week2-Python-Libraries-and-Concepts-Used-in-Research/Dice_Probability.py | Week2-Python-Libraries-and-Concepts-Used-in-Research/Dice_Probability.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 17 16:26:48 2017
@author: lamahamadeh
"""
#First: Python-based implementation
#------------------------------------
'''
source:
-------
Video 2.4.2: Examples Involving Randomness
Week 2 Overview/Python Libraries and Concepts Used in Research
Using python for research
Harvard
online course provided by edx.org
url: https://courses.edx.org/courses/course-v1:HarvardX+PH526x+3T2016/courseware/317ce880d7644d35840b1f734be76b06/391063d8f58242e892efafc9903b36e8/
'''
#roll a dice 100 times and plot a histogram of the outcomes
#meaning: a histogram that shows how frequent the numbers from 1 to 6 appeared in the 100 samples
import random
import matplotlib.pyplot as plt
random.choice([1,2,3,4,5,6]) #this line throws the dice one time
rolls = []
for k in range(100):#we can try 1000, 10000000 times. We can notice that the histogram gets more flat when the number of rolling times increases.
rolls.append(random.choice([1,2,3,4,5,6]))#in this case, after using for loop, we wre rolling the dice 100 times
print(len(rolls))
#draw a histogram
plt.figure()
plt.hist(rolls, bins = np.linspace(0.5,6.5,7));
plt.show()
#This time we will roll 10 dice not jsut one
ys = []
for rep in range(100):#By increasing the number of dice rolls for each dice the distrbution follows the central limit theorem
#The central limit theorem (CLT) states that the sum of a large number of random variables regardless of their distribution will
#approximately follow a normal distribution (or Gaussian distribution).
y = 0
for k in range (10):
x = random.choice([1,2,3,4,5,6])
y = y + x
ys.append(y)
print(len(ys)) #100
print(min(ys))
print(max(ys))
plt.figure()
plt.hist(ys); #the semicolon suppresses the output
plt.show()
#------------------------------------------------------------------
#Second: NumPy implementation
#---------------------------
'''
source:
-------
Video 2.4.3: using the NumPy Random Module
Week 2 Overview/Python Libraries and Concepts Used in Research
Using python for research
Harvard
online course provided by edx.org
url: https://courses.edx.org/courses/course-v1:HarvardX+PH526x+3T2016/courseware/317ce880d7644d35840b1f734be76b06/391063d8f58242e892efafc9903b36e8/
'''
import numpy as np
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 17 16:26:48 2017
@author: lamahamadeh
source:
-------
Video 2.4.2: Examples Involving Randomness
Week 2 Overview/Python Libraries and Concepts Used in Research
Using python for research
Harvard
online course provided by edx.org
url: https://courses.edx.org/courses/course-v1:HarvardX+PH526x+3T2016/courseware/317ce880d7644d35840b1f734be76b06/391063d8f58242e892efafc9903b36e8/
"""
#roll a dice 100 times and plot a histogram of the outcomes
#meaning: a histogram that shows how frequent the numbers from 1 to 6 appeared in the 100 samples
import numpy as np
import random
import matplotlib.pyplot as plt
random.choice([1,2,3,4,5,6]) #this line throws the dice one time
rolls = []
for k in range(100):#we can try 1000, 10000000 times. We can notice that the histogram gets more flat when the number of rolling times increases.
rolls.append(random.choice([1,2,3,4,5,6]))#in this case, after using for loop, we wre rolling the dice 100 times
print(len(rolls))
#draw a histogram
plt.figure()
plt.hist(rolls, bins = np.linspace(0.5,6.5,7));
plt.show()
#This time we will roll 10 dice not jsut one
ys = []
for rep in range(100):#By increasing the number of dice rolls for each dice the distrbution follows the central limit theorem
#The central limit theorem (CLT) states that the sum of a large number of random variables regardless of their distribution will
#approximately follow a normal distribution (or Gaussian distribution).
y = 0
for k in range (10):
x = random.choice([1,2,3,4,5,6])
y = y + x
ys.append(y)
print(len(ys)) #100
print(min(ys))
print(max(ys))
plt.figure()
plt.hist(ys); #the semicolon suppresses the output
plt.show()
| Python | 0.000001 |
67c4d077ee4693290bf9883e90e4ed381b3cd227 | Fix a mistake. | python/matplotlib/hist_logscale_xy.py | python/matplotlib/hist_logscale_xy.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# See:
# -
import numpy as np
import matplotlib.pyplot as plt
# SETUP #######################################################################
# histtype : [‘bar’ | ‘barstacked’ | ‘step’ | ‘stepfilled’]
HIST_TYPE='bar'
ALPHA=0.5
# MAKE DATA ###################################################################
data = np.random.exponential(size=1000000)
#data = np.abs(np.random.normal(size=1000000) * 10000.)
#data = np.random.chisquare(10, size=1000000)
# INIT FIGURE #################################################################
fig = plt.figure(figsize=(8.0, 6.0))
# AX1 #########################################################################
ax1 = fig.add_subplot(211)
res_tuple = ax1.hist(data,
bins=50,
histtype=HIST_TYPE,
alpha=ALPHA)
ax1.set_title("Normal scale")
ax1.set_xlabel("Value")
ax1.set_ylabel("Count")
# AX2 #########################################################################
ax2 = fig.add_subplot(212)
vmin = np.log10(data.min())
vmax = np.log10(data.max())
bins = np.logspace(vmin, vmax, 50) # <- make a range from 10**vmin to 10**vmax
print(bins)
res_tuple = ax2.hist(data,
log=True, # <- Activate log scale on Y axis
bins=bins,
histtype=HIST_TYPE,
alpha=ALPHA)
ax2.set_xscale("log") # <- Activate log scale on Y axis
ax2.set_title("Log scale")
ax2.set_xlabel("Value")
ax2.set_ylabel("Count")
# SHOW AND SAVE FILE ##########################################################
plt.tight_layout()
plt.savefig("hist_logscale_xy.png")
plt.show()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# See:
# -
import numpy as np
import matplotlib.pyplot as plt
# SETUP #######################################################################
# histtype : [‘bar’ | ‘barstacked’ | ‘step’ | ‘stepfilled’]
HIST_TYPE='bar'
ALPHA=0.5
# MAKE DATA ###################################################################
data = np.random.exponential(size=1000000)
#data = np.abs(np.random.normal(size=1000000) * 10000.)
#data = np.random.chisquare(10, size=1000000)
# INIT FIGURE #################################################################
fig = plt.figure(figsize=(8.0, 6.0))
# AX1 #########################################################################
ax1 = fig.add_subplot(211)
res_tuple = ax1.hist(data,
bins=50,
histtype=HIST_TYPE,
alpha=ALPHA)
ax1.set_title("Normal scale")
ax1.set_xlabel("Value")
ax1.set_ylabel("Count")
# AX2 #########################################################################
ax2 = fig.add_subplot(212)
min = np.log10(data.min())
max = np.log10(data.max())
bins = np.logspace(min, max, 50) # <- create a range from 10**min to 10**max
print(bins)
res_tuple = ax2.hist(data,
log=True, # <- Activate log scale on Y axis
bins=bins,
histtype=HIST_TYPE,
alpha=ALPHA)
ax2.set_xscale("log") # <- Activate log scale on Y axis
ax2.set_title("Log scale")
ax2.set_xlabel("Value")
ax2.set_ylabel("Count")
# SHOW AND SAVE FILE ##########################################################
plt.tight_layout()
plt.savefig("hist_logscale_xy.png")
plt.show()
| Python | 0.003448 |
e300d739bf0040b76a0deee75cc01b1410ba8953 | change image field to name in CatalogoLandsat serializer | indicarprocess/tmsapi/serializers.py | indicarprocess/tmsapi/serializers.py | # -*- coding: utf-8 -*-
from rest_framework.serializers import ModelSerializer, SerializerMethodField
from catalogo.models import CatalogoLandsat
class LandsatSerializer(ModelSerializer):
southwest = SerializerMethodField()
northeast = SerializerMethodField()
name = SerializerMethodField()
class Meta:
model = CatalogoLandsat
fields = ['name', 'data', 'southwest', 'northeast']
def get_bounds(self, obj):
lats = []
lons = []
for lat, lon in obj.shape.coords[0]:
lats.append(lat)
lons.append(lon)
lats.sort()
lons.sort()
return [[lats[-1], lons[-1]], [lats[0], lons[0]]]
def get_southwest(self, obj):
return self.get_bounds(obj)[-1]
def get_northeast(self, obj):
return self.get_bounds(obj)[0]
def get_name(self, obj):
return obj.image.replace('.tif', '')
| # -*- coding: utf-8 -*-
from rest_framework.serializers import ModelSerializer, SerializerMethodField
from catalogo.models import CatalogoLandsat
class LandsatSerializer(ModelSerializer):
southwest = SerializerMethodField()
northeast = SerializerMethodField()
class Meta:
model = CatalogoLandsat
fields = ['image', 'data', 'southwest', 'northeast']
def get_bounds(self, obj):
lats = []
lons = []
for lat, lon in obj.shape.coords[0]:
lats.append(lat)
lons.append(lon)
lats.sort()
lons.sort()
return [[lats[-1], lons[-1]], [lats[0], lons[0]]]
def get_southwest(self, obj):
return self.get_bounds(obj)[-1]
def get_northeast(self, obj):
return self.get_bounds(obj)[0]
| Python | 0 |
05a8f2a2e499b25472fbaf1b06e899f589a7101f | fix migration | editor/migrations/0003_auto_20150125_0430.py | editor/migrations/0003_auto_20150125_0430.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from editor.models import Source, Category, Format
# add root data for Source and Category model
def add_root_data(apps, schema_editor):
cat = Category(name ="root", parent=None)
cat.save()
source = Source(
name = "root",
abbreviation = "root",
domain = "",
homepage = "",
about = "",
parent = None,
)
source.save()
source.categories.add(cat)
f = Format(name ="root", parent=None)
f.save()
def revert(apps, schema_editor):
for source in Source.objects.all():
source.delete()
for category in Category.objects.all():
category.delete()
for f in Format.objects.all():
f.delete()
class Migration(migrations.Migration):
dependencies = [
('editor', '0002_auto_20150124_1912'),
]
operations = [
migrations.RunPython(add_root_data, reverse_code=revert),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from editor.models import Source, Category
# add root data for Source and Category model
def add_root_data(apps, schema_editor):
cat = Category(name ="root", parent=None)
cat.save()
source = Source(
name = "root",
abbreviation = "root",
domain = "",
homepage = "",
about = "",
parent = None,
)
source.save()
source.categories.add(cat)
def revert(apps, schema_editor):
for source in Source.objects.all():
source.delete()
for category in Category.objects.all():
category.delete()
class Migration(migrations.Migration):
dependencies = [
('editor', '0002_auto_20150124_1912'),
]
operations = [
migrations.RunPython(add_root_data, reverse_code=revert),
]
| Python | 0.000001 |
1cfd11d1a6aa1d949067e5b24b5bfb2cca10ad09 | add index | requestspool/paction/all.py | requestspool/paction/all.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014 windpro
Author : windpro
E-mail : windprog@gmail.com
Date : 14/12/26
Desc :
"""
from httpappengine.decorator import url
from httpappengine.helper import not_found
from httplib import responses
from requestspool.util import get_route
def all_req(path_url, environ, start_response):
method = environ.get('REQUEST_METHOD').upper()
if not (path_url.startswith(u"http://") or path_url.startswith(u"https://")):
path_url = u"http://" + unicode(path_url)
req_query_string = environ.get("QUERY_STRING", "")
try:
# 获取data
req_data = environ['wsgi.input'].read(int(environ.get('CONTENT_LENGTH', '0')))
except:
req_data = None
requestpool_headers = {}
req_headers = {}
for key, val in environ.iteritems():
if key.startswith('HTTP_'):
# 生成req_headers 暂无需求
header_name = key[5:].replace('_', '-')
if header_name == 'host'.upper():
continue
if 'REQUESTSPOOL.' in header_name:
requestpool_headers[header_name] = val
else:
req_headers[header_name] = val
route = get_route(path_url)
status_code, headers, output = route.http_result(requestpool_headers=requestpool_headers,
url=path_url, method=method, req_query_string=req_query_string,
req_data=req_data, req_headers=req_headers)
start_response(
"{0} {1}".format(status_code, responses.get(status_code, 'OK')),
headers.items())
return output
@url("/http://<path:path_url>", "GET,POST,PUT,PATCH,DELETE,HEAD,OPTIONS")
def http_req(path_url, environ, start_response):
# return all_req(u'http://'+path_url, environ, start_response)
return all_req(u'http://'+path_url, environ, start_response)
@url("/https://<path:path_url>", "GET,POST,PUT,PATCH,DELETE,HEAD,OPTIONS")
def https_req(path_url, environ, start_response):
return all_req(u'https://'+path_url, environ, start_response)
@url("/admin/route/add", "POST")
def route_add(environ, start_response):
# 尚未实现
return not_found(start_response)
@url("/admin/route/all", "GET")
def route_show_all(environ, start_response):
# 尚未实现
return not_found(start_response)
@url("/check", "GET")
def check(environ, start_response):
# 检测get_route
get_route('http://test')
s = "Running!\n"
start_response("200 OK", [
("Content-Type", "text/plain"),
("Content-Length", str(len(s)))
])
return s
@url("/", "GET")
def index(environ, start_response):
return check(environ, start_response) | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014 windpro
Author : windpro
E-mail : windprog@gmail.com
Date : 14/12/26
Desc :
"""
from httpappengine.decorator import url
from httpappengine.helper import not_found
from httplib import responses
from requestspool.util import get_route
def all_req(path_url, environ, start_response):
method = environ.get('REQUEST_METHOD').upper()
if not (path_url.startswith(u"http://") or path_url.startswith(u"https://")):
path_url = u"http://" + unicode(path_url)
req_query_string = environ.get("QUERY_STRING", "")
try:
# 获取data
req_data = environ['wsgi.input'].read(int(environ.get('CONTENT_LENGTH', '0')))
except:
req_data = None
requestpool_headers = {}
req_headers = {}
for key, val in environ.iteritems():
if key.startswith('HTTP_'):
# 生成req_headers 暂无需求
header_name = key[5:].replace('_', '-')
if header_name == 'host'.upper():
continue
if 'REQUESTSPOOL.' in header_name:
requestpool_headers[header_name] = val
else:
req_headers[header_name] = val
route = get_route(path_url)
status_code, headers, output = route.http_result(requestpool_headers=requestpool_headers,
url=path_url, method=method, req_query_string=req_query_string,
req_data=req_data, req_headers=req_headers)
start_response(
"{0} {1}".format(status_code, responses.get(status_code, 'OK')),
headers.items())
return output
@url("/http://<path:path_url>", "GET,POST,PUT,PATCH,DELETE,HEAD,OPTIONS")
def http_req(path_url, environ, start_response):
# return all_req(u'http://'+path_url, environ, start_response)
return all_req(u'http://'+path_url, environ, start_response)
@url("/https://<path:path_url>", "GET,POST,PUT,PATCH,DELETE,HEAD,OPTIONS")
def https_req(path_url, environ, start_response):
return all_req(u'https://'+path_url, environ, start_response)
@url("/admin/route/add", "POST")
def route_add(environ, start_response):
# 尚未实现
return not_found(start_response)
@url("/admin/route/all", "GET")
def route_show_all(environ, start_response):
# 尚未实现
return not_found(start_response)
@url("/check", "GET")
def check(environ, start_response):
# 检测get_route
get_route('http://test')
s = "Running!\n"
start_response("200 OK", [
("Content-Type", "text/plain"),
("Content-Length", str(len(s)))
])
return s | Python | 0.000607 |
53df723a1574e62b4a74d56667c131793cf6c506 | add retrieve all users and one user queries | users_handler.py | users_handler.py | from models.users import User
import logging
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
class UsersHandler:
def __init__(self, DB):
self.db = DB
def create_user(self, user_data):
collection = self.db.users
user = collection.find_one({"username": user_data["username"]})
if not user:
new_user = User(user_data)
collection.insert_one(new_user.__dict__)
logging.info("User Created")
return True
logging.warning("User already exists")
return False
def retrieve_users(self):
collection= self.db.users
users = collection.find()
logging.info("All users retrived successfully")
return users
def retrieve_user(self, username):
collection = self.db.users
user = collection.find_one({'username': username})
if user:
logging.info("Users retrived successfully")
return user
logging.error("User <{}> does not exists".format(username))
return None
| from models.users import User
class UsersHandler:
def __init__(self, DB):
self.db = DB
def create_user(self, user_data):
collection = self.db.users
new_user = User(user_data)
collection.insert_one(new_user.__dict__)
| Python | 0.000001 |
efdf57f4f688c66402c5b18152f2448a100a55a9 | Generate departures region by region | busstops/management/commands/generate_departures.py | busstops/management/commands/generate_departures.py | from datetime import date, timedelta
from django.core.management.base import BaseCommand
from django.db import transaction
from txc import txc
from ...models import Region, Service, Journey, StopUsageUsage, StopPoint
from ...utils import get_files_from_zipfile
ONE_DAY = timedelta(days=1)
def handle_timetable(service, timetable, day):
if hasattr(timetable, 'operating_profile') and day.weekday() not in timetable.operating_profile.regular_days:
return
if not timetable.operating_period.contains(day):
return
# if not hasattr(timetable, 'groupings'):
# return
for grouping in timetable.groupings:
stops = {row.part.stop.atco_code for row in grouping.rows}
existent_stops = StopPoint.objects.filter(atco_code__in=stops).values_list('atco_code', flat=True)
for vj in grouping.journeys:
if not vj.should_show(day):
continue
date = day
previous_time = None
stopusageusages = []
journey = Journey(service=service, datetime='{} {}'.format(date, vj.departure_time))
for i, (su, time) in enumerate(vj.get_times()):
if previous_time and previous_time > time:
date += ONE_DAY
if su.stop.atco_code in existent_stops:
if not su.activity or su.activity.startswith('pickUp'):
stopusageusages.append(
StopUsageUsage(datetime='{} {}'.format(date, time),
order=i, stop_id=su.stop.atco_code)
)
journey.destination_id = su.stop.atco_code
previous_time = time
if journey.destination_id:
journey.save()
for suu in stopusageusages:
suu.journey = journey
StopUsageUsage.objects.bulk_create(stopusageusages)
@transaction.atomic
def handle_region(region):
Journey.objects.filter(service__region=region).delete()
day = date.today()
for service in Service.objects.filter(region=region, current=True):
print(service)
for i, xml_file in enumerate(get_files_from_zipfile(service)):
timetable = txc.Timetable(xml_file, None)
handle_timetable(service, timetable, day)
j = 1
while j < 7:
handle_timetable(service, timetable, day + ONE_DAY * j)
j += 1
class Command(BaseCommand):
def handle(self, *args, **options):
for region in Region.objects.all().exclude(id__in=('L', 'Y', 'NI')):
print(region)
handle_region(region)
| from datetime import date, timedelta
from django.core.management.base import BaseCommand
from django.db import transaction
from txc import txc
from ...models import Service, Journey, StopUsageUsage, StopPoint
from ...utils import get_files_from_zipfile
ONE_DAY = timedelta(days=1)
def handle_timetable(service, timetable, day):
if day.weekday() not in timetable.operating_profile.regular_days:
return
if not timetable.operating_period.contains(day):
return
# if not hasattr(timetable, 'groupings'):
# return
for grouping in timetable.groupings:
stops = {row.part.stop.atco_code for row in grouping.rows}
existent_stops = StopPoint.objects.filter(atco_code__in=stops).values_list('atco_code', flat=True)
for vj in grouping.journeys:
if not vj.should_show(day):
continue
date = day
previous_time = None
stopusageusages = []
journey = Journey(service=service, datetime='{} {}'.format(date, vj.departure_time))
for i, (su, time) in enumerate(vj.get_times()):
if previous_time and previous_time > time:
date += ONE_DAY
if su.stop.atco_code in existent_stops:
if not su.activity or su.activity.startswith('pickUp'):
stopusageusages.append(
StopUsageUsage(datetime='{} {}'.format(date, time),
order=i, stop_id=su.stop.atco_code)
)
journey.destination_id = su.stop.atco_code
previous_time = time
if journey.destination_id:
journey.save()
for suu in stopusageusages:
suu.journey = journey
StopUsageUsage.objects.bulk_create(stopusageusages)
class Command(BaseCommand):
@transaction.atomic
def handle(self, *args, **options):
Journey.objects.all().delete()
day = date.today()
for service in Service.objects.filter(current=True,
region__in=('EA',)).exclude(region__in=('L', 'Y', 'NI')):
print(service)
for i, xml_file in enumerate(get_files_from_zipfile(service)):
timetable = txc.Timetable(xml_file, None)
handle_timetable(service, timetable, day)
j = 1
while j < 7:
handle_timetable(service, timetable, day + ONE_DAY * j)
j += 1
| Python | 0.999999 |
0d5d1263af2fca0955c4db0f603af11c321fe624 | Fix the class name for del_dynamic_range | lib/python2.6/aquilon/server/commands/del_dynamic_range.py | lib/python2.6/aquilon/server/commands/del_dynamic_range.py | # ex: set expandtab softtabstop=4 shiftwidth=4: -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2009,2010 Contributor
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the EU DataGrid Software License. You should
# have received a copy of the license with this program, and the
# license is published at
# http://eu-datagrid.web.cern.ch/eu-datagrid/license.html.
#
# THE FOLLOWING DISCLAIMER APPLIES TO ALL SOFTWARE CODE AND OTHER
# MATERIALS CONTRIBUTED IN CONNECTION WITH THIS PROGRAM.
#
# THIS SOFTWARE IS LICENSED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE AND ANY WARRANTY OF NON-INFRINGEMENT, ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THIS
# SOFTWARE MAY BE REDISTRIBUTED TO OTHERS ONLY BY EFFECTIVELY USING
# THIS OR ANOTHER EQUIVALENT DISCLAIMER AS WELL AS ANY OTHER LICENSE
# TERMS THAT MAY APPLY.
from sqlalchemy.sql.expression import asc
from aquilon.server.broker import BrokerCommand
from aquilon.aqdb.model import System
from aquilon.aqdb.model.network import get_net_id_from_ip
from aquilon.exceptions_ import ArgumentError
from aquilon.server.locks import lock_queue, DeleteKey
class CommandDelDynamicRange(BrokerCommand):
required_parameters = ["startip", "endip"]
def render(self, session, logger, startip, endip, **arguments):
key = DeleteKey("system", logger=logger)
try:
lock_queue.acquire(key)
self.del_dynamic_range(session, logger, startip, endip)
session.commit()
finally:
lock_queue.release(key)
return
def del_dynamic_range(self, session, logger, startip, endip):
startnet = get_net_id_from_ip(session, startip)
endnet = get_net_id_from_ip(session, endip)
if startnet != endnet:
raise ArgumentError("IPs '%s' (%s) and '%s' (%s) must be on the "
"same subnet" %
(startip, startnet.ip, endip, endnet.ip))
q = session.query(System)
q = q.filter(System.ip >= startip)
q = q.filter(System.ip <= endip)
q = q.order_by(asc(System.ip))
existing = q.all()
if not existing:
raise ArgumentError("Nothing found in range.")
if existing[0].ip != startip:
raise ArgumentError("No system found with IP address '%s'" %
startip)
if existing[-1].ip != endip:
raise ArgumentError("No system found with IP address '%s'" %
endip)
invalid = [s for s in existing if s.system_type != 'dynamic_stub']
if invalid:
raise ArgumentError("The range contains non-dynamic systems:\n" +
"\n".join(["%s (%s)" % (i.fqdn, i.ip)
for i in invalid]))
for stub in existing:
session.delete(stub)
return
| # ex: set expandtab softtabstop=4 shiftwidth=4: -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2009,2010 Contributor
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the EU DataGrid Software License. You should
# have received a copy of the license with this program, and the
# license is published at
# http://eu-datagrid.web.cern.ch/eu-datagrid/license.html.
#
# THE FOLLOWING DISCLAIMER APPLIES TO ALL SOFTWARE CODE AND OTHER
# MATERIALS CONTRIBUTED IN CONNECTION WITH THIS PROGRAM.
#
# THIS SOFTWARE IS LICENSED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE AND ANY WARRANTY OF NON-INFRINGEMENT, ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THIS
# SOFTWARE MAY BE REDISTRIBUTED TO OTHERS ONLY BY EFFECTIVELY USING
# THIS OR ANOTHER EQUIVALENT DISCLAIMER AS WELL AS ANY OTHER LICENSE
# TERMS THAT MAY APPLY.
from sqlalchemy.sql.expression import asc
from aquilon.server.broker import BrokerCommand
from aquilon.aqdb.model import System
from aquilon.aqdb.model.network import get_net_id_from_ip
from aquilon.exceptions_ import ArgumentError
from aquilon.server.locks import lock_queue, DeleteKey
class CommandAddDynamicRange(BrokerCommand):
required_parameters = ["startip", "endip"]
def render(self, session, logger, startip, endip, **arguments):
key = DeleteKey("system", logger=logger)
try:
lock_queue.acquire(key)
self.del_dynamic_range(session, logger, startip, endip)
session.commit()
finally:
lock_queue.release(key)
return
def del_dynamic_range(self, session, logger, startip, endip):
startnet = get_net_id_from_ip(session, startip)
endnet = get_net_id_from_ip(session, endip)
if startnet != endnet:
raise ArgumentError("IPs '%s' (%s) and '%s' (%s) must be on the "
"same subnet" %
(startip, startnet.ip, endip, endnet.ip))
q = session.query(System)
q = q.filter(System.ip >= startip)
q = q.filter(System.ip <= endip)
q = q.order_by(asc(System.ip))
existing = q.all()
if not existing:
raise ArgumentError("Nothing found in range.")
if existing[0].ip != startip:
raise ArgumentError("No system found with IP address '%s'" %
startip)
if existing[-1].ip != endip:
raise ArgumentError("No system found with IP address '%s'" %
endip)
invalid = [s for s in existing if s.system_type != 'dynamic_stub']
if invalid:
raise ArgumentError("The range contains non-dynamic systems:\n" +
"\n".join(["%s (%s)" % (i.fqdn, i.ip)
for i in invalid]))
for stub in existing:
session.delete(stub)
return
| Python | 0.999991 |
d809c3cef9761edbf984ea9a8cf066a2f474c58d | fix integration tests | test/integration/013_context_var_tests/test_context_vars.py | test/integration/013_context_var_tests/test_context_vars.py | from nose.plugins.attrib import attr
from test.integration.base import DBTIntegrationTest
import dbt.flags
class TestContextVars(DBTIntegrationTest):
def setUp(self):
DBTIntegrationTest.setUp(self)
self.fields = [
'this',
'this.name',
'this.schema',
'this.table',
'target.dbname',
'target.host',
'target.name',
'target.port',
'target.schema',
'target.threads',
'target.type',
'target.user',
'target.pass',
'run_started_at',
'invocation_id'
]
@property
def schema(self):
return "context_vars_013"
@property
def models(self):
return "test/integration/013_context_var_tests/models"
@property
def profile_config(self):
return {
'test': {
'outputs': {
'dev': {
'type': 'postgres',
'threads': 1,
'host': 'database',
'port': 5432,
'user': 'root',
'pass': 'password',
'dbname': 'dbt',
'schema': self.schema
},
'prod': {
'type': 'postgres',
'threads': 1,
'host': 'database',
'port': 5432,
'user': 'root',
'pass': 'password',
'dbname': 'dbt',
'schema': self.schema
}
},
'target': 'dev'
}
}
def get_ctx_vars(self):
field_list = ", ".join(['"{}"'.format(f) for f in self.fields])
query = 'select {field_list} from {schema}.context'.format(
field_list=field_list,
schema=self.schema)
vals = self.run_sql(query, fetch='all')
ctx = dict([(k, v) for (k, v) in zip(self.fields, vals[0])])
return ctx
@attr(type='postgres')
def test_env_vars_dev(self):
self.run_dbt(['run'])
ctx = self.get_ctx_vars()
self.assertEqual(ctx['this'], '"context_vars_013"."context__dbt_tmp"')
self.assertEqual(ctx['this.name'], 'context')
self.assertEqual(ctx['this.schema'], 'context_vars_013')
self.assertEqual(ctx['this.table'], 'context__dbt_tmp')
self.assertEqual(ctx['target.dbname'], 'dbt')
self.assertEqual(ctx['target.host'], 'database')
self.assertEqual(ctx['target.name'], 'dev')
self.assertEqual(ctx['target.port'], 5432)
self.assertEqual(ctx['target.schema'], 'context_vars_013')
self.assertEqual(ctx['target.threads'], 1)
self.assertEqual(ctx['target.type'], 'postgres')
self.assertEqual(ctx['target.user'], 'root')
self.assertEqual(ctx['target.pass'], '')
@attr(type='postgres')
def test_env_vars_prod(self):
self.run_dbt(['run', '--target', 'prod'])
ctx = self.get_ctx_vars()
self.assertEqual(ctx['this'], '"context_vars_013"."context__dbt_tmp"')
self.assertEqual(ctx['this.name'], 'context')
self.assertEqual(ctx['this.schema'], 'context_vars_013')
self.assertEqual(ctx['this.table'], 'context__dbt_tmp')
self.assertEqual(ctx['target.dbname'], 'dbt')
self.assertEqual(ctx['target.host'], 'database')
self.assertEqual(ctx['target.name'], 'prod')
self.assertEqual(ctx['target.port'], 5432)
self.assertEqual(ctx['target.schema'], 'context_vars_013')
self.assertEqual(ctx['target.threads'], 1)
self.assertEqual(ctx['target.type'], 'postgres')
self.assertEqual(ctx['target.user'], 'root')
self.assertEqual(ctx['target.pass'], '')
| from nose.plugins.attrib import attr
from test.integration.base import DBTIntegrationTest
import dbt.flags
class TestContextVars(DBTIntegrationTest):
def setUp(self):
DBTIntegrationTest.setUp(self)
self.fields = [
'this',
'this.name',
'this.schema',
'this.table',
'target.dbname',
'target.host',
'target.name',
'target.port',
'target.schema',
'target.threads',
'target.type',
'target.user',
'target.pass',
'run_started_at',
'invocation_id'
]
@property
def schema(self):
return "context_vars_013"
@property
def models(self):
return "test/integration/013_context_var_tests/models"
@property
def profile_config(self):
return {
'test': {
'outputs': {
'dev': {
'type': 'postgres',
'threads': 1,
'host': 'database',
'port': 5432,
'user': 'root',
'pass': 'password',
'dbname': 'dbt',
'schema': self.schema
},
'prod': {
'type': 'postgres',
'threads': 1,
'host': 'database',
'port': 5432,
'user': 'root',
'pass': 'password',
'dbname': 'dbt',
'schema': self.schema
}
},
'target': 'dev'
}
}
def get_ctx_vars(self):
field_list = ", ".join(['"{}"'.format(f) for f in self.fields])
query = 'select {field_list} from {schema}.context'.format(
field_list=field_list,
schema=self.schema)
vals = self.run_sql(query, fetch='all')
ctx = dict([(k, v) for (k, v) in zip(self.fields, vals[0])])
return ctx
@attr(type='postgres')
def test_env_vars_dev(self):
self.run_dbt(['run'])
ctx = self.get_ctx_vars()
self.assertEqual(ctx['this'], '"context_vars_013"."context"')
self.assertEqual(ctx['this.name'], 'context')
self.assertEqual(ctx['this.schema'], 'context_vars_013')
self.assertEqual(ctx['this.table'], 'context')
self.assertEqual(ctx['target.dbname'], 'dbt')
self.assertEqual(ctx['target.host'], 'database')
self.assertEqual(ctx['target.name'], 'dev')
self.assertEqual(ctx['target.port'], 5432)
self.assertEqual(ctx['target.schema'], 'context_vars_013')
self.assertEqual(ctx['target.threads'], 1)
self.assertEqual(ctx['target.type'], 'postgres')
self.assertEqual(ctx['target.user'], 'root')
self.assertEqual(ctx['target.pass'], '')
@attr(type='postgres')
def test_env_vars_prod(self):
self.run_dbt(['run', '--target', 'prod'])
ctx = self.get_ctx_vars()
self.assertEqual(ctx['this'], '"context_vars_013"."context"')
self.assertEqual(ctx['this.name'], 'context')
self.assertEqual(ctx['this.schema'], 'context_vars_013')
self.assertEqual(ctx['this.table'], 'context')
self.assertEqual(ctx['target.dbname'], 'dbt')
self.assertEqual(ctx['target.host'], 'database')
self.assertEqual(ctx['target.name'], 'prod')
self.assertEqual(ctx['target.port'], 5432)
self.assertEqual(ctx['target.schema'], 'context_vars_013')
self.assertEqual(ctx['target.threads'], 1)
self.assertEqual(ctx['target.type'], 'postgres')
self.assertEqual(ctx['target.user'], 'root')
self.assertEqual(ctx['target.pass'], '')
| Python | 0 |
7674437d752be0791688533dd1409fa083672bb2 | Switch from dictionary to namedtuple | genes/java/config.py | genes/java/config.py | #!/usr/bin/env python
from collections import namedtuple
JavaConfig = namedtuple('JavaConfig', ['is_oracle', 'version'])
def config():
return JavaConfig(
is_oracle=True,
version='oracle-java8',
)
| #!/usr/bin/env python
def config():
return {
'is-oracle': True,
'version': 'oracle-java8',
}
| Python | 0.000003 |
becf684fc06890679f4c0cdfed1761962e16a343 | Make extra_context at browse_repository view not overriding provided variables | vcs/web/simplevcs/views/repository.py | vcs/web/simplevcs/views/repository.py | from django.contrib import messages
from django.template import RequestContext
from django.shortcuts import render_to_response
from vcs.exceptions import VCSError
def browse_repository(request, repository, template_name, revision=None,
node_path='', extra_context={}):
"""
Generic repository browser.
Provided context variables:
- ``repository``: same what was given
- ``changeset``: based on the given ``revision`` or tip if none given
- ``root``: repositorie's node on the given ``node_path``
"""
context = {}
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
try:
context.update(dict(
changeset = repository.get_changeset(),
root = repository.request(node_path, revision=revision),
))
except VCSError, err:
messages.error(request, str(err))
return render_to_response(template_name, context, RequestContext(request))
| from django.contrib import messages
from django.template import RequestContext
from django.shortcuts import render_to_response
from vcs.exceptions import VCSError
def browse_repository(request, repository, template_name, revision=None,
node_path='', extra_context={}):
"""
Generic repository browser.
Provided context variables:
- ``repository``: same what was given
- ``changeset``: based on the given ``revision`` or tip if none given
- ``root``: repositorie's node on the given ``node_path``
"""
context = {}
try:
context.update(dict(
changeset = repository.get_changeset(),
root = repository.request(node_path, revision=revision),
))
except VCSError, err:
messages.error(request, str(err))
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return render_to_response(template_name, context, RequestContext(request))
| Python | 0.00001 |
7ecaeba33a4fe559f6122953581e533720cb2404 | Add select mkl libs (#22580) | var/spack/repos/builtin/packages/intel-oneapi-mkl/package.py | var/spack/repos/builtin/packages/intel-oneapi-mkl/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from sys import platform
from spack import *
class IntelOneapiMkl(IntelOneApiLibraryPackage):
"""Intel oneAPI MKL."""
maintainers = ['rscohn2']
homepage = 'https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/onemkl.html'
if platform == 'linux':
version('2021.1.1',
sha256='818b6bd9a6c116f4578cda3151da0612ec9c3ce8b2c8a64730d625ce5b13cc0c',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/17402/l_onemkl_p_2021.1.1.52_offline.sh',
expand=False)
depends_on('intel-oneapi-tbb')
provides('fftw-api@3')
provides('scalapack')
provides('mkl')
provides('lapack')
provides('blas')
@property
def component_dir(self):
return 'mkl'
@property
def libs(self):
lib_path = '{0}/{1}/latest/lib/intel64'.format(self.prefix, self.component_dir)
mkl_libs = ['libmkl_intel_ilp64', 'libmkl_sequential', 'libmkl_core']
return find_libraries(mkl_libs, root=lib_path, shared=True, recursive=False)
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from sys import platform
from spack import *
class IntelOneapiMkl(IntelOneApiLibraryPackage):
"""Intel oneAPI MKL."""
maintainers = ['rscohn2']
homepage = 'https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/onemkl.html'
if platform == 'linux':
version('2021.1.1',
sha256='818b6bd9a6c116f4578cda3151da0612ec9c3ce8b2c8a64730d625ce5b13cc0c',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/17402/l_onemkl_p_2021.1.1.52_offline.sh',
expand=False)
depends_on('intel-oneapi-tbb')
provides('fftw-api@3')
provides('scalapack')
provides('mkl')
provides('lapack')
provides('blas')
@property
def component_dir(self):
return 'mkl'
| Python | 0 |
67cea85323195440330580cc3731447956a4ad32 | add default user settings packet | litecord/managers/user_settings.py | litecord/managers/user_settings.py |
class SettingsManager:
"""User settings manager.
Provides functions for users to change their settings and retrieve them back.
Attributes
----------
server: :class:`LitecordServer`
Litecord server instance.
settings_coll: `mongo collection`
User settings MongoDB collection.
"""
def __init__(self, server):
self.server = server
self.guild_man = server.guild_man
self.settings_coll = self.server.settings_coll
async def get_settings(self, user):
"""Get a settings object from a User ID.
Parameters
----------
user_id: :class:`User`
User ID to be get settings from.
"""
if user.bot:
return {}
settings = await self.settings_coll.find_one({'user_id': user.id})
if settings is None:
settings = {
'timezone_offset': 0,
'theme': 'dark',
'status': 'online',
'show_current_game': False,
'restricted_guilds': [],
'render_reactions': True,
'render_embeds:': True,
'message_display_compact': True,
'locale': 'en-US',
'inline_embed_media': False,
'inline_attachment_media': False,
'guild_positions': [],
'friend_source_flags': {
'all': True,
},
'explicit_content_filter': 1,
'enable_tts_command': False,
'developer_mode': False,
'detect_platform_accounts': False,
'default_guilds_restricted': False,
'convert_emoticons': True,
'afk_timeout': 600,
}
return settings
async def get_guild_settings(self, user):
"""Get a User Guild Settings object to be used
in READY payloads.
Parameters
----------
user_id: :class:`User`
User ID to get User Guild Settings payload for.
Returns
-------
list
The User Guild Settings payload.
"""
if user.bot:
return []
res = []
async for guild in self.guild_man.yield_guilds(user.id):
res.append(guild.default_settings)
return res
|
class SettingsManager:
"""User settings manager.
Provides functions for users to change their settings and retrieve them back.
Attributes
----------
server: :class:`LitecordServer`
Litecord server instance.
settings_coll: `mongo collection`
User settings MongoDB collection.
"""
def __init__(self, server):
self.server = server
self.guild_man = server.guild_man
self.settings_coll = self.server.settings_coll
async def get_settings(self, user):
"""Get a settings object from a User ID.
Parameters
----------
user_id: :class:`User`
User ID to be get settings from.
"""
if user.bot:
return {}
settings = await self.settings_coll.find_one({'user_id': user.id})
if settings is None:
settings = {}
return settings
async def get_guild_settings(self, user):
"""Get a User Guild Settings object to be used
in READY payloads.
Parameters
----------
user_id: :class:`User`
User ID to get User Guild Settings payload for.
Returns
-------
list
The User Guild Settings payload.
"""
if user.bot:
return []
res = []
async for guild in self.guild_man.yield_guilds(user.id):
res.append(guild.default_settings)
return res
| Python | 0.000009 |
f5838692d711e6c8d0b8f0dc7716ea28707df4f2 | Add default for playlist_dir | beetsplug/smartplaylist.py | beetsplug/smartplaylist.py | # This file is part of beets.
# Copyright 2013, Dang Mai <contact@dangmai.net>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Generates smart playlists based on beets queries.
"""
from __future__ import print_function
from beets.plugins import BeetsPlugin
from beets import config, ui
from beets.util import normpath, syspath
import os
# Global variables so that smartplaylist can detect database changes and run
# only once before beets exits.
database_changed = False
library = None
def update_playlists(lib):
from beets.util.functemplate import Template
print("Updating smart playlists...")
playlists = config['smartplaylist']['playlists'].get(list)
playlist_dir = config['smartplaylist']['playlist_dir'].get(unicode)
relative_to = config['smartplaylist']['relative_to'].get()
if relative_to:
relative_to = normpath(relative_to)
for playlist in playlists:
items = lib.items(playlist['query'])
m3us = {}
basename = playlist['name'].encode('utf8')
# As we allow tags in the m3u names, we'll need to iterate through
# the items and generate the correct m3u file names.
for item in items:
m3u_name = item.evaluate_template(Template(basename), lib=lib)
if not (m3u_name in m3us):
m3us[m3u_name] = []
if relative_to:
m3us[m3u_name].append(os.path.relpath(item.path, relative_to))
else:
m3us[m3u_name].append(item.path)
# Now iterate through the m3us that we need to generate
for m3u in m3us:
m3u_path = normpath(os.path.join(playlist_dir, m3u))
import pdb; pdb.set_trace()
with open(syspath(m3u_path), 'w') as f:
for path in m3us[m3u]:
f.write(path + '\n')
print("... Done")
class SmartPlaylistPlugin(BeetsPlugin):
def __init__(self):
super(SmartPlaylistPlugin, self).__init__()
self.config.add({
'relative_to': None,
'playlist_dir': u'.',
'playlists': []
})
def commands(self):
def update(lib, opts, args):
update_playlists(lib)
spl_update = ui.Subcommand('splupdate',
help='update the smart playlists')
spl_update.func = update
return [spl_update]
@SmartPlaylistPlugin.listen('database_change')
def handle_change(lib):
global library
global database_changed
library = lib
database_changed = True
@SmartPlaylistPlugin.listen('cli_exit')
def update():
if database_changed:
update_playlists(library)
| # This file is part of beets.
# Copyright 2013, Dang Mai <contact@dangmai.net>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Generates smart playlists based on beets queries.
"""
from __future__ import print_function
from beets.plugins import BeetsPlugin
from beets import config, ui
from beets.util import normpath, syspath
import os
# Global variables so that smartplaylist can detect database changes and run
# only once before beets exits.
database_changed = False
library = None
def update_playlists(lib):
from beets.util.functemplate import Template
print("Updating smart playlists...")
playlists = config['smartplaylist']['playlists'].get(list)
playlist_dir = config['smartplaylist']['playlist_dir'].get(unicode)
relative_to = config['smartplaylist']['relative_to'].get()
if relative_to:
relative_to = normpath(relative_to)
for playlist in playlists:
items = lib.items(playlist['query'])
m3us = {}
basename = playlist['name'].encode('utf8')
# As we allow tags in the m3u names, we'll need to iterate through
# the items and generate the correct m3u file names.
for item in items:
m3u_name = item.evaluate_template(Template(basename), lib=lib)
if not (m3u_name in m3us):
m3us[m3u_name] = []
if relative_to:
m3us[m3u_name].append(os.path.relpath(item.path, relative_to))
else:
m3us[m3u_name].append(item.path)
# Now iterate through the m3us that we need to generate
for m3u in m3us:
m3u_path = normpath(os.path.join(playlist_dir, m3u))
with open(syspath(m3u_path), 'w') as f:
for path in m3us[m3u]:
f.write(path + '\n')
print("... Done")
class SmartPlaylistPlugin(BeetsPlugin):
def __init__(self):
super(SmartPlaylistPlugin, self).__init__()
self.config.add({
'relative_to': None,
'playlists': []
})
def commands(self):
def update(lib, opts, args):
update_playlists(lib)
spl_update = ui.Subcommand('splupdate',
help='update the smart playlists')
spl_update.func = update
return [spl_update]
@SmartPlaylistPlugin.listen('database_change')
def handle_change(lib):
global library
global database_changed
library = lib
database_changed = True
@SmartPlaylistPlugin.listen('cli_exit')
def update():
if database_changed:
update_playlists(library)
| Python | 0.000001 |
603a59785f24aa98662e72d954b3aa0521ad0629 | Make repeatability tests for severities specified by CLI | test/unit/vint/linting/config/test_config_cmdargs_source.py | test/unit/vint/linting/config/test_config_cmdargs_source.py | import unittest
from test.asserting.config_source import ConfigSourceAssertion
from vint.linting.config.config_cmdargs_source import ConfigCmdargsSource
from vint.linting.level import Level
class TestConfigFileSource(ConfigSourceAssertion, unittest.TestCase):
def test_get_config_dict(self):
env = {
'cmdargs': {
'verbose': True,
'style': True,
'warning': True,
'max-violations': 10,
},
}
expected_config_dict = {
'cmdargs': {
'verbose': True,
'severity': Level.WARNING,
'max-violations': 10,
},
}
config_source = self.initialize_config_source_with_env(ConfigCmdargsSource, env)
self.assertConfigDict(config_source, expected_config_dict)
def test_get_config_dict_with_no_severity(self):
env = {'cmdargs': {}}
expected_config_dict = {'cmdargs': {}}
config_source = self.initialize_config_source_with_env(ConfigCmdargsSource, env)
self.assertConfigDict(config_source, expected_config_dict)
def test_get_config_dict_with_severity_style_problem(self):
env = {
'cmdargs': {
'style_problem': True,
},
}
expected_config_dict = {
'cmdargs': {
'severity': Level.STYLE_PROBLEM,
},
}
config_source = self.initialize_config_source_with_env(ConfigCmdargsSource, env)
self.assertConfigDict(config_source, expected_config_dict)
def test_get_config_dict_with_severity_warning(self):
env = {
'cmdargs': {
'warning': True,
},
}
expected_config_dict = {
'cmdargs': {
'severity': Level.WARNING,
},
}
config_source = self.initialize_config_source_with_env(ConfigCmdargsSource, env)
self.assertConfigDict(config_source, expected_config_dict)
def test_get_config_dict_with_severity_error(self):
env = {
'cmdargs': {
'error': True,
},
}
expected_config_dict = {
'cmdargs': {
'severity': Level.ERROR,
},
}
config_source = self.initialize_config_source_with_env(ConfigCmdargsSource, env)
self.assertConfigDict(config_source, expected_config_dict)
if __name__ == '__main__':
unittest.main()
| import unittest
from test.asserting.config_source import ConfigSourceAssertion
from vint.linting.config.config_cmdargs_source import ConfigCmdargsSource
from vint.linting.level import Level
class TestConfigFileSource(ConfigSourceAssertion, unittest.TestCase):
def test_get_config_dict(self):
expected_config_dict = {
'cmdargs': {
'verbose': True,
'severity': Level.WARNING,
'max-violations': 10,
},
}
env = {
'cmdargs': {
'verbose': True,
'style': True,
'warning': True,
'max-violations': 10,
},
}
config_source = self.initialize_config_source_with_env(ConfigCmdargsSource, env)
self.assertConfigDict(config_source, expected_config_dict)
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 |
54cea5e302820c35025e1afc64b2058a48c5b174 | Implement pop in the data storage module | desertbot/datastore.py | desertbot/datastore.py | import json
import os
class DataStore(object):
def __init__(self, storagePath, defaultsPath):
self.storagePath = storagePath
self.defaultsPath = defaultsPath
self.data = {}
self.load()
def load(self):
# if a file data/defaults/<module>.json exists, it has priority on load
if os.path.exists(self.defaultsPath):
with open(self.defaultsPath) as storageFile:
self.data = json.load(storageFile)
# if not, use data/<network>/<module>.json instead
elif os.path.exists(self.storagePath):
with open(self.storagePath) as storageFile:
self.data = json.load(storageFile)
# if there's nothing, make sure the folder at least exists for the server-specific data files
else:
os.makedirs(os.path.dirname(self.storagePath), exist_ok=True)
def save(self):
# don't save empty files, to keep the data directories from filling up with pointless files
if len(self.data) != 0:
tmpFile = f"{self.storagePath}.tmp"
with open(tmpFile, "w") as storageFile:
storageFile.write(json.dumps(self.data, indent=4))
os.rename(tmpFile, self.storagePath)
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
self.save()
def __contains__(self, key):
return key in self.data
def __delitem__(self, key):
del self.data[key]
def items(self):
return self.data.items()
def values(self):
return self.data.values()
def keys(self):
return self.data.keys()
def get(self, key, defaultValue=None):
return self.data.get(key, defaultValue)
def pop(self, key):
data = self.data.pop(key)
self.save()
return data
| import json
import os
class DataStore(object):
def __init__(self, storagePath, defaultsPath):
self.storagePath = storagePath
self.defaultsPath = defaultsPath
self.data = {}
self.load()
def load(self):
# if a file data/defaults/<module>.json exists, it has priority on load
if os.path.exists(self.defaultsPath):
with open(self.defaultsPath) as storageFile:
self.data = json.load(storageFile)
# if not, use data/<network>/<module>.json instead
elif os.path.exists(self.storagePath):
with open(self.storagePath) as storageFile:
self.data = json.load(storageFile)
# if there's nothing, make sure the folder at least exists for the server-specific data files
else:
os.makedirs(os.path.dirname(self.storagePath), exist_ok=True)
def save(self):
# don't save empty files, to keep the data directories from filling up with pointless files
if len(self.data) != 0:
tmpFile = f"{self.storagePath}.tmp"
with open(tmpFile, "w") as storageFile:
storageFile.write(json.dumps(self.data, indent=4))
os.rename(tmpFile, self.storagePath)
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
self.save()
def __contains__(self, key):
return key in self.data
def __delitem__(self, key):
del self.data[key]
def items(self):
return self.data.items()
def values(self):
return self.data.values()
def keys(self):
return self.data.keys()
def get(self, key, defaultValue=None):
return self.data.get(key, defaultValue)
| Python | 0.000001 |
9f4fbb1db8e96a798bca1ae72d8cae8b90ba7d60 | Add Message.fields() shortcut to iterating message fields | venom/message.py | venom/message.py | from abc import ABCMeta
from collections import MutableMapping
from collections import OrderedDict
from typing import Any, Dict, Type, Iterable
from venom.fields import FieldDescriptor
from venom.util import meta
class OneOf(object):
def __init__(self, *choices):
self.choices = choices
# TODO helper functions.
def which(self):
raise NotImplementedError
def get(self) -> Any:
raise NotImplementedError
class MessageMeta(ABCMeta):
@classmethod
def __prepare__(metacls, name, bases):
return OrderedDict()
def __new__(metacls, name, bases, members):
cls = super(MessageMeta, metacls).__new__(metacls, name, bases, members)
cls.__fields__ = OrderedDict(getattr(cls, '__fields__') or ())
cls.__meta__, meta_changes = meta(bases, members)
cls.__meta__.wire_formats = {}
if not meta_changes.get('name', None):
cls.__meta__.name = name
for name, member in members.items():
if isinstance(member, FieldDescriptor):
cls.__fields__[name] = member
if member.name is None:
member.name = name
elif isinstance(member, OneOf):
cls.__meta__.one_of_groups += (name, member.choices)
return cls
class Message(MutableMapping, metaclass=MessageMeta):
__slots__ = ('_values',)
__fields__ = None # type: Dict[str, FieldDescriptor]
__meta__ = None # type: Dict[str, Any]
class Meta:
name = None
one_of_groups = ()
wire_formats = None
def __init__(self, *args, **kwargs):
if args:
self._values = {}
for value, key in zip(args, self.__fields__.keys()):
self._values[key] = value
for key, value in kwargs.items():
self._values[key] = value
else:
self._values = {key: value for key, value in kwargs.items()}
@classmethod
def fields(cls) -> Iterable[FieldDescriptor]:
return cls.__fields__.values()
@classmethod
def from_object(cls, obj):
kwargs = {}
for key, field in cls.__fields__.items():
if hasattr(obj, '__getitem__'):
try:
kwargs[key] = obj[key]
continue
except (IndexError, TypeError, KeyError):
pass
try:
kwargs[key] = getattr(obj, key)
except AttributeError:
pass
return cls(**kwargs)
def __getitem__(self, key):
return self._values[key]
def __setitem__(self, key, value):
self._values[key] = value
def __delitem__(self, key):
del self._values[key]
def __contains__(self, key):
return key in self._values
def __iter__(self):
return iter(self._values)
def __len__(self):
return len(self._values)
def __repr__(self):
parts = []
for key in self.__fields__.keys():
if key in self._values:
parts.append('{}={}'.format(key, repr(self._values[key])))
return '{}({})'.format(self.__meta__.name, ', '.join(parts))
def one_of(*choices):
"""
Usage:::
class SearchRequest(Message):
query = one_of('name', 'id')
s = SearchRequest(id=123)
s.query.which() # 'id'
"""
return OneOf(choices)
class Empty(Message):
pass
def message_factory(name: str, fields: Dict[str, FieldDescriptor]) -> Type[Message]:
return type(name, (Message,), fields)
def get_or_default(message: Message, key: str, default: Any = None):
try:
return message[key]
except KeyError as e:
if key in message.__fields__:
if default is None:
return message.__fields__[key].default()
return default
raise e
| from abc import ABCMeta
from collections import MutableMapping
from collections import OrderedDict
from typing import Any, Dict, Type
from venom.fields import Field, FieldDescriptor
from venom.util import meta
class OneOf(object):
def __init__(self, *choices):
self.choices = choices
# TODO helper functions.
def which(self):
raise NotImplementedError
def get(self) -> Any:
raise NotImplementedError
class MessageMeta(ABCMeta):
@classmethod
def __prepare__(metacls, name, bases):
return OrderedDict()
def __new__(metacls, name, bases, members):
cls = super(MessageMeta, metacls).__new__(metacls, name, bases, members)
cls.__fields__ = OrderedDict(getattr(cls, '__fields__') or ())
cls.__meta__, meta_changes = meta(bases, members)
cls.__meta__.wire_formats = {}
if not meta_changes.get('name', None):
cls.__meta__.name = name
for name, member in members.items():
if isinstance(member, FieldDescriptor):
cls.__fields__[name] = member
if member.name is None:
member.name = name
elif isinstance(member, OneOf):
cls.__meta__.one_of_groups += (name, member.choices)
return cls
class Message(MutableMapping, metaclass=MessageMeta):
__slots__ = ('_values',)
__fields__ = None # type: Dict[str, Field]
__meta__ = None # type: Dict[str, Any]
class Meta:
name = None
one_of_groups = ()
wire_formats = None
def __init__(self, *args, **kwargs):
if args:
self._values = {}
for value, key in zip(args, self.__fields__.keys()):
self._values[key] = value
for key, value in kwargs.items():
self._values[key] = value
else:
self._values = {key: value for key, value in kwargs.items()}
@classmethod
def from_object(cls, obj):
kwargs = {}
for key, field in cls.__fields__.items():
if hasattr(obj, '__getitem__'):
try:
kwargs[key] = obj[key]
continue
except (IndexError, TypeError, KeyError):
pass
try:
kwargs[key] = getattr(obj, key)
except AttributeError:
pass
return cls(**kwargs)
def __getitem__(self, key):
return self._values[key]
def __setitem__(self, key, value):
self._values[key] = value
def __delitem__(self, key):
del self._values[key]
def __contains__(self, key):
return key in self._values
def __iter__(self):
return iter(self._values)
def __len__(self):
return len(self._values)
def __repr__(self):
parts = []
for key in self.__fields__.keys():
if key in self._values:
parts.append('{}={}'.format(key, repr(self._values[key])))
return '{}({})'.format(self.__meta__.name, ', '.join(parts))
def one_of(*choices):
"""
Usage:::
class SearchRequest(Message):
query = one_of('name', 'id')
s = SearchRequest(id=123)
s.query.which() # 'id'
"""
return OneOf(choices)
class Empty(Message):
pass
def message_factory(name: str, fields: Dict[str, FieldDescriptor]) -> Type[Message]:
return type(name, (Message,), fields)
def get_or_default(message: Message, key: str, default: Any = None):
try:
return message[key]
except KeyError as e:
if key in message.__fields__:
if default is None:
return message.__fields__[key].default()
return default
raise e
| Python | 0.000003 |
e3753ac4b2c24c43014aab8121a34b9ad76d6b7a | update tests to v2.1.1 (#1597) (#1597) | exercises/hamming/hamming_test.py | exercises/hamming/hamming_test.py | import unittest
import hamming
# Tests adapted from `problem-specifications//canonical-data.json` @ v2.1.1
class HammingTest(unittest.TestCase):
def test_empty_strands(self):
self.assertEqual(hamming.distance("", ""), 0)
def test_identical_strands(self):
self.assertEqual(hamming.distance("A", "A"), 0)
def test_long_identical_strands(self):
self.assertEqual(hamming.distance("GGACTGA", "GGACTGA"), 0)
def test_complete_distance_in_single_nucleotide_strands(self):
self.assertEqual(hamming.distance("A", "G"), 1)
def test_complete_distance_in_small_strands(self):
self.assertEqual(hamming.distance("AG", "CT"), 2)
def test_small_distance_in_small_strands(self):
self.assertEqual(hamming.distance("AT", "CT"), 1)
def test_small_distance(self):
self.assertEqual(hamming.distance("GGACG", "GGTCG"), 1)
def test_small_distance_in_long_strands(self):
self.assertEqual(hamming.distance("ACCAGGG", "ACTATGG"), 2)
def test_non_unique_character_in_first_strand(self):
self.assertEqual(hamming.distance("AAG", "AAA"), 1)
def test_non_unique_character_in_second_strand(self):
self.assertEqual(hamming.distance("AAA", "AAG"), 1)
def test_same_nucleotides_in_different_positions(self):
self.assertEqual(hamming.distance("TAG", "GAT"), 2)
def test_large_distance(self):
self.assertEqual(hamming.distance("GATACA", "GCATAA"), 4)
def test_large_distance_in_off_by_one_strand(self):
self.assertEqual(hamming.distance("GGACGGATTCTG", "AGGACGGATTCT"), 9)
def test_disallow_first_strand_longer(self):
with self.assertRaisesWithMessage(ValueError):
hamming.distance("AATG", "AAA")
def test_disallow_second_strand_longer(self):
with self.assertRaisesWithMessage(ValueError):
hamming.distance("ATA", "AGTG")
# Utility functions
def setUp(self):
try:
self.assertRaisesRegex
except AttributeError:
self.assertRaisesRegex = self.assertRaisesRegexp
def assertRaisesWithMessage(self, exception):
return self.assertRaisesRegex(exception, r".+")
if __name__ == '__main__':
unittest.main()
| import unittest
import hamming
# Tests adapted from `problem-specifications//canonical-data.json` @ v2.1.0
class HammingTest(unittest.TestCase):
def test_empty_strands(self):
self.assertEqual(hamming.distance("", ""), 0)
def test_identical_strands(self):
self.assertEqual(hamming.distance("A", "A"), 0)
def test_long_identical_strands(self):
self.assertEqual(hamming.distance("GGACTGA", "GGACTGA"), 0)
def test_complete_distance_in_single_nucleotide_strands(self):
self.assertEqual(hamming.distance("A", "G"), 1)
def test_complete_distance_in_small_strands(self):
self.assertEqual(hamming.distance("AG", "CT"), 2)
def test_small_distance_in_small_strands(self):
self.assertEqual(hamming.distance("AT", "CT"), 1)
def test_small_distance(self):
self.assertEqual(hamming.distance("GGACG", "GGTCG"), 1)
def test_small_distance_in_long_strands(self):
self.assertEqual(hamming.distance("ACCAGGG", "ACTATGG"), 2)
def test_non_unique_character_in_first_strand(self):
self.assertEqual(hamming.distance("AAG", "AAA"), 1)
def test_non_unique_character_in_second_strand(self):
self.assertEqual(hamming.distance("AAA", "AAG"), 1)
def test_same_nucleotides_in_different_positions(self):
self.assertEqual(hamming.distance("TAG", "GAT"), 2)
def test_large_distance(self):
self.assertEqual(hamming.distance("GATACA", "GCATAA"), 4)
def test_large_distance_in_off_by_one_strand(self):
self.assertEqual(hamming.distance("GGACGGATTCTG", "AGGACGGATTCT"), 9)
def test_disallow_first_strand_longer(self):
with self.assertRaisesWithMessage(ValueError):
hamming.distance("AATG", "AAA")
def test_disallow_second_strand_longer(self):
with self.assertRaisesWithMessage(ValueError):
hamming.distance("ATA", "AGTG")
# Utility functions
def setUp(self):
try:
self.assertRaisesRegex
except AttributeError:
self.assertRaisesRegex = self.assertRaisesRegexp
def assertRaisesWithMessage(self, exception):
return self.assertRaisesRegex(exception, r".+")
if __name__ == '__main__':
unittest.main()
| Python | 0 |
d47cfd7c1a4dd22ab175539dcb0e3702a21f8bb7 | Move scaling factors to constant and explain | ynr/apps/moderation_queue/management/commands/moderation_queue_detect_faces_in_queued_images.py | ynr/apps/moderation_queue/management/commands/moderation_queue_detect_faces_in_queued_images.py | import json
import boto3
from django.core.management.base import BaseCommand, CommandError
from moderation_queue.models import QueuedImage
# These magic values are because the AWS API crops faces quite tightly by
# default, meaning we literally just get the face. These values are about
# right or, they are more right than the default crop.
MIN_SCALING_FACTOR = 0.3
MAX_SCALING_FACTOR = 2
class Command(BaseCommand):
def handle(self, **options):
rekognition = boto3.client("rekognition", "eu-west-1")
attributes = ["ALL"]
any_failed = False
qs = QueuedImage.objects.filter(decision="undecided").exclude(
face_detection_tried=True
)
for qi in qs:
try:
detected = rekognition.detect_faces(
Image={"Bytes": qi.image.file.read()}, Attributes=attributes
)
self.set_x_y_from_response(qi, detected, options["verbosity"])
except Exception as e:
msg = "Skipping QueuedImage{id}: {error}"
self.stdout.write(msg.format(id=qi.id, error=e))
any_failed = True
qi.face_detection_tried = True
qi.save()
if any_failed:
raise CommandError("Broken images found (see above)")
def set_x_y_from_response(self, qi, detected, verbosity=0):
if detected and detected["FaceDetails"]:
im_width = qi.image.width
im_height = qi.image.height
bounding_box = detected["FaceDetails"][0]["BoundingBox"]
qi.crop_min_x = bounding_box["Left"] * im_width * MIN_SCALING_FACTOR
qi.crop_min_y = bounding_box["Top"] * im_height * MIN_SCALING_FACTOR
qi.crop_max_x = (
bounding_box["Width"] * im_width * MAX_SCALING_FACTOR
)
qi.crop_max_y = (
bounding_box["Height"] * im_height * MAX_SCALING_FACTOR
)
qi.detection_metadata = json.dumps(detected, indent=4)
if int(verbosity) > 1:
self.stdout.write("Set bounds of {}".format(qi))
else:
self.stdout.write("Couldn't find a face in {}".format(qi))
| import json
import boto3
from django.core.management.base import BaseCommand, CommandError
from moderation_queue.models import QueuedImage
class Command(BaseCommand):
def handle(self, **options):
rekognition = boto3.client("rekognition", "eu-west-1")
attributes = ["ALL"]
any_failed = False
qs = QueuedImage.objects.filter(decision="undecided").exclude(
face_detection_tried=True
)
for qi in qs:
try:
detected = rekognition.detect_faces(
Image={"Bytes": qi.image.file.read()}, Attributes=attributes
)
self.set_x_y_from_response(qi, detected, options["verbosity"])
except Exception as e:
msg = "Skipping QueuedImage{id}: {error}"
self.stdout.write(msg.format(id=qi.id, error=e))
any_failed = True
qi.face_detection_tried = True
qi.save()
if any_failed:
raise CommandError("Broken images found (see above)")
def set_x_y_from_response(self, qi, detected, verbosity=0):
if detected and detected["FaceDetails"]:
im_width = qi.image.width
im_height = qi.image.height
bounding_box = detected["FaceDetails"][0]["BoundingBox"]
qi.crop_min_x = bounding_box["Left"] * im_width * 0.3
qi.crop_min_y = bounding_box["Top"] * im_height * 0.3
qi.crop_max_x = bounding_box["Width"] * im_width * 2
qi.crop_max_y = bounding_box["Height"] * im_height * 2
qi.detection_metadata = json.dumps(detected, indent=4)
if int(verbosity) > 1:
self.stdout.write("Set bounds of {}".format(qi))
else:
self.stdout.write("Couldn't find a face in {}".format(qi))
| Python | 0 |
a0e0f7867e8e9805fb035a8db75e9d187fc06f3b | fix merge | rest_framework_social_oauth2/views.py | rest_framework_social_oauth2/views.py | # -*- coding: utf-8 -*-
import json
from braces.views import CsrfExemptMixin
from oauth2_provider.ext.rest_framework import OAuth2Authentication
from oauth2_provider.models import Application, AccessToken
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.views.mixins import OAuthLibMixin
from rest_framework import permissions
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.views import APIView
from .oauth2_backends import KeepRequestCore
from .oauth2_endpoints import SocialTokenServer
class ConvertTokenView(CsrfExemptMixin, OAuthLibMixin, APIView):
"""
Implements an endpoint to provide access tokens
The endpoint is used in the following flows:
* Authorization code
* Password
* Client credentials
"""
server_class = SocialTokenServer
validator_class = oauth2_settings.OAUTH2_VALIDATOR_CLASS
oauthlib_backend_class = KeepRequestCore
permission_classes = (permissions.AllowAny,)
def post(self, request, *args, **kwargs):
# Use the rest framework `.data` to fake the post body of the django request.
request._request.POST = request._request.POST.copy()
for key, value in request.data.iteritems():
request._request.POST[key] = value
url, headers, body, status = self.create_token_response(request._request)
response = Response(data=json.loads(body), status=status)
for k, v in headers.items():
response[k] = v
return response
@api_view(['POST'])
@authentication_classes([OAuth2Authentication])
@permission_classes([permissions.IsAuthenticated])
def invalidate_sessions(request):
client_id = request.POST.get("client_id", None)
if client_id is None:
return Response({
"client_id": ["This field is required."]
}, status=status.HTTP_400_BAD_REQUEST)
try:
app = Application.objects.get(client_id=client_id)
except Application.DoesNotExist:
return Response({
"detail": "The application linked to the provided client_id could not be found."
}, status=status.HTTP_400_BAD_REQUEST)
tokens = AccessToken.objects.filter(user=request.user, application=app)
tokens.delete()
return Response({}, status=status.HTTP_204_NO_CONTENT)
| # -*- coding: utf-8 -*-
import json
from braces.views import CsrfExemptMixin
from oauth2_provider.ext.rest_framework import OAuth2Authentication
from oauth2_provider.models import Application, AccessToken
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.views.mixins import OAuthLibMixin
from rest_framework import permissions
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.views import APIView
from .oauth2_backends import KeepRequestCore
from .oauth2_endpoints import SocialTokenServer
class ConvertTokenView(CsrfExemptMixin, OAuthLibMixin, APIView):
"""
Implements an endpoint to provide access tokens
The endpoint is used in the following flows:
* Authorization code
* Password
* Client credentials
"""
server_class = SocialTokenServer
validator_class = oauth2_settings.OAUTH2_VALIDATOR_CLASS
oauthlib_backend_class = KeepRequestCore
def post(self, request, *args, **kwargs):
# Use the rest framework `.data` to fake the post body of the django request.
request._request.POST = request._request.POST.copy()
for key, value in request.data.iteritems():
request._request.POST[key] = value
url, headers, body, status = self.create_token_response(request._request)
response = Response(data=json.loads(body), status=status)
for k, v in headers.items():
response[k] = v
return response
@api_view(['POST'])
@authentication_classes([OAuth2Authentication])
@permission_classes([permissions.IsAuthenticated])
def invalidate_sessions(request):
client_id = request.POST.get("client_id", None)
if client_id is None:
return Response({
"client_id": ["This field is required."]
}, status=status.HTTP_400_BAD_REQUEST)
try:
app = Application.objects.get(client_id=client_id)
except Application.DoesNotExist:
return Response({
"detail": "The application linked to the provided client_id could not be found."
}, status=status.HTTP_400_BAD_REQUEST)
tokens = AccessToken.objects.filter(user=request.user, application=app)
tokens.delete()
return Response({}, status=status.HTTP_204_NO_CONTENT)
| Python | 0.000001 |
e13a74ae4e1884017593143e01e8882d7e802d7b | clean up imports | src/compas_rhino/geometry/__init__.py | src/compas_rhino/geometry/__init__.py | """
********************************************************************************
geometry
********************************************************************************
.. currentmodule:: compas_rhino.geometry
"""
from __future__ import absolute_import
__all__ = []
| """
********************************************************************************
geometry
********************************************************************************
.. currentmodule:: compas_rhino.geometry
Classes
=======
.. autosummary::
:toctree: generated/
:nosignatures:
RhinoGeometry
RhinoBox
RhinoCircle
RhinoCone
RhinoCurve
RhinoCylinder
RhinoEllipse
RhinoLine
RhinoMesh
RhinoPlane
RhinoPoint
RhinoPolyline
RhinoSphere
RhinoSurface
RhinoVector
"""
from __future__ import absolute_import
from ._geometry import RhinoGeometry
from .box import RhinoBox
from .circle import RhinoCircle
from .cone import RhinoCone
from .curve import RhinoCurve
from .cylinder import RhinoCylinder
from .ellipse import RhinoEllipse
from .line import RhinoLine
from .mesh import RhinoMesh
from .plane import RhinoPlane
from .point import RhinoPoint
from .polyline import RhinoPolyline
from .sphere import RhinoSphere
from .surface import RhinoSurface
from .vector import RhinoVector
BaseRhinoGeometry = RhinoGeometry
__all__ = [
'RhinoGeometry',
'RhinoBox',
'RhinoCircle',
'RhinoCone',
'RhinoCurve',
'RhinoCylinder',
'RhinoEllipse',
'RhinoLine',
'RhinoMesh',
'RhinoPlane',
'RhinoPoint',
'RhinoPolyline',
'RhinoSphere',
'RhinoSurface',
'RhinoVector',
]
| Python | 0.000001 |
3fb93c4b839457430180f65f1feae4c7abdba0ac | tag celery syslog messages | dbaas/dbaas/celery.py | dbaas/dbaas/celery.py | from __future__ import absolute_import
import os
import logging
from datetime import timedelta
from celery import Celery
from django.conf import settings
from dbaas import celeryconfig
from logging.handlers import SysLogHandler
from celery.log import redirect_stdouts_to_logger
from celery.signals import after_setup_task_logger, after_setup_logger
def setup_log(**args):
# redirect stdout and stderr to logger
redirect_stdouts_to_logger(args['logger'])
# logs to local syslog
syslog = SysLogHandler(address=settings.SYSLOG_FILE, facility=logging.handlers.SysLogHandler.LOG_LOCAL3)
# setting log level
syslog.setLevel(args['loglevel'])
# setting log format
formatter = logging.Formatter('dbaas: #celery %(name)s %(message)s')
syslog.setFormatter(formatter)
# add new handler to logger
args['logger'].addHandler(syslog)
after_setup_logger.connect(setup_log)
after_setup_task_logger.connect(setup_log)
LOG = logging.getLogger(__name__)
#set this variable to True to run celery tasks synchronously
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dbaas.settings')
app = Celery('dbaas')
app.config_from_object(celeryconfig)
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
LOG.debug('Request: {0!r}'.format(self.request)) | from __future__ import absolute_import
import os
import logging
from datetime import timedelta
from celery import Celery
from django.conf import settings
from dbaas import celeryconfig
from logging.handlers import SysLogHandler
from celery.log import redirect_stdouts_to_logger
from celery.signals import after_setup_task_logger, after_setup_logger
def setup_log(**args):
# redirect stdout and stderr to logger
redirect_stdouts_to_logger(args['logger'])
# logs to local syslog
syslog = SysLogHandler(address=settings.SYSLOG_FILE, facility=logging.handlers.SysLogHandler.LOG_LOCAL3)
# setting log level
syslog.setLevel(args['loglevel'])
# setting log format
formatter = logging.Formatter('dbaas: %(name)s %(message)s')
syslog.setFormatter(formatter)
# add new handler to logger
args['logger'].addHandler(syslog)
after_setup_logger.connect(setup_log)
after_setup_task_logger.connect(setup_log)
LOG = logging.getLogger(__name__)
#set this variable to True to run celery tasks synchronously
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dbaas.settings')
app = Celery('dbaas')
app.config_from_object(celeryconfig)
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
LOG.debug('Request: {0!r}'.format(self.request)) | Python | 0 |
c0f917c6098b18479a69fe129a0fd19d11f67df7 | Fix startup | src/btsoot.py | src/btsoot.py | #!/usr/bin/env python3.5
#MIT License
#
#Copyright (c) 2016 Paul Kramme
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
def main():
print("BTSOOT 0.1.0")
if __name__ == __name__:
try:
main()
except KeyboardInterrupt:
print("Stopping program.")
exit()
except Exception:
print("Unknown Critical Exception")
print("Quitting...")
| Python | 0.000004 | |
a468ad4a8f28fb7c88f56869ae68de7b4b55ff39 | Fix 'ResultSet has no len' bug in delete_insert_test | delete_insert_test.py | delete_insert_test.py | import random
import threading
import uuid
from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement
from dtest import Tester
class DeleteInsertTest(Tester):
"""
Examines scenarios around deleting data and adding data back with the same key
"""
def __init__(self, *args, **kwargs):
Tester.__init__(self, *args, **kwargs)
# Generate 1000 rows in memory so we can re-use the same ones over again:
self.groups = ['group1', 'group2', 'group3', 'group4']
self.rows = [(str(uuid.uuid1()), x, random.choice(self.groups)) for x in range(1000)]
def create_ddl(self, session, rf={'dc1': 2, 'dc2': 2}):
self.create_ks(session, 'delete_insert_search_test', rf)
session.execute('CREATE TABLE test (id uuid PRIMARY KEY, val1 text, group text)')
session.execute('CREATE INDEX group_idx ON test (group)')
def delete_group_rows(self, session, group):
"""Delete rows from a given group and return them"""
rows = [r for r in self.rows if r[2] == group]
ids = [r[0] for r in rows]
session.execute('DELETE FROM test WHERE id in (%s)' % ', '.join(ids))
return list(rows)
def insert_all_rows(self, session):
self.insert_some_rows(session, self.rows)
def insert_some_rows(self, session, rows):
for row in rows:
session.execute("INSERT INTO test (id, val1, group) VALUES (%s, '%s', '%s')" % row)
def delete_insert_search_test(self):
cluster = self.cluster
cluster.populate([2, 2]).start()
node1 = cluster.nodelist()[0]
session = self.cql_connection(node1)
session.consistency_level = 'LOCAL_QUORUM'
self.create_ddl(session)
# Create 1000 rows:
self.insert_all_rows(session)
# Delete all of group2:
deleted = self.delete_group_rows(session, 'group2')
# Put that group back:
self.insert_some_rows(session, rows=deleted)
# Verify that all of group2 is back, 20 times, in parallel
# querying across all nodes:
class ThreadedQuery(threading.Thread):
def __init__(self, connection):
threading.Thread.__init__(self)
self.connection = connection
def run(self):
session = self.connection
query = SimpleStatement("SELECT * FROM delete_insert_search_test.test WHERE group = 'group2'", consistency_level=ConsistencyLevel.LOCAL_QUORUM)
rows = session.execute(query)
assert len(list(rows)) == len(deleted)
threads = []
for x in range(20):
conn = self.cql_connection(random.choice(cluster.nodelist()))
threads.append(ThreadedQuery(conn))
for t in threads:
t.start()
for t in threads:
t.join()
| import random
import threading
import uuid
from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement
from dtest import Tester
class DeleteInsertTest(Tester):
"""
Examines scenarios around deleting data and adding data back with the same key
"""
def __init__(self, *args, **kwargs):
Tester.__init__(self, *args, **kwargs)
# Generate 1000 rows in memory so we can re-use the same ones over again:
self.groups = ['group1', 'group2', 'group3', 'group4']
self.rows = [(str(uuid.uuid1()), x, random.choice(self.groups)) for x in range(1000)]
def create_ddl(self, session, rf={'dc1': 2, 'dc2': 2}):
self.create_ks(session, 'delete_insert_search_test', rf)
session.execute('CREATE TABLE test (id uuid PRIMARY KEY, val1 text, group text)')
session.execute('CREATE INDEX group_idx ON test (group)')
def delete_group_rows(self, session, group):
"""Delete rows from a given group and return them"""
rows = [r for r in self.rows if r[2] == group]
ids = [r[0] for r in rows]
session.execute('DELETE FROM test WHERE id in (%s)' % ', '.join(ids))
return rows
def insert_all_rows(self, session):
self.insert_some_rows(session, self.rows)
def insert_some_rows(self, session, rows):
for row in rows:
session.execute("INSERT INTO test (id, val1, group) VALUES (%s, '%s', '%s')" % row)
def delete_insert_search_test(self):
cluster = self.cluster
cluster.populate([2, 2]).start()
node1 = cluster.nodelist()[0]
session = self.cql_connection(node1)
session.consistency_level = 'LOCAL_QUORUM'
self.create_ddl(session)
# Create 1000 rows:
self.insert_all_rows(session)
# Delete all of group2:
deleted = self.delete_group_rows(session, 'group2')
# Put that group back:
self.insert_some_rows(session, rows=deleted)
# Verify that all of group2 is back, 20 times, in parallel
# querying across all nodes:
class ThreadedQuery(threading.Thread):
def __init__(self, connection):
threading.Thread.__init__(self)
self.connection = connection
def run(self):
session = self.connection
query = SimpleStatement("SELECT * FROM delete_insert_search_test.test WHERE group = 'group2'", consistency_level=ConsistencyLevel.LOCAL_QUORUM)
rows = session.execute(query)
assert len(rows) == len(deleted)
threads = []
for x in range(20):
conn = self.cql_connection(random.choice(cluster.nodelist()))
threads.append(ThreadedQuery(conn))
for t in threads:
t.start()
for t in threads:
t.join()
| Python | 0.000002 |
21620653125f33fd0d19c1bb2f16b51ec3c853f9 | fix tmin/tmax | ASC/SkyPie.py | ASC/SkyPie.py | #! /usr/bin/env python
#
# Takes about 15" for 1400 images on laptop with a local fast disk (100% cpu)
# But 60" on the Xeon, but at 300% cpu
#
import matplotlib.pyplot as plt
import numpy as np
import sys
table = sys.argv[1]
png = table + '.png'
twopi = 2*np.pi
# table of decimal hour time and median sky brightness (50,000 is very bright)
(t,s) = np.loadtxt(table).T
print("Sky: ",s.min(),s.max())
print("Time:",t.min(),t.max())
t0 = t[0]
t1 = t[-1]
print(t0,t1)
# tmin is the sunrise, from t1 (6), should be near 90
# tmax is the sunset, from t0 (18) 270
tmin = (6-t1)*15 + 90
tmax = (18-t0)*15 + 270
print(tmin,tmax)
x = (12-t) * twopi / 24.0
y = s.max()-s
print(x.min(),x.max())
print(y.min(),y.max())
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection='polar'))
ax.plot(x, y)
ax.set_theta_zero_location('S')
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())
if False:
# always same pie, an extra hour either side
tmin=75
tmax=285
print(tmin,tmax)
ax.set_thetamin(tmin)
ax.set_thetamax(tmax)
ya = 0.2 * y
yb = 0.4 * y
yc = 0.8 * y
yd = 0.8 * y
ye = 0.9 * y
ax.fill_between(x,0, ya,facecolor='green',alpha=0.1)
ax.fill_between(x,ya,yb,facecolor='green',alpha=0.3)
ax.fill_between(x,yb,yc,facecolor='green',alpha=0.5)
ax.fill_between(x,yc,yd,facecolor='green',alpha=0.7)
ax.fill_between(x,yd,ye,facecolor='green',alpha=0.85)
ax.fill_between(x,ye,y ,facecolor='green',alpha=1)
# needs tweaking
plt.text(3.14,50000,'midnight',horizontalalignment='center')
plt.text(1.1,42000,'sunrise')
plt.text(5.1,48000,'sunset')
plt.text(5.5,20000,'imagine a moon')
plt.title("%s sky: %g %g %g-%g h" % (table,s.min(),s.max(),t0,t1))
plt.savefig(png)
plt.show()
print("Written ",png)
| #! /usr/bin/env python
#
# Takes about 15" fpr 1400 images on laptop with a local fast disk
#
import matplotlib.pyplot as plt
import numpy as np
import sys
date = ''
table = sys.argv[1]
png = table + '.png'
twopi = 2*np.pi
# table of time index (1...N) and median sky brightness (50,000 is very bright)
(t,s) = np.loadtxt(table).T
print("Sky: ",s.min(),s.max())
print("Time:",t.min(),t.max())
t0 = t[0]
t1 = t[-1]
print(t0,t1)
# degrees for polar plot
tmin = (t0-12.0)*180/12.0
tmax = 360 - (12-t1)*180/12.0
x = (12+24-t) * twopi / 24.0
y = s.max()-s
print(x.min(),x.max())
print(y.min(),y.max())
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection='polar'))
ax.plot(x, y)
ax.set_theta_zero_location('S')
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.set_thetamin(tmin)
ax.set_thetamax(tmax)
ya = 0.2 * y
yb = 0.4 * y
yc = 0.8 * y
yd = 0.8 * y
ye = 0.9 * y
ax.fill_between(x,0, ya,facecolor='green',alpha=0.1)
ax.fill_between(x,ya,yb,facecolor='green',alpha=0.3)
ax.fill_between(x,yb,yc,facecolor='green',alpha=0.5)
ax.fill_between(x,yc,yd,facecolor='green',alpha=0.7)
ax.fill_between(x,yd,ye,facecolor='green',alpha=0.85)
ax.fill_between(x,ye,y ,facecolor='green',alpha=1)
# needs tweaking
plt.text(3.14,50000,'midnight',horizontalalignment='center')
plt.text(1.1,42000,'sunrise')
plt.text(5.1,48000,'sunset')
plt.text(5.5,20000,'imagine a moon')
plt.title("%s sky: %g %g %g-%g h" % (table,s.min(),s.max(),t0,t1))
plt.savefig(png)
plt.show()
print("Written ",png)
| Python | 0.00001 |
d84034db71abac46ef765f1640f3efa6712f5c42 | Update RegisterHandler.py | Handlers/RegisterHandler.py | Handlers/RegisterHandler.py | # -*- coding: utf-8 -*-
import logging
from Handlers.BaseHandler import BaseHandler
from Tools import PostgreSQL, VerifyFields
logger = logging.getLogger(__name__)
class RegisterHandler(BaseHandler):
"""handle / endpoint"""
def get(self):
"""Serve Get and return main page"""
self.render('register.html')
def post(self):
"""Get user completed form and verify it before save it"""
prenom = self.get_body_argument('prenom')
nom = self.get_body_argument('nom')
courriel = self.get_body_argument('courriel')
genre = self.get_body_argument('genre')
promotion = int(self.get_body_argument('promotion'))
if VerifyFields.verify_all(prenom, nom, courriel, genre, promotion):
PostgreSQL.insert_inscrit(prenom, nom, genre, courriel, promotion)
self.render('registered.html')
else:
self.send_error(status_code=400)
| # -*- coding: utf-8 -*-
import logging
from Handlers.BaseHandler import BaseHandler
from Tools import PostgreSQL, VerifyFields
logger = logging.getLogger(__name__)
class RegisterHandler(BaseHandler):
"""handle / endpoint"""
def initialize(self):
self.conn = PostgreSQL.get_session()
def get(self):
"""Serve Get and return main page"""
self.render('register.html')
def post(self):
"""Get user completed form and verify it before save it"""
prenom = self.get_body_argument('prenom')
nom = self.get_body_argument('nom')
courriel = self.get_body_argument('courriel')
genre = self.get_body_argument('genre')
promotion = int(self.get_body_argument('promotion'))
if VerifyFields.verify_all(prenom, nom, courriel, genre, promotion):
PostgreSQL.insert_inscrit(prenom, nom, genre, courriel, promotion)
self.render('registered.html')
else:
self.send_error(status_code=400)
| Python | 0.000001 |
a7160ce9345b14e656ce702b187048347b843811 | update test_with_count_combination.py | tests/unit/selection/modules/test_with_count_combination.py | tests/unit/selection/modules/test_with_count_combination.py | # Tai Sakuma <tai.sakuma@gmail.com>
import itertools
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.selection.modules import AllwCount, AnywCount, NotwCount
##__________________________________________________________________||
class MockEventSelection(object):
def begin(self, event): pass
def __call__(self, event): pass
def end(self): pass
##__________________________________________________________________||
@pytest.fixture()
def tree():
# all0 - all1 --- all2 --- sel1
# | +- sel2
# +- not1 --- any1 --- all3 --- sel3
# | +- sel4
# +- sel5
sel1 = mock.Mock(spec=MockEventSelection)
sel1.name ='sel1'
sel2 = mock.Mock(spec=MockEventSelection)
sel2.name ='sel2'
sel3 = mock.Mock(spec=MockEventSelection)
sel3.name ='sel3'
sel4 = mock.Mock(spec=MockEventSelection)
sel4.name ='sel4'
sel5 = mock.Mock(spec=MockEventSelection)
sel5.name ='sel5'
all0 = AllwCount(name='all0')
all1 = AllwCount(name='all1')
all2 = AllwCount(name='all2')
all3 = AllwCount(name='all3')
any1 = AnywCount(name='any1')
all3.add(sel3)
all3.add(sel4)
any1.add(all3)
any1.add(sel5)
not1 = NotwCount(any1, name='not1')
all2.add(sel1)
all2.add(sel2)
all1.add(all2)
all1.add(not1)
all0.add(all1)
return dict(
alls=(all0, all1, all2, all3),
anys=(any1, ),
nots=(not1, ),
sels=(sel1, sel2, sel3, sel4, sel5)
)
##__________________________________________________________________||
def test_combination(tree):
all0 = tree['alls'][0]
sels = tree['sels']
event = mock.Mock()
all0.begin(event)
all_possible_results = itertools.product(*[[True, False]]*len(sels))
# e.g.,
# [
# (True, True, True, True, True),
# (True, True, True, True, False),
# ...
# (False, False, False, False, False)
# ]
for l in all_possible_results:
# e.g. l = (True, True, False, True, False)
for sel, ret in zip(sels, l):
sel.return_value = ret
all0(event)
all0.end()
count = all0.results()
assert [
[1, 'AllwCount', 'all1', 3, 32],
[2, 'AllwCount', 'all2', 8, 32],
[3, 'MockEventSelection', 'sel1', 16, 32],
[3, 'MockEventSelection', 'sel2', 8, 16],
[2 , 'NotwCount', 'not1', 3, 8],
[3, 'AnywCount', 'any1', 5, 8],
[4, 'AllwCount', 'all3', 2, 8],
[5, 'MockEventSelection', 'sel3', 4, 8],
[5, 'MockEventSelection', 'sel4', 2, 4],
[4, 'MockEventSelection', 'sel5', 3, 6]
] == count._results
##__________________________________________________________________||
| # Tai Sakuma <tai.sakuma@gmail.com>
import itertools
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.selection.modules import AllwCount, AnywCount, NotwCount
##__________________________________________________________________||
class MockEventSelection(object):
def begin(self, event): pass
def __call__(self, event): pass
def end(self): pass
##__________________________________________________________________||
def test_combination():
# all0 - all1 --- all2 --- sel1
# | +- sel2
# +- not1 --- any1 --- all3 --- sel3
# | +- sel4
# +- sel5
sel1 = mock.Mock(spec=MockEventSelection)
sel1.name ='sel1'
sel2 = mock.Mock(spec=MockEventSelection)
sel2.name ='sel2'
sel3 = mock.Mock(spec=MockEventSelection)
sel3.name ='sel3'
sel4 = mock.Mock(spec=MockEventSelection)
sel4.name ='sel4'
sel5 = mock.Mock(spec=MockEventSelection)
sel5.name ='sel5'
all0 = AllwCount(name='all0')
all1 = AllwCount(name='all1')
all2 = AllwCount(name='all2')
all3 = AllwCount(name='all3')
any1 = AnywCount(name='any1')
all3.add(sel3)
all3.add(sel4)
any1.add(all3)
any1.add(sel5)
not1 = NotwCount(any1, name='not1')
all2.add(sel1)
all2.add(sel2)
all1.add(all2)
all1.add(not1)
all0.add(all1)
event = mock.Mock()
all0.begin(event)
for l in itertools.product(*[[True, False]]*5):
sel1.return_value = l[0]
sel2.return_value = l[1]
sel3.return_value = l[2]
sel4.return_value = l[3]
sel5.return_value = l[4]
all0(event)
all0.end()
count = all0.results()
assert [
[1, 'AllwCount', 'all1', 3, 32],
[2, 'AllwCount', 'all2', 8, 32],
[3, 'MockEventSelection', 'sel1', 16, 32],
[3, 'MockEventSelection', 'sel2', 8, 16],
[2 , 'NotwCount', 'not1', 3, 8],
[3, 'AnywCount', 'any1', 5, 8],
[4, 'AllwCount', 'all3', 2, 8],
[5, 'MockEventSelection', 'sel3', 4, 8],
[5, 'MockEventSelection', 'sel4', 2, 4],
[4, 'MockEventSelection', 'sel5', 3, 6]
] == count._results
##__________________________________________________________________||
| Python | 0.000002 |
19b251a41ad26d7dabaf571b9bb90b82b9108d4b | fix hostname config issue | vint/cerf_api.py | vint/cerf_api.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import json
from urlparse import urljoin
import requests
__author__ = 'tchen'
logger = logging.getLogger(__name__)
DEFAULT_HOSTNAME = 'http://exam.tchen.me'
#DEFAULT_HOSTNAME = 'http://localhost:8000'
class Request(object):
hostname = ''
api_path = '/'
def __init__(self, authcode):
from misc import config
self.authcode = authcode
self.api_base = self.hostname + self.api_path
if config:
try:
self.hostname = config.get('global', 'host')
except:
pass
def retrieve(self, id):
url = urljoin(self.api_base, str(id)) + '/'
try:
r = requests.get(url, data={'authcode': self.authcode})
return json.loads(r.text)
except:
return {}
def delete(self, id):
url = urljoin(self.api_base, str(id)) + '/'
try:
r = requests.delete(url, data={'authcode': self.authcode})
if r.status_code == requests.codes.no_content:
return True
return False
except:
return False
class Cerf(object):
def __init__(self, id, authcode, hostname=DEFAULT_HOSTNAME):
from misc import config
self.id = id
self.authcode = authcode
self.hostname = hostname
if config:
try:
self.hostname = config.get('global', 'host')
print 'Host name is: %s' % self.hostname
except:
pass
self.interview = Interview(authcode, id)
self.exam = Exam(authcode)
self.answer = Answer(authcode)
class Interview(Request):
hostname = DEFAULT_HOSTNAME
api_path = '/api/interviews/'
def __init__(self, authcode, id):
super(Interview, self).__init__(authcode)
self.id = id
def update(self, action, id=None, authcode=None):
id = id or self.id
authcode = authcode or self.authcode
url = urljoin(self.api_base, str(id)) + '/'
try:
r = requests.put(url, data={'authcode': authcode, 'action': action})
return json.loads(r.text)
except:
return {}
def start(self, id=None, authcode=None):
return self.update('start', id, authcode)
def finish(self, id=None, authcode=None):
return self.update('finish', id, authcode)
def reset(self, id=None, authcode=None):
return self.update('reset', id, authcode)
class Exam(Request):
hostname = DEFAULT_HOSTNAME
api_path = '/api/exams/'
class Answer(Request):
hostname = DEFAULT_HOSTNAME
api_path = '/api/answers/'
def create(self, data):
headers = {'Content-type': 'application/json', 'Accept': '*/*'}
try:
r = requests.post(self.api_base + '?authcode=%s' % self.authcode,
data=json.dumps(data), headers=headers)
if r.status_code != requests.codes.created:
return {}
return json.loads(r.text)
except Exception:
return {}
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import json
from urlparse import urljoin
import requests
__author__ = 'tchen'
logger = logging.getLogger(__name__)
DEFAULT_HOSTNAME = 'http://exam.tchen.me'
#DEFAULT_HOSTNAME = 'http://localhost:8000'
class Request(object):
hostname = ''
api_path = '/'
def __init__(self, authcode):
self.authcode = authcode
self.api_base = self.hostname + self.api_path
def retrieve(self, id):
url = urljoin(self.api_base, str(id)) + '/'
try:
r = requests.get(url, data={'authcode': self.authcode})
return json.loads(r.text)
except:
return {}
def delete(self, id):
url = urljoin(self.api_base, str(id)) + '/'
try:
r = requests.delete(url, data={'authcode': self.authcode})
if r.status_code == requests.codes.no_content:
return True
return False
except:
return False
class Cerf(object):
def __init__(self, id, authcode, hostname=DEFAULT_HOSTNAME):
from misc import config
self.id = id
self.authcode = authcode
self.hostname = hostname
if config:
try:
self.hostname = config.get('global', 'host')
except:
pass
self.interview = Interview(authcode, id)
self.exam = Exam(authcode)
self.answer = Answer(authcode)
class Interview(Request):
hostname = DEFAULT_HOSTNAME
api_path = '/api/interviews/'
def __init__(self, authcode, id):
super(Interview, self).__init__(authcode)
self.id = id
def update(self, action, id=None, authcode=None):
id = id or self.id
authcode = authcode or self.authcode
url = urljoin(self.api_base, str(id)) + '/'
try:
r = requests.put(url, data={'authcode': authcode, 'action': action})
return json.loads(r.text)
except:
return {}
def start(self, id=None, authcode=None):
return self.update('start', id, authcode)
def finish(self, id=None, authcode=None):
return self.update('finish', id, authcode)
def reset(self, id=None, authcode=None):
return self.update('reset', id, authcode)
class Exam(Request):
hostname = DEFAULT_HOSTNAME
api_path = '/api/exams/'
class Answer(Request):
hostname = DEFAULT_HOSTNAME
api_path = '/api/answers/'
def create(self, data):
headers = {'Content-type': 'application/json', 'Accept': '*/*'}
try:
r = requests.post(self.api_base + '?authcode=%s' % self.authcode,
data=json.dumps(data), headers=headers)
if r.status_code != requests.codes.created:
return {}
return json.loads(r.text)
except Exception:
return {}
| Python | 0.000004 |
98d0e2d5aaff184afc598bef67491632c0eab066 | Add save_load and get_load to mongo returner | salt/returners/mongo_future_return.py | salt/returners/mongo_future_return.py | '''
Return data to a mongodb server
Required python modules: pymongo
This returner will send data from the minions to a MongoDB server. To
configure the settings for your MongoDB server, add the following lines
to the minion config files::
mongo.db: <database name>
mongo.host: <server ip address>
mongo.user: <MongoDB username>
mongo.password: <MongoDB user password>
mongo.port: 27017
This mongo returner is being developed to replace the default mongodb returner
in the future and should not be considered api stable yet.
'''
# Import python libs
import logging
# Import third party libs
try:
import pymongo
has_pymongo = True
except ImportError:
has_pymongo = False
log = logging.getLogger(__name__)
def __virtual__():
if not has_pymongo:
return False
return 'mongo'
def _remove_dots(d):
output = {}
for k, v in d.iteritems():
if isinstance(v, dict):
v = _remove_dots(v)
output[k.replace('.', '-')] = v
return output
def _get_conn():
'''
Return a mongodb connection object
'''
conn = pymongo.Connection(
__salt__['config.option']('mongo.host'),
__salt__['config.option']('mongo.port'))
db = conn[__salt__['config.option']('mongo.db')]
user = __salt__['config.option']('mongo.user')
password = __salt__['config.option']('mongo.password')
if user and password:
db.authenticate(user, password)
return conn, db
def returner(ret):
'''
Return data to a mongodb server
'''
conn, db = _get_conn()
col = db[ret['id']]
back = {}
if isinstance(ret['return'], dict):
back = _remove_dots(ret['return'])
else:
back = ret['return']
log.debug(back)
sdata = {ret['jid']: back, 'fun': ret['fun']}
if 'out' in ret:
sdata['out'] = ret['out']
col.insert(sdata)
def save_load(jid, load):
'''
Save the load for a given job id
'''
conn, db = _get_conn()
col = db[jid]
col.insert(load)
def get_load(jid):
'''
Returnt he load asociated with a given job id
'''
conn, db = _get_conn()
return db[jid].find_one()
def get_jid(jid):
'''
Return the return information associated with a jid
'''
conn, db = _get_conn()
ret = {}
for collection in db.collection_names():
rdata = db[collection].find_one({jid: {'$exists': 'true'}})
if rdata:
ret[collection] = rdata
return ret
def get_fun(fun):
'''
Return the most recent jobs that have executed the named function
'''
conn, db = _get_conn()
ret = {}
for collection in db.collection_names():
rdata = db[collection].find_one({'fun': fun})
if rdata:
ret[collection] = rdata
return ret
| '''
Return data to a mongodb server
Required python modules: pymongo
This returner will send data from the minions to a MongoDB server. To
configure the settings for your MongoDB server, add the following lines
to the minion config files::
mongo.db: <database name>
mongo.host: <server ip address>
mongo.user: <MongoDB username>
mongo.password: <MongoDB user password>
mongo.port: 27017
This mongo returner is being developed to replace the default mongodb returner
in the future and should not be considered api stable yet.
'''
# Import python libs
import logging
# Import third party libs
try:
import pymongo
has_pymongo = True
except ImportError:
has_pymongo = False
log = logging.getLogger(__name__)
def __virtual__():
if not has_pymongo:
return False
return 'mongo'
def _remove_dots(d):
output = {}
for k, v in d.iteritems():
if isinstance(v, dict):
v = _remove_dots(v)
output[k.replace('.', '-')] = v
return output
def _get_conn():
'''
Return a mongodb connection object
'''
conn = pymongo.Connection(
__salt__['config.option']('mongo.host'),
__salt__['config.option']('mongo.port'))
db = conn[__salt__['config.option']('mongo.db')]
user = __salt__['config.option']('mongo.user')
password = __salt__['config.option']('mongo.password')
if user and password:
db.authenticate(user, password)
return conn, db
def returner(ret):
'''
Return data to a mongodb server
'''
conn, db = _get_conn()
col = db[ret['id']]
back = {}
if isinstance(ret['return'], dict):
back = _remove_dots(ret['return'])
else:
back = ret['return']
log.debug(back)
sdata = {ret['jid']: back, 'fun': ret['fun']}
if 'out' in ret:
sdata['out'] = ret['out']
col.insert(sdata)
def get_jid(jid):
'''
Return the return information associated with a jid
'''
conn, db = _get_conn()
ret = {}
for collection in db.collection_names():
rdata = db[collection].find_one({jid: {'$exists': 'true'}})
if rdata:
ret[collection] = rdata
return ret
def get_fun(fun):
'''
Return the most recent jobs that have executed the named function
'''
conn, db = _get_conn()
ret = {}
for collection in db.collection_names():
rdata = db[collection].find_one({'fun': fun})
if rdata:
ret[collection] = rdata
return ret
| Python | 0 |
0aaa9000f8cf545bd5bfa41b6538d56c91dbde97 | Update base box in sample config too | sampleconfigs/makebs.config.sample.py | sampleconfigs/makebs.config.sample.py | #!/usr/bin/env python2
# You will need to alter these before running ./makebuildserver
# Name of the base box to use...
basebox = "testing32"
# Location where raring32.box can be found, if you don't already have
# it. For security reasons, it's recommended that you make your own
# in a secure environment using trusted media (see the manual) but
# you can use this default if you like...
baseboxurl = "https://f-droid.org/testing32.box"
memory = 3584
# Debian package proxy server - set this to None unless you have one...
aptproxy = "http://192.168.0.19:8000"
# Set to True if your base box is 64 bit...
arch64 = False
| #!/usr/bin/env python2
# You will need to alter these before running ./makebuildserver
# Name of the base box to use...
basebox = "raring32"
# Location where raring32.box can be found, if you don't already have
# it. Could be set to https://f-droid.org/raring32.box if you like...
baseboxurl = "/shares/software/OS and Boot/raring32.box"
memory = 3584
# Debian package proxy server - set this to None unless you have one...
aptproxy = "http://192.168.0.19:8000"
# Set to True if your base box is 64 bit...
arch64 = False
| Python | 0 |
f5ea4c44480b0beafd7d22b50228d50d3130e7a2 | support for deleting files when using utils-process in auto-mode | utils/process.py | utils/process.py | #!/usr/bin/env python
# Copyright (C) 2010-2014 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import sys
import time
import logging
import argparse
import multiprocessing
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), ".."))
from lib.cuckoo.common.config import Config
from lib.cuckoo.common.constants import CUCKOO_ROOT
from lib.cuckoo.core.database import Database, TASK_REPORTED, TASK_COMPLETED
from lib.cuckoo.core.database import TASK_FAILED_PROCESSING
from lib.cuckoo.core.plugins import RunProcessing, RunSignatures, RunReporting
from lib.cuckoo.core.startup import init_modules
def process(aid, target=None, copy_path=None, report=False, auto=False):
results = RunProcessing(task_id=aid).run()
RunSignatures(results=results).run()
if report:
RunReporting(task_id=aid, results=results).run()
Database().set_status(aid, TASK_REPORTED)
if auto:
if cfg.cuckoo.delete_original and os.path.exists(target):
os.unlink(target)
if cfg.cuckoo.delete_bin_copy and os.path.exists(copy_path):
os.unlink(copy_path)
def autoprocess(parallel=1):
maxcount = cfg.cuckoo.max_analysis_count
count = 0
db = Database()
pool = multiprocessing.Pool(parallel)
pending_results = []
# CAUTION - big ugly loop ahead
while count < maxcount or not maxcount:
# pending_results maintenance
for ar, tid, target, copy_path in list(pending_results):
if ar.ready():
if ar.successful():
log.info("Task #%d: reports generation completed", tid)
else:
try:
ar.get()
except:
log.exception("Exception when processing task ID %u.", tid)
db.set_status(tid, TASK_FAILED_PROCESSING)
pending_results.remove((ar, tid, target, copy_path))
# if still full, don't add more (necessary despite pool)
if len(pending_results) >= parallel:
time.sleep(1)
continue
# if we're here, getting #parallel tasks should at least have one we don't know
tasks = db.list_tasks(status=TASK_COMPLETED, limit=parallel)
# for loop to add only one, nice
for task in tasks:
# not-so-efficient lock
if task.id in [tid for ar, tid, target, copy_path
in pending_results]:
continue
log.info("Processing analysis data for Task #%d", task.id)
copy_path = os.path.join(CUCKOO_ROOT, "storage", "binaries",
task.sample.sha256)
args = task.id, task.target, copy_path
kwargs = dict(report=True, auto=True)
result = pool.apply_async(process, args, kwargs)
pending_results.append((result, task.id, task.target, copy_path))
count += 1
break
# if there wasn't anything to add, sleep tight
if not tasks:
time.sleep(5)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("id", type=str, help="ID of the analysis to process (auto for continuous processing of unprocessed tasks).")
parser.add_argument("-d", "--debug", help="Display debug messages", action="store_true", required=False)
parser.add_argument("-r", "--report", help="Re-generate report", action="store_true", required=False)
parser.add_argument("-p", "--parallel", help="Number of parallel threads to use (auto mode only).", type=int, required=False, default=1)
args = parser.parse_args()
if args.debug:
log.setLevel(logging.DEBUG)
init_modules()
if args.id == "auto":
autoprocess(parallel=args.parallel)
else:
process(args.id, report=args.report)
if __name__ == "__main__":
cfg = Config()
try:
main()
except KeyboardInterrupt:
pass
| #!/usr/bin/env python
# Copyright (C) 2010-2014 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import sys
import time
import logging
import argparse
import multiprocessing
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), ".."))
from lib.cuckoo.common.config import Config
from lib.cuckoo.core.database import Database, TASK_REPORTED, TASK_COMPLETED
from lib.cuckoo.core.database import TASK_FAILED_PROCESSING
from lib.cuckoo.core.plugins import RunProcessing, RunSignatures, RunReporting
from lib.cuckoo.core.startup import init_modules
def process(aid, report=False):
results = RunProcessing(task_id=aid).run()
RunSignatures(results=results).run()
if report:
RunReporting(task_id=aid, results=results).run()
Database().set_status(aid, TASK_REPORTED)
def autoprocess(parallel=1):
cfg = Config()
maxcount = cfg.cuckoo.max_analysis_count
count = 0
db = Database()
pool = multiprocessing.Pool(parallel)
pending_results = []
# CAUTION - big ugly loop ahead
while count < maxcount or not maxcount:
# pending_results maintenance
for ar, tid in list(pending_results):
if ar.ready():
if ar.successful():
log.info("Task #%d: reports generation completed", tid)
else:
try:
ar.get()
except:
log.exception("Exception when processing task ID %u.", tid)
db.set_status(tid, TASK_FAILED_PROCESSING)
pending_results.remove((ar, tid))
# if still full, don't add more (necessary despite pool)
if len(pending_results) >= parallel:
time.sleep(1)
continue
# if we're here, getting #parallel tasks should at least have one we don't know
tasks = db.list_tasks(status=TASK_COMPLETED, limit=parallel)
# for loop to add only one, nice
for task in tasks:
# not-so-efficient lock
if task.id in [tid for ar, tid in pending_results]:
continue
log.info("Processing analysis data for Task #%d", task.id)
result = pool.apply_async(process, (task.id,), {"report": True})
pending_results.append((result, task.id))
count += 1
break
# if there wasn't anything to add, sleep tight
if not tasks:
time.sleep(5)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("id", type=str, help="ID of the analysis to process (auto for continuous processing of unprocessed tasks).")
parser.add_argument("-d", "--debug", help="Display debug messages", action="store_true", required=False)
parser.add_argument("-r", "--report", help="Re-generate report", action="store_true", required=False)
parser.add_argument("-p", "--parallel", help="Number of parallel threads to use (auto mode only).", type=int, required=False, default=1)
args = parser.parse_args()
if args.debug:
log.setLevel(logging.DEBUG)
init_modules()
if args.id == "auto":
autoprocess(parallel=args.parallel)
else:
process(args.id, report=args.report)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| Python | 0 |
bcf6e41b489b5447186c063193d32714856bdfc7 | Add some example docs | InvenTree/plugin/samples/integration/custom_panel_sample.py | InvenTree/plugin/samples/integration/custom_panel_sample.py | """
Sample plugin which renders custom panels on certain pages
"""
from plugin import IntegrationPluginBase
from plugin.mixins import PanelMixin
from part.views import PartDetail
from stock.views import StockLocationDetail
class CustomPanelSample(PanelMixin, IntegrationPluginBase):
"""
A sample plugin which renders some custom panels.
"""
PLUGIN_NAME = "CustomPanelExample"
PLUGIN_SLUG = "panel"
PLUGIN_TITLE = "Custom Panel Example"
def render_location_info(self, loc):
"""
Demonstrate that we can render information particular to a page
"""
return f"""
<h5>Location Information</h5>
<em>This location has no sublocations!</em>
<ul>
<li><b>Name</b>: {loc.name}</li>
<li><b>Path</b>: {loc.pathstring}</li>
</ul>
"""
def get_custom_panels(self, view, request):
"""
You can decide at run-time which custom panels you want to display!
- Display on every page
- Only on a single page or set of pages
- Only for a specific instance (e.g. part)
- Based on the user viewing the page!
"""
panels = [
{
# This 'hello world' panel will be displayed on any view which implements custom panels
'title': 'Hello World',
'icon': 'fas fa-boxes',
'content': '<b>Hello world!</b>',
'description': 'A simple panel which renders hello world',
'javascript': 'alert("Hello world");',
},
{
# This panel will not be displayed, as it is missing the 'content' key
'title': 'No Content',
}
]
# This panel will *only* display on the PartDetail view
if isinstance(view, PartDetail):
panels.append({
'title': 'Custom Part Panel',
'icon': 'fas fa-shapes',
'content': '<em>This content only appears on the PartDetail page, you know!</em>',
})
# This panel will *only* display on the StockLocation view,
# and *only* if the StockLocation has *no* child locations
if isinstance(view, StockLocationDetail):
try:
loc = view.get_object()
if not loc.get_descendants(include_self=False).exists():
panels.append({
'title': 'Childless Location',
'icon': 'fa-user',
'content': self.render_location_info(loc),
})
except:
pass
return panels
| """
Sample plugin which renders custom panels on certain pages
"""
from plugin import IntegrationPluginBase
from plugin.mixins import PanelMixin
from part.views import PartDetail
from stock.views import StockLocationDetail
class CustomPanelSample(PanelMixin, IntegrationPluginBase):
"""
A sample plugin which renders some custom panels.
"""
PLUGIN_NAME = "CustomPanelExample"
PLUGIN_SLUG = "panel"
PLUGIN_TITLE = "Custom Panel Example"
def render_location_info(self, loc):
"""
Demonstrate that we can render information particular to a page
"""
return f"""
<h5>Location Information</h5>
<em>This location has no sublocations!</em>
<ul>
<li><b>Name</b>: {loc.name}</li>
<li><b>Path</b>: {loc.pathstring}</li>
</ul>
"""
def get_custom_panels(self, view, request):
panels = [
{
# This 'hello world' panel will be displayed on any view which implements custom panels
'title': 'Hello World',
'icon': 'fas fa-boxes',
'content': '<b>Hello world!</b>',
'description': 'A simple panel which renders hello world',
'javascript': 'alert("Hello world");',
},
{
# This panel will not be displayed, as it is missing the 'content' key
'title': 'No Content',
}
]
# This panel will *only* display on the PartDetail view
if isinstance(view, PartDetail):
panels.append({
'title': 'Custom Part Panel',
'icon': 'fas fa-shapes',
'content': '<em>This content only appears on the PartDetail page, you know!</em>',
})
# This panel will *only* display on the StockLocation view,
# and *only* if the StockLocation has *no* child locations
if isinstance(view, StockLocationDetail):
try:
loc = view.get_object()
if not loc.get_descendants(include_self=False).exists():
panels.append({
'title': 'Childless Location',
'icon': 'fa-user',
'content': self.render_location_info(loc),
})
except:
pass
return panels
| Python | 0.000001 |
e267bf384a33e17c48101e4726975dae31a88563 | Fix docstring for correlations. | scikits/talkbox/tools/correlations.py | scikits/talkbox/tools/correlations.py | import numpy as np
from scipy.fftpack import fft, ifft
__all__ = ['nextpow2', 'acorr']
def nextpow2(n):
"""Return the next power of 2 such as 2^p >= n.
Notes
-----
Infinite and nan are left untouched, negative values are not allowed."""
if np.any(n < 0):
raise ValueError("n should be > 0")
if np.isscalar(n):
f, p = np.frexp(n)
if f == 0.5:
return p-1
elif np.isfinite(f):
return p
else:
return f
else:
f, p = np.frexp(n)
res = f
bet = np.isfinite(f)
exa = (f == 0.5)
res[bet] = p[bet]
res[exa] = p[exa] - 1
return res
def _acorr_last_axis(x, nfft, maxlag, onesided=False, scale='none'):
a = np.real(ifft(np.abs(fft(x, n=nfft) ** 2)))
if onesided:
b = a[..., :maxlag]
else:
b = np.concatenate([a[..., nfft-maxlag+1:nfft],
a[..., :maxlag]], axis=-1)
#print b, a[..., 0][..., np.newaxis], b / a[..., 0][..., np.newaxis]
if scale == 'coeff':
return b / a[..., 0][..., np.newaxis]
else:
return b
def acorr(x, axis=-1, onesided=False, scale='none'):
"""Compute autocorrelation of x along given axis.
Parameters
----------
x : array-like
signal to correlate.
axis : int
axis along which autocorrelation is computed.
onesided: bool, optional
if True, only returns the right side of the autocorrelation.
scale: {'none', 'coeff'}
scaling mode. If 'coeff', the correlation is normalized such as the
0-lag is equal to 1.
Notes
-----
Use fft for computation: is more efficient than direct computation for
relatively large n.
"""
if not np.isrealobj(x):
raise ValueError("Complex input not supported yet")
if not scale in ['none', 'coeff']:
raise ValueError("scale mode %s not understood" % scale)
maxlag = x.shape[axis]
nfft = 2 ** nextpow2(2 * maxlag - 1)
if axis != -1:
x = np.swapaxes(x, -1, axis)
a = _acorr_last_axis(x, nfft, maxlag, onesided, scale)
if axis != -1:
a = np.swapaxes(a, -1, axis)
return a
| import numpy as np
from scipy.fftpack import fft, ifft
__all__ = ['nextpow2', 'acorr']
def nextpow2(n):
"""Return the next power of 2 such as 2^p >= n.
Notes
-----
Infinite and nan are left untouched, negative values are not allowed."""
if np.any(n < 0):
raise ValueError("n should be > 0")
if np.isscalar(n):
f, p = np.frexp(n)
if f == 0.5:
return p-1
elif np.isfinite(f):
return p
else:
return f
else:
f, p = np.frexp(n)
res = f
bet = np.isfinite(f)
exa = (f == 0.5)
res[bet] = p[bet]
res[exa] = p[exa] - 1
return res
def _acorr_last_axis(x, nfft, maxlag, onesided=False, scale='none'):
a = np.real(ifft(np.abs(fft(x, n=nfft) ** 2)))
if onesided:
b = a[..., :maxlag]
else:
b = np.concatenate([a[..., nfft-maxlag+1:nfft],
a[..., :maxlag]], axis=-1)
#print b, a[..., 0][..., np.newaxis], b / a[..., 0][..., np.newaxis]
if scale == 'coeff':
return b / a[..., 0][..., np.newaxis]
else:
return b
def acorr(x, axis=-1, onesided=False, scale='none'):
"""Compute autocorrelation of x along given axis.
Parameters
----------
x : array-like
signal to correlate.
axis : int
axis along which autocorrelation is computed.
onesided: bool, optional
if True, only returns the right side of the autocorrelation.
scale: {'none', 'coeff'}
scaling mode. If 'coeff', the correlation is normalized such as the
0-lag is equal to 1.
Notes
-----
Use fft for computation: is more efficient than direct computation for
relatively large n.
"""
if not np.isrealobj(x):
raise ValueError("Complex input not supported yet")
if not scale in ['none', 'coeff']:
raise ValueError("scale mode %s not understood" % scale)
maxlag = x.shape[axis]
nfft = 2 ** nextpow2(2 * maxlag - 1)
if axis != -1:
x = np.swapaxes(x, -1, axis)
a = _acorr_last_axis(x, nfft, maxlag, onesided, scale)
if axis != -1:
a = np.swapaxes(a, -1, axis)
return a
| Python | 0 |
45918f696ff43815a15640b080b68b007c27b2f8 | Clean-up query | scripts/analytics/preprint_summary.py | scripts/analytics/preprint_summary.py | import pytz
import logging
from dateutil.parser import parse
from datetime import datetime, timedelta
from django.db.models import Q
from website.app import init_app
from scripts.analytics.base import SummaryAnalytics
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
LOG_THRESHOLD = 11
class PreprintSummary(SummaryAnalytics):
@property
def collection_name(self):
return 'preprint_summary'
def get_events(self, date):
super(PreprintSummary, self).get_events(date)
from osf.models import PreprintService, PreprintProvider
# Convert to a datetime at midnight for queries and the timestamp
timestamp_datetime = datetime(date.year, date.month, date.day).replace(tzinfo=pytz.UTC)
query_datetime = timestamp_datetime + timedelta(1)
counts = []
for preprint_provider in PreprintProvider.objects.all():
preprint_for_provider_count = PreprintService.objects.filter(Q(
node__isnull=False,
node__is_deleted=False,
provider___id=preprint_provider._id,
date_created__lte=query_datetime)
).count()
counts.append({
'keen': {
'timestamp': timestamp_datetime.isoformat()
},
'provider': {
'name': preprint_provider.name,
'total': preprint_for_provider_count,
},
})
return counts
def get_class():
return PreprintSummary
if __name__ == '__main__':
init_app()
preprint_summary = PreprintSummary()
args = preprint_summary.parse_args()
yesterday = args.yesterday
if yesterday:
date = (datetime.today() - timedelta(1)).date()
else:
date = parse(args.date).date() if args.date else None
events = preprint_summary.get_events(date)
preprint_summary.send_events(events)
| import pytz
import logging
from dateutil.parser import parse
from datetime import datetime, timedelta
from django.db.models import Q
from website.app import init_app
from scripts.analytics.base import SummaryAnalytics
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
LOG_THRESHOLD = 11
class PreprintSummary(SummaryAnalytics):
@property
def collection_name(self):
return 'preprint_summary'
def get_events(self, date):
super(PreprintSummary, self).get_events(date)
from osf.models import PreprintService, PreprintProvider
# Convert to a datetime at midnight for queries and the timestamp
timestamp_datetime = datetime(date.year, date.month, date.day).replace(tzinfo=pytz.UTC)
query_datetime = timestamp_datetime + timedelta(1)
counts = []
for preprint_provider in PreprintProvider.objects.all():
preprint_for_provider_count = PreprintService.objects.filter(Q(
node__isnull=False,node__is_deleted=False,
provider___id=preprint_provider._id,
date_created__lte=query_datetime)).count()
counts.append({
'keen': {
'timestamp': timestamp_datetime.isoformat()
},
'provider': {
'name': preprint_provider.name,
'total': preprint_for_provider_count,
},
})
return counts
def get_class():
return PreprintSummary
if __name__ == '__main__':
init_app()
preprint_summary = PreprintSummary()
args = preprint_summary.parse_args()
yesterday = args.yesterday
if yesterday:
date = (datetime.today() - timedelta(1)).date()
else:
date = parse(args.date).date() if args.date else None
events = preprint_summary.get_events(date)
preprint_summary.send_events(events)
| Python | 0.999883 |
32aec3e5595fe0868b77260cb64be718d4e7f3b8 | Update Keras.py | Momentum/Keras.py | Momentum/Keras.py | from keras.datasets import mnist
from keras.initializers import RandomUniform
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
from keras.utils import to_categorical
batch_size = 128
epochs = 30
learning_rate = 0.1
momentum = 0.9
num_classes = 10
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255
y_train = to_categorical(y_train, num_classes)
x_test = x_test.reshape(10000, 784).astype('float32') / 255
y_test = to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(512,
activation='relu',
input_shape=(784,),
kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01)))
model.add(Dense(512,
activation='relu',
input_shape=(784,),
kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01)))
model.add(Dense(num_classes,
activation='softmax',
input_shape=(784,),
kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01)))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=learning_rate, momentum=momentum),
metrics=['accuracy'])
history = model.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
| from keras.datasets import mnist
from keras.initializers import RandomUniform
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
from keras.utils import to_categorical
batch_size = 128
epochs = 30
learning_rate = 0.1
momentum = 0.9
num_classes = 10
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255
y_train = to_categorical(y_train, num_classes)
x_test = x_test.reshape(10000, 784).astype('float32') / 255
y_test = to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(512,
activation='tanh',
input_shape=(784,),
kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01)))
model.add(Dense(512,
activation='tanh',
input_shape=(784,),
kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01)))
model.add(Dense(num_classes,
activation='softmax',
input_shape=(784,),
kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01)))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=learning_rate, momentum=momentum),
metrics=['accuracy'])
history = model.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test)) | Python | 0 |
aa6090b69f64721391dec38de04e8d01d23c48bf | Add tests for differential calculus methods | sympy/calculus/tests/test_singularities.py | sympy/calculus/tests/test_singularities.py | from sympy import Symbol, exp, log
from sympy.calculus.singularities import (singularities, is_increasing,
is_strictly_increasing, is_decreasing,
is_strictly_decreasing)
from sympy.sets import Interval
from sympy import oo, S
from sympy.utilities.pytest import XFAIL
x = Symbol('x')
def test_singularities():
x = Symbol('x', real=True)
assert singularities(x**2, x) == ()
assert singularities(x/(x**2 + 3*x + 2), x) == (-2, -1)
@XFAIL
def test_singularities_non_rational():
x = Symbol('x', real=True)
assert singularities(exp(1/x), x) == (0)
assert singularities(log((x - 2)**2), x) == (2)
def test_is_increasing():
assert is_increasing(x**3 - 3*x**2 + 4*x, S.Reals)
assert is_increasing(-x**2, Interval(-oo, 0))
assert not is_increasing(-x**2, Interval(0, oo))
assert not is_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval(-2, 3))
def test_is_strictly_increasing():
assert is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.Ropen(-oo, -2))
assert is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.Lopen(3, oo))
assert not is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.open(-2, 3))
assert not is_strictly_increasing(-x**2, Interval(0, oo))
def test_is_decreasing():
assert is_decreasing(1/(x**2 - 3*x), Interval.open(1.5, 3))
assert is_decreasing(1/(x**2 - 3*x), Interval.Lopen(3, oo))
assert not is_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, S(3)/2))
assert not is_decreasing(-x**2, Interval(-oo, 0))
def test_is_strictly_decreasing():
assert is_decreasing(1/(x**2 - 3*x), Interval.open(1.5, 3))
assert is_decreasing(1/(x**2 - 3*x), Interval.Lopen(3, oo))
assert not is_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, S(3)/2))
assert not is_decreasing(-x**2, Interval(-oo, 0))
| from sympy import Symbol, exp, log
from sympy.calculus.singularities import singularities
from sympy.utilities.pytest import XFAIL
def test_singularities():
x = Symbol('x', real=True)
assert singularities(x**2, x) == ()
assert singularities(x/(x**2 + 3*x + 2), x) == (-2, -1)
@XFAIL
def test_singularities_non_rational():
x = Symbol('x', real=True)
assert singularities(exp(1/x), x) == (0)
assert singularities(log((x - 2)**2), x) == (2)
@XFAIL
def test_is_increasing():
pass
@XFAIL
def test_is_strictly_increasing():
pass
@XFAIL
def test_is_decreasing():
pass
@XFAIL
def test_is_strictly_decreasing():
pass
@XFAIL
def is_monotonic():
pass
| Python | 0.000003 |
f84f7e9091725d638e93d1dc14b830118a1833c8 | add returns for views | gps_tracker/views.py | gps_tracker/views.py | from pyramid.view import view_config
points_list = [
{"_id": 'ObjectId("52e3eb56a7cade5d0898e012")', "latitude": "45.215",
"longitude": "14.131", "gas_station": "Lukoil", "odometer": "24100",
"description": "Bad coffee"},
{"_id": 'ObjectId("52e3eb79a7cade5d0898e013")', "latitude": "47.412",
"longitude": "16.112", "gas_station": "Shell", "odometer": "24300",
"description": "Nice service, but fuel is more expensive"},
{"_id": 'ObjectId("52e3eba5a7cade5d0898e014")', "latitude": "48.544",
"longitude": "17.001", "gas_station": "Руснефть", "odometer": "24500",
"description": "На заправке есть гостиница и кафе. Очень хорошо"},
{"_id": 'ObjectId("52e3ec19a7cade5d0898e015")', "latitude": "49.165",
"longitude": "18.125", "gas_station": "Татнефть", "odometer": "24750",
"description": "Есть стоянка кемпинг-стоянка. Дешёвая незамерзайка."},
{"_id": 'ObjectId("52f3aaf0a7cade0d846d00d7")', "gas_station": "Газпром",
"odometer": "28400", "latitude": "49.249",
"description": "Хорошее кафе, есть душ!", "longitude": "19.100"}
]
@view_config(route_name='points', request_method='GET', renderer='json')
def points_get_all(request):
return points_list
@view_config(route_name='points', request_method='POST', renderer='json')
def point_add_new(request):
return points_list[2]
@view_config(route_name='point', request_method='GET', renderer='json')
def point_get_one(request):
return points_list[0]
@view_config(route_name='point', request_method='PATCH', renderer='json')
def point_edit_one(request):
return points_list[1]
@view_config(route_name='point', request_method='DELETE', renderer='json')
def point_delete_one(request):
return {}
| from pyramid.view import view_config
points_list = [
{"_id": 'ObjectId("52e3eb56a7cade5d0898e012")', "latitude": "45.215",
"longitude": "14.131", "gas_station": "Lukoil", "odometer": "24100",
"description": "Bad coffee"},
{"_id": 'ObjectId("52e3eb79a7cade5d0898e013")', "latitude": "47.412",
"longitude": "16.112", "gas_station": "Shell", "odometer": "24300",
"description": "Nice service, but fuel is more expensive"},
{"_id": 'ObjectId("52e3eba5a7cade5d0898e014")', "latitude": "48.544",
"longitude": "17.001", "gas_station": "Руснефть", "odometer": "24500",
"description": "На заправке есть гостиница и кафе. Очень хорошо"},
{"_id": 'ObjectId("52e3ec19a7cade5d0898e015")', "latitude": "49.165",
"longitude": "18.125", "gas_station": "Татнефть", "odometer": "24750",
"description": "Есть стоянка кемпинг-стоянка. Дешёвая незамерзайка."},
{"_id": 'ObjectId("52f3aaf0a7cade0d846d00d7")', "gas_station": "Газпром",
"odometer": "28400", "latitude": "49.249",
"description": "Хорошее кафе, есть душ!", "longitude": "19.100"}
]
@view_config(route_name='points', request_method='GET', renderer='json')
def points_get_all(request):
return points_list
@view_config(route_name='points', request_method='POST', renderer='json')
def point_add_new(request):
return points_list[2]
@view_config(route_name='point', request_method='GET', renderer='json')
def point_get_one(request):
return points_list[0]
@view_config(route_name='point', request_method='PATCH', renderer='json')
def point_edit_one(request):
return {'response': 'point edited'}
@view_config(route_name='point', request_method='DELETE', renderer='json')
def point_delete_one(request):
return {}
| Python | 0 |
d5f84783c376906dd5733391593ceae792b5edda | Bump version to 0.1.0 | vcli/__init__.py | vcli/__init__.py | __version__ = '0.1.0'
| __version__ = '0.0.1'
| Python | 0.000001 |
7c09e8cad8892aa2a491297618b6091e7286c6d3 | move get model code to method | exportdata/management/commands/exportdata.py | exportdata/management/commands/exportdata.py | import os
import csv
import sys
from optparse import make_option
from collections import Callable
from django.contrib.sites.models import Site
from django.db.models.loading import get_model
from django.core.management.base import LabelCommand, CommandError
DOMAIN = Site.objects.get_current().domain
class Command(LabelCommand):
option_list = LabelCommand.option_list + (
make_option('--fields', dest='fields'),
make_option('--filters', dest='filters', default=None),
make_option('--ordering', dest='ordering', default=None),
# TODO: advanced filtration, ranges
)
help = 'Export any data in csv'
label = 'app.model'
def get_model(self, label):
app, model = label.split('.', 1)
Model = get_model(app, model)
if not Model:
raise CommandError('Model "{0}" not found!'.format(label))
return Model
def handle_label(self, label, **options):
Model = self.get_model(label)
filename = os.path.join(os.path.expanduser('~'),
'{0}.csv'.format(label))
resultcsv = csv.writer(open(filename, 'wb'), delimiter=';',
quoting=csv.QUOTE_MINIMAL)
fields = options.get('fields')
filters = options.get('filters', None)
ordering = options.get('ordering', None)
qs = Model.objects.all()
if filters:
filters = filters.split(',')
for filter_name in filters:
if not hasattr(qs, filter_name):
raise CommandError(
'Model "{0}" not not to have "{1}" filter'.format(
label,
filter_name
)
)
qs = getattr(qs, filter_name)()
if ordering:
ordering = ordering.split(',')
qs = qs.order_by(*ordering)
fields = fields.split(',')
resultcsv.writerow(fields)
for obj in qs:
result = []
for field_name in fields:
if '__' in field_name:
field_name = field_name.split('__', 1)
field = getattr(obj, field_name[0], None)
field = getattr(field, field_name[1], None)
else:
field = getattr(obj, field_name, None)
if field_name == 'get_absolute_url':
# hack, because in python not possible
# check function has a decorator
field = field()
field = u'http://{0}{1}'.format(DOMAIN, field)
if isinstance(field, Callable):
field = field()
if isinstance(field, (str, unicode,)):
field = field.encode('utf-8')
result.append(field)
resultcsv.writerow(result)
sys.exit('Done! Exported objects: {0}'.format(qs.count()))
| import os
import csv
import sys
from optparse import make_option
from collections import Callable
from django.contrib.sites.models import Site
from django.db.models.loading import get_model
from django.core.management.base import LabelCommand, CommandError
DOMAIN = Site.objects.get_current().domain
class Command(LabelCommand):
option_list = LabelCommand.option_list + (
make_option('--fields', dest='fields'),
make_option('--filters', dest='filters', default=None),
make_option('--ordering', dest='ordering', default=None),
# TODO: advanced filtration, ranges
)
help = 'Export any data in csv'
label = 'app.model'
def handle_label(self, label, **options):
app, model = label.split('.', 1)
Model = get_model(app, model)
if not Model:
raise CommandError('Model "{0}" not found!'.format(label))
filename = os.path.join(os.path.expanduser('~'),
'{0}.csv'.format(label))
resultcsv = csv.writer(open(filename, 'wb'), delimiter=';',
quoting=csv.QUOTE_MINIMAL)
fields = options.get('fields')
filters = options.get('filters', None)
ordering = options.get('ordering', None)
qs = Model.objects.all()
if filters:
filters = filters.split(',')
for filter_name in filters:
if not hasattr(qs, filter_name):
raise CommandError(
'Model "{0}" not not to have "{1}" filter'.format(
label,
filter_name
)
)
qs = getattr(qs, filter_name)()
if ordering:
ordering = ordering.split(',')
qs = qs.order_by(*ordering)
fields = fields.split(',')
resultcsv.writerow(fields)
for obj in qs:
result = []
for field_name in fields:
if '__' in field_name:
field_name = field_name.split('__', 1)
field = getattr(obj, field_name[0], None)
field = getattr(field, field_name[1], None)
else:
field = getattr(obj, field_name, None)
if field_name == 'get_absolute_url':
# hack, because in python not possible
# check function has a decorator
field = field()
field = u'http://{0}{1}'.format(DOMAIN, field)
if isinstance(field, Callable):
field = field()
if isinstance(field, (str, unicode,)):
field = field.encode('utf-8')
result.append(field)
resultcsv.writerow(result)
sys.exit('Done! Exported objects: {0}'.format(qs.count()))
| Python | 0 |
680122f69c5aab9be4dc1965024ac882326d1c5b | Add license to watched.py | derpibooru/watched.py | derpibooru/watched.py | # Copyright (c) 2014, Joshua Stone
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class Watched(object):
def __init__(self, key, page=1, perpage=15, comments=False, fav=False):
self.__parameters = {}
self.key = key
self.page = page
self.perpage = perpage
self.comments = comments
self.fav = fav
@property
def hostname(self):
return("https://derpiboo.ru")
@property
def key(self):
return(self.parameters["key"])
@key.setter
def key(self, key=""):
if not isinstance(key, str):
raise TypeError("key must be a string")
self.__parameters["key"] = key
@property
def page(self):
return(self.parameters["page"])
@page.setter
def page(self, page=1):
if not isinstance(page, int):
raise TypeError("page number must be an int")
if page < 1:
raise ValueError("page number must be greater than 0")
self.__parameters["page"] = page
def next_page(self, number=1):
if not isinstance(number, int):
raise TypeError("page number must be an int")
if number < 1:
raise ValueError("page number must be greater than 0")
self.__parameters["page"] += number
def previous_page(self, number=1):
if not isinstance(number, int):
raise TypeError("page number must be an int")
if number < 1:
raise ValueError("page number must be greater than 0")
if self.parameters["page"] - number <= 1:
self.__parameters["page"] = 1
else:
self.__parameters["page"] -= number
@property
def perpage(self):
return(self.parameters["perpage"])
@perpage.setter
def perpage(self, page_size):
if not isinstance(page_size, int):
raise TypeError("perpage must be an int")
if page_size not in range(1, 51):
raise ValueError("perpage must be within range of 1-50")
self.__parameters["perpage"] = page_size
@property
def comments(self):
return(self.parameters["comments"])
@comments.setter
def comments(self, comments=True):
if not isinstance(comments, bool):
raise TypeError("comments must be either True or False")
self.__parameters["comments"] = comments
@property
def fav(self):
return(self.parameters["fav"])
@fav.setter
def fav(self, fav=True):
if not isinstance(fav, bool):
raise TypeError("favorites must be either True or False")
self.__parameters["fav"] = fav
@property
def parameters(self):
return(self.__parameters)
@property
def url(self):
url, parameters = self.hostname + "/images/watched.json", []
parameters.append("key={0}".format(self.key))
parameters.append("perpage={0}".format(self.perpage))
parameters.append("page={0}".format(self.page))
if self.comments == True:
parameters.append("comments=")
if self.fav == True:
parameters.append("fav=")
url += "?{0}".format("&".join(parameters))
return(url)
@property
def random(self):
url = self.hostname + "/images/watched.json?random=y&key=" + self.key
return(url)
|
class Watched(object):
def __init__(self, key, page=1, perpage=15, comments=False, fav=False):
self.__parameters = {}
self.key = key
self.page = page
self.perpage = perpage
self.comments = comments
self.fav = fav
@property
def hostname(self):
return("https://derpiboo.ru")
@property
def key(self):
return(self.parameters["key"])
@key.setter
def key(self, key=""):
if not isinstance(key, str):
raise TypeError("key must be a string")
self.__parameters["key"] = key
@property
def page(self):
return(self.parameters["page"])
@page.setter
def page(self, page=1):
if not isinstance(page, int):
raise TypeError("page number must be an int")
if page < 1:
raise ValueError("page number must be greater than 0")
self.__parameters["page"] = page
def next_page(self, number=1):
if not isinstance(number, int):
raise TypeError("page number must be an int")
if number < 1:
raise ValueError("page number must be greater than 0")
self.__parameters["page"] += number
def previous_page(self, number=1):
if not isinstance(number, int):
raise TypeError("page number must be an int")
if number < 1:
raise ValueError("page number must be greater than 0")
if self.parameters["page"] - number <= 1:
self.__parameters["page"] = 1
else:
self.__parameters["page"] -= number
@property
def perpage(self):
return(self.parameters["perpage"])
@perpage.setter
def perpage(self, page_size):
if not isinstance(page_size, int):
raise TypeError("perpage must be an int")
if page_size not in range(1, 51):
raise ValueError("perpage must be within range of 1-50")
self.__parameters["perpage"] = page_size
@property
def comments(self):
return(self.parameters["comments"])
@comments.setter
def comments(self, comments=True):
if not isinstance(comments, bool):
raise TypeError("comments must be either True or False")
self.__parameters["comments"] = comments
@property
def fav(self):
return(self.parameters["fav"])
@fav.setter
def fav(self, fav=True):
if not isinstance(fav, bool):
raise TypeError("favorites must be either True or False")
self.__parameters["fav"] = fav
@property
def parameters(self):
return(self.__parameters)
@property
def url(self):
url, parameters = self.hostname + "images/watched.json", []
parameters.append("key={0}".format(self.key))
parameters.append("perpage={0}".format(self.perpage))
parameters.append("page={0}".format(self.page))
if self.comments == True:
parameters.append("comments=")
if self.fav == True:
parameters.append("fav=")
url += "?{0}".format("&".join(parameters))
return(url)
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.