commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
896482b83ad75c445e72dbb0eb6bc7246662f699 | access token is adjusted | skybotapp/views.py | skybotapp/views.py | import json
import requests
from pprint import pprint
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
# yomamabot/fb_yomamabot/views.py
from django.views import generic
from django.http.response import HttpResponse
from django.template.context_processors import request
# Create your views here.
def post_facebook_message(fbid, recevied_message):
post_message_url = 'https://graph.facebook.com/v2.6/me/messages?access_token=<EAASfh0TDd8cBAHBMfkWQGAexatTOup01lZCXtUJ5CF5Imr5b7MeQu30v6TnEzQmvoJF9MZBzkoZBdhLaVcCSY2BtPivUNJh7pic5vfEA13qDr3TRQLuHn8aKpKZAip4X2QHqhBTa7XQNGPnII1cqNMP46gAaRYMzHHSnZA4NZCAwZDZD>'
response_msg = json.dumps({"recipient":{"id":fbid}, "message":{"text":recevied_message}})
status = requests.post(post_message_url, headers={"Content-Type": "application/json"},data=response_msg)
pprint(status.json())
class SkyBotView(generic.View):
# def get(self, request, *args, **kwargs):
# if self.request.GET['hub.verify_token'] == '93985762':
# return HttpResponse(self.request.GET['hub.challenge'])
# else:
# return HttpResponse('Error, invalid token')
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return generic.View.dispatch(self, request, *args, **kwargs)
# Post function to handle Facebook messages
def post(self, request, *args, **kwargs):
# Converts the text payload into a python dictionary
incoming_message = json.loads(self.request.body.decode('utf-8'))
# Facebook recommends going through every entry since they might send
# multiple messages in a single call during high load
for entry in incoming_message['entry']:
for message in entry['messaging']:
# Check to make sure the received call is a message call
# This might be delivery, optin, postback for other events
if 'message' in message:
# Print the message to the terminal
pprint(message)
post_facebook_message(message['sender']['id'], message['message']['text'])
return HttpResponse()
def homeView(request):
return HttpResponse('Hello')
| import json
import requests
from pprint import pprint
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
# yomamabot/fb_yomamabot/views.py
from django.views import generic
from django.http.response import HttpResponse
from django.template.context_processors import request
# Create your views here.
def post_facebook_message(fbid, recevied_message):
post_message_url = 'https://graph.facebook.com/v2.6/me/messages?access_token=<page-access-token>'
response_msg = json.dumps({"recipient":{"id":fbid}, "message":{"text":recevied_message}})
status = requests.post(post_message_url, headers={"Content-Type": "application/json"},data=response_msg)
pprint(status.json())
class SkyBotView(generic.View):
# def get(self, request, *args, **kwargs):
# if self.request.GET['hub.verify_token'] == '93985762':
# return HttpResponse(self.request.GET['hub.challenge'])
# else:
# return HttpResponse('Error, invalid token')
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return generic.View.dispatch(self, request, *args, **kwargs)
# Post function to handle Facebook messages
def post(self, request, *args, **kwargs):
# Converts the text payload into a python dictionary
incoming_message = json.loads(self.request.body.decode('utf-8'))
# Facebook recommends going through every entry since they might send
# multiple messages in a single call during high load
for entry in incoming_message['entry']:
for message in entry['messaging']:
# Check to make sure the received call is a message call
# This might be delivery, optin, postback for other events
if 'message' in message:
# Print the message to the terminal
pprint(message)
post_facebook_message(message['sender']['id'], message['message']['text'])
return HttpResponse()
def homeView(request):
return HttpResponse('Hello')
| Python | 0.000003 |
06fa3a4625576a0d7d4897dabcc2979c36d62ce1 | Remove unused code | dwarf/image/api_response.py | dwarf/image/api_response.py | #!/usr/bin/env python
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dwarf.utils import template
DETAILS = ('created_at', 'deleted', 'deleted_at', 'updated_at')
# -----------------------------------------------------------------------------
# Images API responses
IMAGE = DETAILS + ('checksum', 'container_format', 'disk_format', 'id',
'is_public', 'location', 'min_disk', 'min_ram', 'name',
'owner', 'protected', 'size', 'status')
IMAGE_PROPERTIES = {'properties': {}}
def images_create(data):
return {"image": template(IMAGE, data, add=IMAGE_PROPERTIES)}
def images_list(data):
return {"images": template(IMAGE, data, add=IMAGE_PROPERTIES)}
def images_update(data):
return {"image": template(IMAGE, data, add=IMAGE_PROPERTIES)}
| #!/usr/bin/env python
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dwarf.utils import template
DETAILS = ('created_at', 'deleted', 'deleted_at', 'updated_at')
# -----------------------------------------------------------------------------
# Images API responses
IMAGE = DETAILS + ('checksum', 'container_format', 'disk_format', 'id',
'is_public', 'location', 'min_disk', 'min_ram', 'name',
'owner', 'protected', 'size', 'status')
IMAGE_PROPERTIES = {'properties': {}}
def images_create(data):
return {"image": template(IMAGE, data, add=IMAGE_PROPERTIES)}
def images_list(data):
return {"images": template(IMAGE, data, add=IMAGE_PROPERTIES)}
def images_show(data):
return {"image": template(IMAGE, data, add=IMAGE_PROPERTIES)}
def images_update(data):
return {"image": template(IMAGE, data, add=IMAGE_PROPERTIES)}
| Python | 0 |
1334c8fa989981e3c917cdc16869b04ad1c2f6e0 | add --g-fatal-warnings gtk option | snaked/core/run.py | snaked/core/run.py | from optparse import OptionParser
import os
def get_manager():
parser = OptionParser()
parser.add_option('-s', '--session', dest='session',
help="Open snaked with specified session", default='default')
parser.add_option('', '--select-session', action="store_true", dest='select_session',
help="Show dialog to select session at startup", default=False)
parser.add_option('-d', '--debug', action="store_true", dest='debug',
help="Run embedded drainhunter", default=False)
parser.add_option('', '--g-fatal-warnings', action="store_true")
options, args = parser.parse_args()
if options.select_session:
from snaked.core.gui import session_selector
options.session = session_selector.select_session()
from .app import is_master, serve
master, conn = is_master(options.session)
if master:
import gobject
gobject.threads_init()
from .manager import EditorManager
manager = EditorManager(options.session)
manager.start(args)
serve(manager, conn)
if options.debug:
import drainhunter.server
drainhunter.server.run()
return manager
else:
conn.send(['OPEN'] + list(map(os.path.abspath, args)))
conn.send(['END'])
conn.close()
return None
def run():
manager = get_manager()
if not manager:
return
import gtk
try:
gtk.main()
except KeyboardInterrupt:
manager.quit()
| from optparse import OptionParser
import os
def get_manager():
parser = OptionParser()
parser.add_option('-s', '--session', dest='session',
help="Open snaked with specified session", default='default')
parser.add_option('', '--select-session', action="store_true", dest='select_session',
help="Show dialog to select session at startup", default=False)
parser.add_option('-d', '--debug', action="store_true", dest='debug',
help="Run embedded drainhunter", default=False)
options, args = parser.parse_args()
if options.select_session:
from snaked.core.gui import session_selector
options.session = session_selector.select_session()
from .app import is_master, serve
master, conn = is_master(options.session)
if master:
import gobject
gobject.threads_init()
from .manager import EditorManager
manager = EditorManager(options.session)
manager.start(args)
serve(manager, conn)
if options.debug:
import drainhunter.server
drainhunter.server.run()
return manager
else:
conn.send(['OPEN'] + list(map(os.path.abspath, args)))
conn.send(['END'])
conn.close()
return None
def run():
manager = get_manager()
if not manager:
return
import gtk
try:
gtk.main()
except KeyboardInterrupt:
manager.quit()
| Python | 0.000001 |
6670fe1d081e27417a3d340e2c12c061078582af | Bump version (pre-release) | django_xhtml2pdf/__init__.py | django_xhtml2pdf/__init__.py | # -*- coding: utf-8 -*-
"""
See PEP 386 (http://www.python.org/dev/peps/pep-0386/)
Release logic:
1. Remove "dev" from current.
2. git commit
3. git tag <version>
4. push to pypi + push to github
5. bump the version, append '.dev0'
6. git commit
7. push to github (to avoid confusion)
"""
__version__ = '0.0.3'
| # -*- coding: utf-8 -*-
"""
See PEP 386 (http://www.python.org/dev/peps/pep-0386/)
Release logic:
1. Remove "dev" from current.
2. git commit
3. git tag <version>
4. push to pypi + push to github
5. bump the version, append '.dev0'
6. git commit
7. push to github (to avoid confusion)
"""
__version__ = '0.0.3.dev0'
| Python | 0 |
94d18ba6ede9dc58a558c68fd3af9bbcadc7f189 | Update urls.py For Django 1.6 | djangobb_forum/tests/urls.py | djangobb_forum/tests/urls.py | from django.conf.urls import patterns, include
urlpatterns = patterns('',
(r'^forum/', include('djangobb_forum.urls', namespace='djangobb')),
)
| from django.conf.urls.defaults import patterns, include
urlpatterns = patterns('',
(r'^forum/', include('djangobb_forum.urls', namespace='djangobb')),
)
| Python | 0.000001 |
bdceb4c7bc0b71755d9f63974a5597e29fd94e75 | comment test code | tester.py | tester.py | import urllib2
from socket import p
import settings
import random
import threading
import Queue
import json
import requests
from settings import USER_AGENTS
def makeRequest(proxy, target):
i_headers = {'User-Agent': random.choice(USER_AGENTS)}
print("\n")
try:
r = requests.get(target, proxies=proxy, headers=i_headers, timeout=5)
except Exception, e:
print "Test Failed: %s By %s \nException: %s" % (target, str(proxy), str(e))
return False
else:
print "Test Successed: %s By %s" % (target, str(proxy))
return True
def makeAProxyRequest(proxy, testTarget):
i_headers = {'User-Agent':random.choice(settings.USER_AGENTS)}
url = testTarget
print("\n")
try:
r = requests.get(url, proxies=proxy, headers = i_headers, timeout=5)
except Exception, e:
print "Test Failed: %s By %s \nException: %s" % (testTarget, str(proxy), str(e))
return False
else:
print "Test Successed: %s By %s" % (testTarget, str(proxy))
return True
def makeFullTestForOneProxy(proxy, type = 'ALL'):
checkedCount = 0
for testTarget in settings.TestTargetsCN:
connected = makeAProxyRequest(proxy, testTarget)
if connected == True:
checkedCount += 1
quality = checkedCount * 1.0 / len(settings.TestTargetsCN)
return quality
class WorkThread(threading.Thread):
def __init__(self, name, workQueue, aa=None):
super(WorkThread, self).__init__()
self.queue = workQueue
self.name = name
self.aa = aa
def run(self):
print "Starting " + self.name
while True:
if self.queue.empty():
print "Exiting " + self.name
break
proxy = self.queue.get()
if proxy != None:
print "Thread: " + self.name + " Size: " + str(self.queue.qsize())
if self.aa == None:
makeFullTestForOneProxy(proxy)
else:
makeAProxyRequest(proxy, self.aa)
self.queue.task_done()
# makeFullTestForOneProxy({"http":"115.218.126.59:9000"})
# makeAProxyRequest({"http":"115.218.216.90:9000"}, 'http://www.woshipm.com/')
# makeAProxyRequest({"http":"115.218.216.90:9000"}, 'https://www.baidu.com/')
# makeAProxyRequest({"http":"115.218.216.90:9000"}, 'http://www.v2ex.com/')
# jsonFile = "proxy.json"
# f = open(jsonFile)
# fileData = f.read()
# f.close()
# proxys = json.loads(fileData)
#
#
# workQueue = Queue.Queue(0)
#
# for proxy in proxys:
# workQueue.put(proxy)
#
# for i in range(10):
# name = "Thread " + str(i)
# thread = WorkThread(name, workQueue)
# thread.start()
# workQueue.join()
| import urllib2
from socket import p
import settings
import random
import threading
import Queue
import json
import requests
from settings import USER_AGENTS
def makeRequest(proxy, target):
i_headers = {'User-Agent': random.choice(USER_AGENTS)}
print("\n")
try:
r = requests.get(target, proxies=proxy, headers=i_headers, timeout=5)
except Exception, e:
print "Test Failed: %s By %s \nException: %s" % (target, str(proxy), str(e))
return False
else:
print "Test Successed: %s By %s" % (target, str(proxy))
return True
def makeAProxyRequest(proxy, testTarget):
i_headers = {'User-Agent':random.choice(settings.USER_AGENTS)}
url = testTarget
print("\n")
try:
r = requests.get(url, proxies=proxy, headers = i_headers, timeout=5)
except Exception, e:
print "Test Failed: %s By %s \nException: %s" % (testTarget, str(proxy), str(e))
return False
else:
print "Test Successed: %s By %s" % (testTarget, str(proxy))
return True
def makeFullTestForOneProxy(proxy, type = 'ALL'):
checkedCount = 0
for testTarget in settings.TestTargetsCN:
connected = makeAProxyRequest(proxy, testTarget)
if connected == True:
checkedCount += 1
quality = checkedCount * 1.0 / len(settings.TestTargetsCN)
return quality
class WorkThread(threading.Thread):
def __init__(self, name, workQueue, aa=None):
super(WorkThread, self).__init__()
self.queue = workQueue
self.name = name
self.aa = aa
def run(self):
print "Starting " + self.name
while True:
if self.queue.empty():
print "Exiting " + self.name
break
proxy = self.queue.get()
if proxy != None:
print "Thread: " + self.name + " Size: " + str(self.queue.qsize())
if self.aa == None:
makeFullTestForOneProxy(proxy)
else:
makeAProxyRequest(proxy, self.aa)
self.queue.task_done()
# makeFullTestForOneProxy({"http":"115.218.126.59:9000"})
# makeAProxyRequest({"http":"115.218.216.90:9000"}, 'http://www.woshipm.com/')
# makeAProxyRequest({"http":"115.218.216.90:9000"}, 'https://www.baidu.com/')
# makeAProxyRequest({"http":"115.218.216.90:9000"}, 'http://www.v2ex.com/')
jsonFile = "proxy.json"
f = open(jsonFile)
fileData = f.read()
f.close()
proxys = json.loads(fileData)
workQueue = Queue.Queue(0)
for proxy in proxys:
workQueue.put(proxy)
for i in range(10):
name = "Thread " + str(i)
thread = WorkThread(name, workQueue)
thread.start()
workQueue.join()
| Python | 0 |
c6cde6a72204a9e688ea0d6dfe9550f2cb39a0fc | resolve incorrect merge conflict resolution | common/lib/xmodule/xmodule/modulestore/tests/test_xml.py | common/lib/xmodule/xmodule/modulestore/tests/test_xml.py | import os.path
from nose.tools import assert_raises, assert_equals
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.modulestore import XML_MODULESTORE_TYPE
from .test_modulestore import check_path_to_location
from xmodule.tests import DATA_DIR
class TestXMLModuleStore(object):
def test_path_to_location(self):
"""Make sure that path_to_location works properly"""
print "Starting import"
modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])
print "finished import"
check_path_to_location(modulestore)
def test_xml_modulestore_type(self):
store = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])
assert_equals(store.get_modulestore_type('foo/bar/baz'), XML_MODULESTORE_TYPE)
def test_unicode_chars_in_xml_content(self):
# edX/full/6.002_Spring_2012 has non-ASCII chars, and during
# uniquification of names, would raise a UnicodeError. It no longer does.
# Ensure that there really is a non-ASCII character in the course.
with open(os.path.join(DATA_DIR, "toy/sequential/vertical_sequential.xml")) as xmlf:
xml = xmlf.read()
with assert_raises(UnicodeDecodeError):
xml.decode('ascii')
# Load the course, but don't make error modules. This will succeed,
# but will record the errors.
modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy'], load_error_modules=False)
# Look up the errors during load. There should be none.
location = CourseDescriptor.id_to_location("edX/toy/2012_Fall")
errors = modulestore.get_item_errors(location)
assert errors == []
| import os.path
from nose.tools import assert_raises, assert_equals
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.modulestore import XML_MODULESTORE_TYPE
from .test_modulestore import check_path_to_location
from . import DATA_DIR
class TestXMLModuleStore(object):
def test_path_to_location(self):
"""Make sure that path_to_location works properly"""
print "Starting import"
modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])
print "finished import"
check_path_to_location(modulestore)
def test_xml_modulestore_type(self):
store = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])
assert_equals(store.get_modulestore_type('foo/bar/baz'), XML_MODULESTORE_TYPE)
def test_unicode_chars_in_xml_content(self):
# edX/full/6.002_Spring_2012 has non-ASCII chars, and during
# uniquification of names, would raise a UnicodeError. It no longer does.
# Ensure that there really is a non-ASCII character in the course.
with open(os.path.join(DATA_DIR, "toy/sequential/vertical_sequential.xml")) as xmlf:
xml = xmlf.read()
with assert_raises(UnicodeDecodeError):
xml.decode('ascii')
# Load the course, but don't make error modules. This will succeed,
# but will record the errors.
modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy'], load_error_modules=False)
# Look up the errors during load. There should be none.
location = CourseDescriptor.id_to_location("edX/toy/2012_Fall")
errors = modulestore.get_item_errors(location)
assert errors == []
| Python | 0.000005 |
c79ccb44edbca1ccc5d5b1bddb4fa8dc19e6df66 | update middleware for django 1.10 issue | chapter6/growth_studio/settings.py | chapter6/growth_studio/settings.py | """
Django settings for growth_studio project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q2c4xbdh)hf-$z7v1dyai3n^+(g%l5ogi17rm+rud^ysbx-(h0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'homepage',
'blog',
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
]
ROOT_URLCONF = 'growth_studio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'growth_studio.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static/'),
)
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
### settings.py file
### settings that are not environment dependent
try:
from .local_settings import *
except ImportError:
pass
| """
Django settings for growth_studio project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q2c4xbdh)hf-$z7v1dyai3n^+(g%l5ogi17rm+rud^ysbx-(h0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'homepage',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'growth_studio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'growth_studio.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static/'),
)
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
### settings.py file
### settings that are not environment dependent
try:
from .local_settings import *
except ImportError:
pass
| Python | 0 |
2b3667dfc4fbd6571da288146d4e8f8f8f2d51a1 | Fix broken sorted set unit test. | test/unit/test_sorted_set.py | test/unit/test_sorted_set.py | # :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from clique.sorted_set import SortedSet
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('item', 'expected'), [
(1, True),
(10, False)
], ids=[
'item present',
'item not present'
])
def test_contains(item, expected, standard_set):
'''Check item membership.'''
assert (item in standard_set) is expected
@pytest.mark.parametrize(('sorted_set', 'expected'), [
(SortedSet(), 0),
(SortedSet([]), 0),
(SortedSet([1]), 1),
(SortedSet([1, 2, 3]), 3),
(SortedSet([1, 1, 2, 2, 3, 3]), 3)
], ids=[
'no iterable',
'empty iterable',
'single item',
'multiple items',
'duplicate multiple items'
])
def test_len(sorted_set, expected):
'''Calculate set length.'''
assert len(sorted_set) == expected
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('sorted_set', 'item', 'expected'), [
(SortedSet(), 1, 1),
(SortedSet([1]), 1, 1),
(SortedSet([1]), 2, 2)
], ids=[
'item',
'existing item',
'new item'
])
def test_add(sorted_set, item, expected):
'''Add item.'''
sorted_set.add(item)
assert item in sorted_set
assert len(sorted_set) == expected
@pytest.mark.parametrize(('sorted_set', 'item'), [
(SortedSet([1]), 1),
(SortedSet(), 1)
], ids=[
'present item',
'missing item'
])
def test_discard(sorted_set, item):
'''Discard item.'''
sorted_set.discard(item)
assert item not in sorted_set
| # :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from clique.sorted_set import SortedSet
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('item', 'expected'), [
(1, True),
(10, False)
], ids=[
'item present',
'item not present'
])
def test_contains(item, expected, standard_set):
'''Check item membership.'''
assert (item in standard_set) is expected
@pytest.mark.parametrize(('sorted_set', 'expected'), [
(SortedSet(), 0),
(SortedSet([]), 0),
(SortedSet([1]), 1),
(SortedSet([1, 2, 3]), 3),
(SortedSet([1, 1, 2, 2, 3, 3]), 4)
], ids=[
'no iterable',
'empty iterable',
'single item',
'multiple items',
'duplicate multiple items'
])
def test_len(sorted_set, expected):
'''Calculate set length.'''
assert len(sorted_set) == expected
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('sorted_set', 'item', 'expected'), [
(SortedSet(), 1, 1),
(SortedSet([1]), 1, 1),
(SortedSet([1]), 2, 2)
], ids=[
'item',
'existing item',
'new item'
])
def test_add(sorted_set, item, expected):
'''Add item.'''
sorted_set.add(item)
assert item in sorted_set
assert len(sorted_set) == expected
@pytest.mark.parametrize(('sorted_set', 'item'), [
(SortedSet([1]), 1),
(SortedSet(), 1)
], ids=[
'present item',
'missing item'
])
def test_discard(sorted_set, item):
'''Discard item.'''
sorted_set.discard(item)
assert item not in sorted_set
| Python | 0 |
656e41ef504d42d2fcf0155aaedccc45c9d72c33 | Add the null handler to the root logger to prevent Tornado from doing logging.basicConfig | rejected/controller.py | rejected/controller.py | """
OS Level controlling class invokes startup, shutdown and handles signals.
"""
import clihelper
import logging
import signal
import sys
from rejected import common
from rejected import mcp
from rejected import __version__
LOGGER = logging.getLogger(__name__)
class Controller(clihelper.Controller):
"""Rejected Controller application that invokes the MCP and handles all
of the OS level concerns.
"""
def _master_control_program(self):
"""Return an instance of the MasterControlProgram.
:rtype: rejected.mcp.MasterControlProgram
"""
return mcp.MasterControlProgram(self._config,
consumer=self._options.consumer,
profile=self._options.profile,
quantity=self._options.quantity)
def _prepend_python_path(self, path): #pragma: no cover
"""Add the specified value to the python path.
:param str path: The path to append
"""
LOGGER.debug('Prepending "%s" to the python path.', path)
sys.path.insert(0, path)
def _setup(self):
"""Continue the run process blocking on MasterControlProgram.run"""
# If the app was invoked to specified to prepend the path, do so now
common.add_null_handler()
if self._options.prepend_path:
self._prepend_python_path(self._options.prepend_path)
def stop(self):
"""Shutdown the MCP and child processes cleanly"""
LOGGER.info('Shutting down controller')
self.set_state(self.STATE_STOP_REQUESTED)
# Clear out the timer
signal.setitimer(signal.ITIMER_PROF, 0, 0)
self._mcp.stop_processes()
if self._mcp.is_running:
LOGGER.info('Waiting up to 3 seconds for MCP to shut things down')
signal.setitimer(signal.ITIMER_REAL, 3, 0)
signal.pause()
LOGGER.info('Post pause')
# Force MCP to stop
if self._mcp.is_running:
LOGGER.warning('MCP is taking too long, requesting process kills')
self._mcp.stop_processes()
del self._mcp
else:
LOGGER.info('MCP exited cleanly')
# Change our state
self._stopped()
LOGGER.info('Shutdown complete')
def run(self):
"""Run the rejected Application"""
self._setup()
self._mcp = self._master_control_program()
try:
self._mcp.run()
except KeyboardInterrupt:
LOGGER.info('Caught CTRL-C, shutting down')
clihelper.setup_logging(self._debug)
if self.is_running:
self.stop()
def _cli_options(parser):
"""Add options to the parser
:param optparse.OptionParser parser: The option parser to add options to
"""
parser.add_option('-P', '--profile',
action='store',
default=None,
dest='profile',
help='Profile the consumer modules, specifying '
'the output directory.')
parser.add_option('-o', '--only',
action='store',
default=None,
dest='consumer',
help='Only run the consumer specified')
parser.add_option('-p', '--prepend-path',
action='store',
default=None,
dest='prepend_path',
help='Prepend the python path with the value.')
parser.add_option('-q', '--qty',
action='store',
type='int',
default=1,
dest='quantity',
help='Run the specified quanty of consumer processes '
'when used in conjunction with -o')
def main():
"""Called when invoking the command line script."""
clihelper.setup('rejected', 'RabbitMQ consumer framework', __version__)
clihelper.run(Controller, _cli_options)
if __name__ == '__main__':
main()
| """
OS Level controlling class invokes startup, shutdown and handles signals.
"""
import clihelper
import logging
import signal
import sys
from rejected import mcp
from rejected import __version__
LOGGER = logging.getLogger(__name__)
class Controller(clihelper.Controller):
"""Rejected Controller application that invokes the MCP and handles all
of the OS level concerns.
"""
def _master_control_program(self):
"""Return an instance of the MasterControlProgram.
:rtype: rejected.mcp.MasterControlProgram
"""
return mcp.MasterControlProgram(self._config,
consumer=self._options.consumer,
profile=self._options.profile,
quantity=self._options.quantity)
def _prepend_python_path(self, path): #pragma: no cover
"""Add the specified value to the python path.
:param str path: The path to append
"""
LOGGER.debug('Prepending "%s" to the python path.', path)
sys.path.insert(0, path)
def _setup(self):
"""Continue the run process blocking on MasterControlProgram.run"""
# If the app was invoked to specified to prepend the path, do so now
if self._options.prepend_path:
self._prepend_python_path(self._options.prepend_path)
def stop(self):
"""Shutdown the MCP and child processes cleanly"""
LOGGER.info('Shutting down controller')
self.set_state(self.STATE_STOP_REQUESTED)
# Clear out the timer
signal.setitimer(signal.ITIMER_PROF, 0, 0)
self._mcp.stop_processes()
if self._mcp.is_running:
LOGGER.info('Waiting up to 3 seconds for MCP to shut things down')
signal.setitimer(signal.ITIMER_REAL, 3, 0)
signal.pause()
LOGGER.info('Post pause')
# Force MCP to stop
if self._mcp.is_running:
LOGGER.warning('MCP is taking too long, requesting process kills')
self._mcp.stop_processes()
del self._mcp
else:
LOGGER.info('MCP exited cleanly')
# Change our state
self._stopped()
LOGGER.info('Shutdown complete')
def run(self):
"""Run the rejected Application"""
self._setup()
self._mcp = self._master_control_program()
try:
self._mcp.run()
except KeyboardInterrupt:
LOGGER.info('Caught CTRL-C, shutting down')
clihelper.setup_logging(self._debug)
if self.is_running:
self.stop()
def _cli_options(parser):
"""Add options to the parser
:param optparse.OptionParser parser: The option parser to add options to
"""
parser.add_option('-P', '--profile',
action='store',
default=None,
dest='profile',
help='Profile the consumer modules, specifying '
'the output directory.')
parser.add_option('-o', '--only',
action='store',
default=None,
dest='consumer',
help='Only run the consumer specified')
parser.add_option('-p', '--prepend-path',
action='store',
default=None,
dest='prepend_path',
help='Prepend the python path with the value.')
parser.add_option('-q', '--qty',
action='store',
type='int',
default=1,
dest='quantity',
help='Run the specified quanty of consumer processes '
'when used in conjunction with -o')
def main():
"""Called when invoking the command line script."""
clihelper.setup('rejected', 'RabbitMQ consumer framework', __version__)
clihelper.run(Controller, _cli_options)
if __name__ == '__main__':
main()
| Python | 0.000001 |
db0e6265892231ecf10244eb7ddcddc62a12b82b | Fix bug where cached items in subfolders would be re-read. | configmanager.py | configmanager.py | import json
import os
import os.path
class ConfigManager():
_cache = {}
def __init__(self, configPath = "configs/"):
if os.path.isdir(configPath):
self.configPath = configPath
else:
raise IOError("Config Path does not eixst")
self._configs = {}
self._syncCache()
self.getConfigs()
def __getitem__(self, key):
try:
return self._configs[key]
except KeyError:
self.syncCache()
return self._configs[key]
#Recursive function to get all files. Sub is the relative path from the root config dir.
def getConfigs(self, path = None, sub = "", overrideCache = False):
if path == None:
path = self.configPath
files = os.listdir(path)
for item in files:
#Ignore hidden files.
if item[0] == ".":
continue
#Remove the .json handle from the name
name = item.replace(".json", "")
finalName = os.path.join(sub, name)
#If it's a directory, run this function again within that directory
if os.path.isdir(os.path.join(path, item)):
self.getConfigs(path = os.path.join(path, item), sub = os.path.join(sub, item))
#If we already have something from the cache, or added in previously, skip it.
elif overrideCache or finalName not in self._configs:
#Read in the file
f = open(os.path.join(path, item), "r")
#Check if it's JSON. If it is, it will be parsed.
parsed = self.parseConfig(f.read())
f.close()
if parsed != None:
self.addConfig(finalName, parsed)
#Returns parsed JSON if config is valid JSON, otherwise, return Noen
def parseConfig(self, config):
try:
return json.loads(config)
except ValueError:
return None
def addConfig(self, name, contents):
self._configs[name] = contents
ConfigManager._cache[name] = contents
def _syncCache(self):
unmatchedKeys = [key for key in ConfigManager._cache.keys() if key not in self._configs]
for key in unmatchedKeys:
self._configs[key] = ConfigManager._cache[key]
| import json
import os
import os.path
class ConfigManager():
_cache = {}
def __init__(self, configPath = "configs/"):
if os.path.isdir(configPath):
self.configPath = configPath
else:
raise IOError("Config Path does not eixst")
self._configs = {}
self._syncCache()
self.getConfigs()
def __getitem__(self, key):
try:
return self._configs[key]
except KeyError:
self.syncCache()
return self._configs[key]
#Recursive function to get all files. Sub is the relative path from the root config dir.
def getConfigs(self, path = None, sub = "", overrideCache = False):
if path == None:
path = self.configPath
files = os.listdir(path)
for item in files:
#Ignore hidden files.
if item[0] == ".":
continue
#Remove the .json handle from the name
name = item.replace(".json", "")
finalPath = os.path.join(sub, name)
#If it's a directory, run this function again within that directory
if os.path.isdir(os.path.join(path, item)):
self.getConfigs(path = os.path.join(path, item), sub = os.path.join(sub, item))
#If we already have something from the cache, skip it.
elif overrideCache or name not in self._configs:
#Read in the file
f = open(os.path.join(path, item), "r")
#Check if it's JSON. If it is, it will be parsed.
parsed = self.parseConfig(f.read())
f.close()
if parsed != None:
self.addConfig(finalPath, parsed)
#Returns parsed JSON if config is valid JSON, otherwise, return Noen
def parseConfig(self, config):
try:
return json.loads(config)
except ValueError:
return None
def addConfig(self, name, contents):
self._configs[name] = contents
ConfigManager._cache[name] = contents
def _syncCache(self):
unmatchedKeys = [key for key in ConfigManager._cache.keys() if key not in self._configs]
for key in unmatchedKeys:
self._configs[key] = ConfigManager._cache[key]
| Python | 0 |
109a07b8344df9c2420c2cea7f9bd6419284c920 | Fix get_employees_with_number query | erpnext/communication/doctype/call_log/call_log.py | erpnext/communication/doctype/call_log/call_log.py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from erpnext.crm.doctype.utils import get_scheduled_employees_for_popup, strip_number
from frappe.contacts.doctype.contact.contact import get_contact_with_phone_number
from erpnext.crm.doctype.lead.lead import get_lead_with_phone_number
class CallLog(Document):
def before_insert(self):
number = strip_number(self.get('from'))
self.contact = get_contact_with_phone_number(number)
self.lead = get_lead_with_phone_number(number)
def after_insert(self):
self.trigger_call_popup()
def on_update(self):
doc_before_save = self.get_doc_before_save()
if not doc_before_save: return
if doc_before_save.status in ['Ringing'] and self.status in ['Missed', 'Completed']:
frappe.publish_realtime('call_{id}_disconnected'.format(id=self.id), self)
elif doc_before_save.to != self.to:
self.trigger_call_popup()
def trigger_call_popup(self):
scheduled_employees = get_scheduled_employees_for_popup(self.to)
employee_emails = get_employees_with_number(self.to)
# check if employees with matched number are scheduled to receive popup
emails = set(scheduled_employees).intersection(employee_emails)
# # if no employee found with matching phone number then show popup to scheduled employees
# emails = emails or scheduled_employees if employee_emails
for email in emails:
frappe.publish_realtime('show_call_popup', self, user=email)
@frappe.whitelist()
def add_call_summary(call_log, summary):
doc = frappe.get_doc('Call Log', call_log)
doc.add_comment('Comment', frappe.bold(_('Call Summary')) + '<br><br>' + summary)
def get_employees_with_number(number):
number = strip_number(number)
if not number: return []
employee_emails = frappe.cache().hget('employees_with_number', number)
if employee_emails: return employee_emails
employees = frappe.get_all('Employee', filters={
'cell_number': ['like', '%{}%'.format(number)],
'user_id': ['!=', '']
}, fields=['user_id'])
employee_emails = [employee.user_id for employee in employees]
frappe.cache().hset('employees_with_number', number, employee_emails)
return employee
def set_caller_information(doc, state):
'''Called from hooks on creation of Lead or Contact'''
if doc.doctype not in ['Lead', 'Contact']: return
numbers = [doc.get('phone'), doc.get('mobile_no')]
# contact for Contact and lead for Lead
fieldname = doc.doctype.lower()
# contact_name or lead_name
display_name_field = '{}_name'.format(fieldname)
for number in numbers:
number = strip_number(number)
if not number: continue
filters = frappe._dict({
'from': ['like', '%{}'.format(number)],
fieldname: ''
})
logs = frappe.get_all('Call Log', filters=filters)
for log in logs:
frappe.db.set_value('Call Log', log.name, {
fieldname: doc.name,
display_name_field: doc.get_title()
}, update_modified=False)
| # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from erpnext.crm.doctype.utils import get_scheduled_employees_for_popup, strip_number
from frappe.contacts.doctype.contact.contact import get_contact_with_phone_number
from erpnext.crm.doctype.lead.lead import get_lead_with_phone_number
class CallLog(Document):
def before_insert(self):
number = strip_number(self.get('from'))
self.contact = get_contact_with_phone_number(number)
self.lead = get_lead_with_phone_number(number)
def after_insert(self):
self.trigger_call_popup()
def on_update(self):
doc_before_save = self.get_doc_before_save()
if not doc_before_save: return
if doc_before_save.status in ['Ringing'] and self.status in ['Missed', 'Completed']:
frappe.publish_realtime('call_{id}_disconnected'.format(id=self.id), self)
elif doc_before_save.to != self.to:
self.trigger_call_popup()
def trigger_call_popup(self):
scheduled_employees = get_scheduled_employees_for_popup(self.to)
employee_emails = get_employees_with_number(self.to)
# check if employees with matched number are scheduled to receive popup
emails = set(scheduled_employees).intersection(employee_emails)
# # if no employee found with matching phone number then show popup to scheduled employees
# emails = emails or scheduled_employees if employee_emails
for email in emails:
frappe.publish_realtime('show_call_popup', self, user=email)
@frappe.whitelist()
def add_call_summary(call_log, summary):
doc = frappe.get_doc('Call Log', call_log)
doc.add_comment('Comment', frappe.bold(_('Call Summary')) + '<br><br>' + summary)
def get_employees_with_number(number):
number = strip_number(number)
if not number: return []
employee_emails = frappe.cache().hget('employees_with_number', number)
if employee_emails: return employee_emails
employees = frappe.get_all('Employee', filters={
'cell_number': ['like', '%{}'.format(number)],
'user_id': ['!=', '']
}, fields=['user_id'])
employee_emails = [employee.user_id for employee in employees]
frappe.cache().hset('employees_with_number', number, employee_emails)
return employee
def set_caller_information(doc, state):
'''Called from hooks on creation of Lead or Contact'''
if doc.doctype not in ['Lead', 'Contact']: return
numbers = [doc.get('phone'), doc.get('mobile_no')]
# contact for Contact and lead for Lead
fieldname = doc.doctype.lower()
# contact_name or lead_name
display_name_field = '{}_name'.format(fieldname)
for number in numbers:
number = strip_number(number)
if not number: continue
filters = frappe._dict({
'from': ['like', '%{}'.format(number)],
fieldname: ''
})
logs = frappe.get_all('Call Log', filters=filters)
for log in logs:
frappe.db.set_value('Call Log', log.name, {
fieldname: doc.name,
display_name_field: doc.get_title()
}, update_modified=False)
| Python | 0.000003 |
17af071faa70d3dc4a884f62fb50f34e8621ac6d | Update watchman/constants.py | watchman/constants.py | watchman/constants.py | DEFAULT_CHECKS = (
'watchman.checks.caches',
'watchman.checks.databases',
'watchman.checks.storage',
)
PAID_CHECKS = (
'watchman.checks.email',
)
| DEFAULT_CHECKS = (
'watchman.checks.caches',
'watchman.checks.databases',
'watchman.checks.storage',
)
PAID_CHECKS = (
'watchman.checks.email',
)
| Python | 0 |
6fedc3e826220f69ffd503b7c73e02962cfc1752 | use cp.testing.assert_array_equal | examples/gemm/sgemm.py | examples/gemm/sgemm.py | from __future__ import division
import argparse
import math
import cupy as cp
import numpy as np
from utils import benchmark
from utils import load_kernel
from utils import read_code
sgemm_file = 'sgemm.cu'
def sgemm(A, B,
dim_x=16, dim_y=16, blk_m=64, blk_n=64, blk_k=4,
dim_xa=64, dim_ya=4, dim_xb=4, dim_yb=64):
assert A.dtype == cp.float32
assert B.dtype == cp.float32
assert(dim_x * dim_y == dim_xa * dim_ya == dim_xb * dim_yb)
m, k = A.shape
k, n = B.shape
# Inputs matrices need to be in Fortran order.
A = cp.asfortranarray(A)
B = cp.asfortranarray(B)
C = cp.empty((m, n), dtype=cp.float32, order='F')
config = {'DIM_X': dim_x, 'DIM_Y': dim_y,
'BLK_M': blk_m, 'BLK_N': blk_n, 'BLK_K': blk_k,
'DIM_XA': dim_xa, 'DIM_YA': dim_ya,
'DIM_XB': dim_xb, 'DIM_YB': dim_yb,
'THR_M': blk_m // dim_x, 'THR_N': blk_n // dim_y}
code = read_code(sgemm_file, params=config)
kern = load_kernel('sgemm', code)
grid = (math.ceil(m / blk_m), math.ceil(n / blk_n), 1)
block = (dim_x, dim_y, 1)
args = (m, n, k, A, B, C)
shared_mem = blk_k * (blk_m + 1) * 4 + blk_n * (blk_k + 1) * 4
kern(grid, block, args=args, shared_mem=shared_mem)
return C
def main():
parser = argparse.ArgumentParser(
description='SGEMM kernel call from CuPy')
parser.add_argument(
'--m', type=int, default=np.random.randint(5000, 12000))
parser.add_argument(
'--n', type=int, default=np.random.randint(5000, 12000))
parser.add_argument(
'--k', type=int, default=np.random.randint(500, 5000))
args = parser.parse_args()
print('m={} n={} k={}'.format(args.m, args.n, args.k))
print('start benchmarking')
print('')
A = cp.random.uniform(
low=-1., high=1., size=(args.m, args.k)).astype(cp.float32)
B = cp.random.uniform(
low=-1., high=1., size=(args.k, args.n)).astype(cp.float32)
# check correctness
cp.testing.assert_array_equal(sgemm(A, B), cp.dot(A, B))
# dry run
for _ in range(3):
sgemm(A, B)
kernel_times = benchmark(sgemm, (A, B), n_run=5)
for _ in range(3):
cp.dot(A, B)
cublas_times = benchmark(cp.dot, (A, B), n_run=5)
print('=============================Result===============================')
print('hand written kernel time {} ms'.format(np.mean(kernel_times)))
print('cuBLAS time {} ms'.format(np.mean(cublas_times)))
if __name__ == '__main__':
main()
| from __future__ import division
import argparse
import math
import cupy as cp
import numpy as np
from utils import benchmark
from utils import load_kernel
from utils import read_code
sgemm_file = 'sgemm.cu'
def sgemm(A, B,
dim_x=16, dim_y=16, blk_m=64, blk_n=64, blk_k=4,
dim_xa=64, dim_ya=4, dim_xb=4, dim_yb=64):
assert A.dtype == cp.float32
assert B.dtype == cp.float32
assert(dim_x * dim_y == dim_xa * dim_ya == dim_xb * dim_yb)
m, k = A.shape
k, n = B.shape
# Inputs matrices need to be in Fortran order.
A = cp.asfortranarray(A)
B = cp.asfortranarray(B)
C = cp.empty((m, n), dtype=cp.float32, order='F')
config = {'DIM_X': dim_x, 'DIM_Y': dim_y,
'BLK_M': blk_m, 'BLK_N': blk_n, 'BLK_K': blk_k,
'DIM_XA': dim_xa, 'DIM_YA': dim_ya,
'DIM_XB': dim_xb, 'DIM_YB': dim_yb,
'THR_M': blk_m // dim_x, 'THR_N': blk_n // dim_y}
code = read_code(sgemm_file, params=config)
kern = load_kernel('sgemm', code)
grid = (math.ceil(m / blk_m), math.ceil(n / blk_n), 1)
block = (dim_x, dim_y, 1)
args = (m, n, k, A, B, C)
shared_mem = blk_k * (blk_m + 1) * 4 + blk_n * (blk_k + 1) * 4
kern(grid, block, args=args, shared_mem=shared_mem)
return C
def main():
parser = argparse.ArgumentParser(
description='SGEMM kernel call from CuPy')
parser.add_argument(
'--m', type=int, default=np.random.randint(5000, 12000))
parser.add_argument(
'--n', type=int, default=np.random.randint(5000, 12000))
parser.add_argument(
'--k', type=int, default=np.random.randint(500, 5000))
args = parser.parse_args()
print('m={} n={} k={}'.format(args.m, args.n, args.k))
print('start benchmarking')
print('')
A = cp.random.uniform(
low=-1., high=1., size=(args.m, args.k)).astype(cp.float32)
B = cp.random.uniform(
low=-1., high=1., size=(args.k, args.n)).astype(cp.float32)
# check correctness
np.testing.assert_equal(sgemm(A, B).get(), cp.dot(A, B).get())
# dry run
for _ in range(3):
sgemm(A, B)
kernel_times = benchmark(sgemm, (A, B), n_run=5)
for _ in range(3):
cp.dot(A, B)
cublas_times = benchmark(cp.dot, (A, B), n_run=5)
print('=============================Result===============================')
print('hand written kernel time {} ms'.format(np.mean(kernel_times)))
print('cuBLAS time {} ms'.format(np.mean(cublas_times)))
if __name__ == '__main__':
main()
| Python | 0.000337 |
e1a7e4535e64c005fb508ba6d3fed021bbd40a62 | Update only tables in visible schemas | oedb_datamodels/versions/1a73867b1e79_add_meta_search.py | oedb_datamodels/versions/1a73867b1e79_add_meta_search.py | """Add meta_search table
Revision ID: 1a73867b1e79
Revises: 1c6e2fb3d3b6
Create Date: 2019-04-29 11:47:04.783168
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm.session import sessionmaker
from api.actions import update_meta_search
from dataedit.views import schema_whitelist
# revision identifiers, used by Alembic.
revision = "1a73867b1e79"
down_revision = "1c6e2fb3d3b6"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"meta_search",
sa.Column("schema", sa.String(length=100), nullable=False),
sa.Column("table", sa.String(length=100), nullable=False),
sa.Column("comment", postgresql.TSVECTOR(), nullable=True),
sa.PrimaryKeyConstraint("schema", "table"),
schema="public",
)
conn = op.get_bind()
meta = sa.MetaData(bind=conn)
meta.reflect()
for table in meta.tables.values():
if table.schema in schema_whitelist:
update_meta_search(table.name, table.schema)
def downgrade():
op.drop_table("meta_search", schema="public")
| """Add meta_search table
Revision ID: 1a73867b1e79
Revises: 1c6e2fb3d3b6
Create Date: 2019-04-29 11:47:04.783168
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm.session import sessionmaker
from api.actions import update_meta_search
# revision identifiers, used by Alembic.
revision = "1a73867b1e79"
down_revision = "1c6e2fb3d3b6"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"meta_search",
sa.Column("schema", sa.String(length=100), nullable=False),
sa.Column("table", sa.String(length=100), nullable=False),
sa.Column("comment", postgresql.TSVECTOR(), nullable=True),
sa.PrimaryKeyConstraint("schema", "table"),
schema="public",
)
conn = op.get_bind()
meta = sa.MetaData(bind=conn)
meta.reflect()
for table in meta.tables.values():
update_meta_search(table.name, table.schema)
def downgrade():
op.drop_table("meta_search", schema="public")
| Python | 0 |
f1fec3790fee11ff3d83c272e3a2aa7bb548ddfa | Remove print | open_spiel/python/algorithms/expected_game_score_test.py | open_spiel/python/algorithms/expected_game_score_test.py | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.policy_value."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from open_spiel.python import policy
from open_spiel.python.algorithms import expected_game_score
import open_spiel.python.games
import pyspiel
class PolicyValueTest(absltest.TestCase):
def test_expected_game_score_uniform_random_kuhn_poker(self):
game = pyspiel.load_game("kuhn_poker")
uniform_policy = policy.UniformRandomPolicy(game)
uniform_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [uniform_policy] * 2)
self.assertTrue(np.allclose(uniform_policy_values, [1 / 8, -1 / 8]))
def test_expected_game_score_uniform_random_iterated_prisoner_dilemma(self):
game = pyspiel.load_game(
"python_iterated_prisoners_dilemma(max_game_length=6)")
uniform_policy = policy.UniformRandomPolicy(game)
uniform_policy_values = expected_game_score.policy_value(
game.new_initial_state(), uniform_policy)
self.assertTrue(
np.allclose(uniform_policy_values, [17.6385498, 17.6385498]))
if __name__ == "__main__":
absltest.main()
| # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.policy_value."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from open_spiel.python import policy
from open_spiel.python.algorithms import expected_game_score
import open_spiel.python.games
import pyspiel
class PolicyValueTest(absltest.TestCase):
def test_expected_game_score_uniform_random_kuhn_poker(self):
game = pyspiel.load_game("kuhn_poker")
uniform_policy = policy.UniformRandomPolicy(game)
uniform_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [uniform_policy] * 2)
self.assertTrue(np.allclose(uniform_policy_values, [1 / 8, -1 / 8]))
def test_expected_game_score_uniform_random_iterated_prisoner_dilemma(self):
game = pyspiel.load_game(
"python_iterated_prisoners_dilemma(max_game_length=6)")
uniform_policy = policy.UniformRandomPolicy(game)
uniform_policy_values = expected_game_score.policy_value(
game.new_initial_state(), uniform_policy)
print(uniform_policy_values)
self.assertTrue(
np.allclose(uniform_policy_values, [17.6385498, 17.6385498]))
if __name__ == "__main__":
absltest.main()
| Python | 0.000016 |
fa52bbde01f62bb0816e71970ac50761947afa72 | Improve comment | retaining_wall.py | retaining_wall.py | class RetainingWallSolver(object):
def retaining_wall(self, wood_lengths, required_lengths):
self.required_lengths = required_lengths
return self.retaining_wall_recursive(wood_lengths, len(required_lengths) - 1)
def retaining_wall_recursive(self, wood_lengths, required_length_idx):
if required_length_idx <= -1:
return {
'cuts': []
}
current_required_length = self.required_lengths[required_length_idx]
possible_subsolutions = []
for wood_length_idx in range(len(wood_lengths) - 1, -1, -1):
if wood_lengths[wood_length_idx] < current_required_length:
# cant cut from this length
continue
# what if we chose to cut current_required_length out of this wood length
new_wood_lengths = list(wood_lengths)
new_wood_lengths[wood_length_idx] -= current_required_length
subsolution = self.retaining_wall_recursive(new_wood_lengths, required_length_idx - 1)
if not subsolution:
continue
if new_wood_lengths[wood_length_idx] != 0:
subsolution['cuts'].append({
'wood_num': wood_length_idx,
'cut_amount': current_required_length
})
possible_subsolutions.append(subsolution)
if len(possible_subsolutions) == 0:
return False
# return the solution with the least number of cuts
return min(possible_subsolutions, key=lambda s: len(s['cuts']))
| class RetainingWallSolver(object):
def retaining_wall(self, wood_lengths, required_lengths):
self.required_lengths = required_lengths
return self.retaining_wall_recursive(wood_lengths, len(required_lengths) - 1)
def retaining_wall_recursive(self, wood_lengths, required_length_idx):
if required_length_idx <= -1:
return {
'cuts': []
}
current_required_length = self.required_lengths[required_length_idx]
possible_subsolutions = []
for wood_length_idx in range(len(wood_lengths) - 1, -1, -1):
if wood_lengths[wood_length_idx] < current_required_length:
# cant cut from this length
continue
# what if we chose to cut this required length out of this wood length
new_wood_lengths = list(wood_lengths)
new_wood_lengths[wood_length_idx] -= current_required_length
subsolution = self.retaining_wall_recursive(new_wood_lengths, required_length_idx - 1)
if not subsolution:
continue
if new_wood_lengths[wood_length_idx] != 0:
subsolution['cuts'].append({
'wood_num': wood_length_idx,
'cut_amount': current_required_length
})
possible_subsolutions.append(subsolution)
if len(possible_subsolutions) == 0:
return False
# return the solution with the least number of cuts
return min(possible_subsolutions, key=lambda s: len(s['cuts']))
| Python | 0 |
b4a9380c73dd367c2cf6249cdf4cdbbdfdbc7907 | fix example | examples/pythonnews.py | examples/pythonnews.py | """
Extract python news from python.org
"""
import re
import logging
from pomp.core.base import BaseCrawler, BasePipeline
from pomp.core.item import Item, Field
from pomp.contrib import SimpleDownloader
logging.basicConfig(level=logging.DEBUG)
news_re = re.compile(r'<h2 class="news">(.*?)</h2>([\s\S]*?)<div class="pubdate">(.*?)</div>')
class PythonNewsItem(Item):
title = Field()
published = Field()
def __repr__(self):
return '%s\n\t%s\n' % (
self.title,
self.published,
)
class PythonNewsCrawler(BaseCrawler):
ENTRY_URL = 'http://python.org/news/'
def extract_items(self, response):
for i in news_re.findall(response.body.decode('utf-8')):
item = PythonNewsItem()
item.title, item.published = i[0], i[2]
yield item
def next_url(self, response):
return None # one page crawler
class PrintPipeline(BasePipeline):
def process(self, crawler, item):
print(item)
if __name__ == '__main__':
from pomp.core.engine import Pomp
pomp = Pomp(
downloader=SimpleDownloader(),
pipelines=[PrintPipeline()],
)
pomp.pump(PythonNewsCrawler())
| """
Extract python news from python.org
"""
import re
import logging
from pomp.core.base import BaseCrawler, BasePipeline
from pomp.core.item import Item, Field
from pomp.contrib import SimpleDownloader
logging.basicConfig(level=logging.DEBUG)
news_re = re.compile(r'<h2 class="news">(.*?)</h2>([\s\S]*?)<div class="pubdate">(.*?)</div>')
class PythonNewsItem(Item):
title = Field()
published = Field()
def __repr__(self):
return '%s\n\t%s\n' % (
self.title,
self.published,
)
class PythonNewsCrawler(BaseCrawler):
ENTRY_URL = 'http://python.org/news/'
def extract_items(self, response):
for i in news_re.findall(response.body.decode('utf-8')):
item = PythonNewsItem()
item.title, item.published = i[0], i[2]
yield item
def next_url(self, response):
return None # one page crawler
class PrintPipeline(BasePipeline):
def process(self, item):
print(item)
if __name__ == '__main__':
from pomp.core.engine import Pomp
pomp = Pomp(
downloader=SimpleDownloader(),
pipelines=[PrintPipeline()],
)
pomp.pump(PythonNewsCrawler())
| Python | 0.0001 |
e3c42442f090b8b6982f7ff8c93632c43cfa80b3 | use insights landing for offseason | tba_config.py | tba_config.py | import os
DEBUG = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')
MAX_YEAR = 2015
# For choosing what the main landing page displays
KICKOFF = 1
BUILDSEASON = 2
COMPETITIONSEASON = 3
OFFSEASON = 4
INSIGHTS = 5
CHAMPS = 6
# The CONFIG variables should have exactly the same structure between environments
# Eventually a test environment should be added. -gregmarra 17 Jul 2012
if DEBUG:
CONFIG = {
"env": "dev",
"memcache": False,
"response_cache": False,
"firebase-url": "https://thebluealliance-dev.firebaseio.com/{}.json?auth={}"
}
else:
CONFIG = {
"env": "prod",
"memcache": True,
"response_cache": True,
"firebase-url": "https://thebluealliance.firebaseio.com/{}.json?auth={}"
}
CONFIG['landing_handler'] = INSIGHTS
CONFIG["static_resource_version"] = 7
| import os
DEBUG = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')
MAX_YEAR = 2015
# For choosing what the main landing page displays
KICKOFF = 1
BUILDSEASON = 2
COMPETITIONSEASON = 3
OFFSEASON = 4
INSIGHTS = 5
CHAMPS = 6
# The CONFIG variables should have exactly the same structure between environments
# Eventually a test environment should be added. -gregmarra 17 Jul 2012
if DEBUG:
CONFIG = {
"env": "dev",
"memcache": False,
"response_cache": False,
"firebase-url": "https://thebluealliance-dev.firebaseio.com/{}.json?auth={}"
}
else:
CONFIG = {
"env": "prod",
"memcache": True,
"response_cache": True,
"firebase-url": "https://thebluealliance.firebaseio.com/{}.json?auth={}"
}
CONFIG['landing_handler'] = OFFSEASON
CONFIG["static_resource_version"] = 7
| Python | 0 |
b860d7cb81488f5ebbe7e9e356a6d4f140c33df5 | update to follow python 2to3 changes | tests/__init__.py | tests/__init__.py | from .test_home import *
from .test_feed import *
from .test_shownote import *
from .test_agenda import *
from .test_episode import *
| from test_home import *
from test_feed import *
from test_shownote import *
from test_agenda import *
from test_episode import *
| Python | 0 |
d77256d1964354eb7dd178f383dd3254c3b4d975 | Fix source docs page | docs/_helpers/source_page.py | docs/_helpers/source_page.py | """Generate a restructured text document that describes built-in sources
and save it to this module's docstring for the purpose of including in
sphinx documentation via the automodule directive."""
import string
from sncosmo.models import _SOURCES
lines = [
'',
' '.join([30*'=', 7*'=', 10*'=', 27*'=', 30*'=', 7*'=', 20*'=']),
'{0:30} {1:7} {2:10} {3:27} {4:30} {5:7} {6:50}'.format(
'Name', 'Version', 'Type', 'Subclass', 'Reference', 'Website', 'Notes')
]
lines.append(lines[1])
urlnums = {}
allnotes = []
allrefs = []
for m in _SOURCES.get_loaders_metadata():
reflink = ''
urllink = ''
notelink = ''
if 'note' in m:
if m['note'] not in allnotes:
allnotes.append(m['note'])
notenum = allnotes.index(m['note'])
notelink = '[{0}]_'.format(notenum + 1)
if 'reference' in m:
reflink = '[{0}]_'.format(m['reference'][0])
if m['reference'] not in allrefs:
allrefs.append(m['reference'])
if 'url' in m:
url = m['url']
if url not in urlnums:
if len(urlnums) == 0:
urlnums[url] = 0
else:
urlnums[url] = max(urlnums.values()) + 1
urllink = '`{0}`_'.format(string.ascii_letters[urlnums[url]])
lines.append("{0!r:30} {1!r:7} {2:10} {3:27} {4:30} {5:7} {6:50}"
.format(m['name'], m['version'], m['type'], m['subclass'],
reflink, urllink, notelink))
lines.extend([lines[1], ''])
for refkey, ref in allrefs:
lines.append('.. [{0}] `{1}`__'.format(refkey, ref))
lines.append('')
for url, urlnum in urlnums.items():
lines.append('.. _`{0}`: {1}'.format(string.ascii_letters[urlnum], url))
lines.append('')
for i, note in enumerate(allnotes):
lines.append('.. [{0}] {1}'.format(i + 1, note))
lines.append('')
__doc__ = '\n'.join(lines)
| """Generate a restructured text document that describes built-in sources
and save it to this module's docstring for the purpose of including in
sphinx documentation via the automodule directive."""
import string
from sncosmo.models import _SOURCES
lines = [
'',
' '.join([20*'=', 7*'=', 10*'=', 27*'=', 30*'=', 7*'=', 20*'=']),
'{0:20} {1:7} {2:10} {3:27} {4:30} {5:7} {6:50}'.format(
'Name', 'Version', 'Type', 'Subclass', 'Reference', 'Website', 'Notes')
]
lines.append(lines[1])
urlnums = {}
allnotes = []
allrefs = []
for m in _SOURCES.get_loaders_metadata():
reflink = ''
urllink = ''
notelink = ''
if 'note' in m:
if m['note'] not in allnotes:
allnotes.append(m['note'])
notenum = allnotes.index(m['note'])
notelink = '[{0}]_'.format(notenum + 1)
if 'reference' in m:
reflink = '[{0}]_'.format(m['reference'][0])
if m['reference'] not in allrefs:
allrefs.append(m['reference'])
if 'url' in m:
url = m['url']
if url not in urlnums:
if len(urlnums) == 0:
urlnums[url] = 0
else:
urlnums[url] = max(urlnums.values()) + 1
urllink = '`{0}`_'.format(string.ascii_letters[urlnums[url]])
lines.append("{0!r:20} {1!r:7} {2:10} {3:27} {4:30} {5:7} {6:50}"
.format(m['name'], m['version'], m['type'], m['subclass'],
reflink, urllink, notelink))
lines.extend([lines[1], ''])
for refkey, ref in allrefs:
lines.append('.. [{0}] `{1}`__'.format(refkey, ref))
lines.append('')
for url, urlnum in urlnums.items():
lines.append('.. _`{0}`: {1}'.format(string.ascii_letters[urlnum], url))
lines.append('')
for i, note in enumerate(allnotes):
lines.append('.. [{0}] {1}'.format(i + 1, note))
lines.append('')
__doc__ = '\n'.join(lines)
| Python | 0.000001 |
5f5a7ec9460d60a964663ace670529813a41a9d9 | Update bluetooth_ping_test.py | tests/bluetooth_ping_test.py | tests/bluetooth_ping_test.py | #!/usr/bin/env python
import os
import subprocess as subp
from subprocess import *
from avocado import Test
#I have used my Samsung Galaxy S7 Edge as target device
class WifiScanAP(Test):
def test():
targetDeviceMac = '8C:1A:BF:0D:31:A9'
bluetoothChannel = '2'
port = 1
print("Bluetooth ping test: testing " + targetDeviceMac)
p = subp.Popen(['sudo', 'l2ping', '8C:1A:BF:0D:31:A9','-c', '5'], stdout=subp.PIPE, stderr=subp.PIPE)
stdout, stderr = p.communicate()
res = stdout.rstrip()
if "5 sent, 5 received" in res:
self.log.debug("Bluetooth ping test succeeded: + res")
else:
self.fail("Bluetooth ping test: pinging " + targetDeviceMac + " failed")
| #!/usr/bin/env python
import os
import subprocess as subp
from subprocess import *
from avocado import Test
class WifiScanAP(Test):
def test():
targetDeviceMac = '8C:1A:BF:0D:31:A9'
bluetoothChannel = '2'
port = 1
print("Bluetooth ping test: testing " + targetDeviceMac)
p = subp.Popen(['sudo', 'l2ping', '8C:1A:BF:0D:31:A9','-c', '5'], stdout=subp.PIPE, stderr=subp.PIPE)
stdout, stderr = p.communicate()
res = stdout.rstrip()
if "5 sent, 5 received" in res:
self.log.debug("Bluetooth ping test succeeded: + res")
else:
self.fail("Bluetooth ping test: pinging " + targetDeviceMac + " failed")
| Python | 0.000002 |
9cc45f750c0860715e66c085895611984531c48c | update standalone disclosure url | paying_for_college/config/urls.py | paying_for_college/config/urls.py | from django.conf.urls import url, include
from django.conf import settings
from paying_for_college.views import LandingView, StandAloneView
from django.contrib import admin
from django.conf import settings
try:
STANDALONE = settings.STANDALONE
except AttributeError: # pragma: no cover
STANDALONE = False
urlpatterns = [
url(r'^$',
LandingView.as_view(), name='pfc-landing'),
url(r'^understanding-financial-aid-offers/',
include('paying_for_college.disclosures.urls',
namespace='disclosures')),
url(r'^repaying-student-debt/$',
StandAloneView.as_view(template_name='repay_student_debt.html'),
name='pfc-repay'),
url(r'^choosing-a-student-loan/$',
StandAloneView.as_view(template_name='choose_a_loan.html'),
name='pfc-choose'),
url(r'^managing-college-money/$',
StandAloneView.as_view(template_name='manage_your_money.html'),
name='pfc-manage'),
]
if STANDALONE:
urlpatterns += [
url(r'^paying-for-college/admin/', include(admin.site.urls)),
url(r'^paying-for-college/$',
LandingView.as_view(), name='standalone:pfc-landing'),
url(r'^paying-for-college/understanding-financial-aid-offers/',
include('paying_for_college.disclosures.urls',
namespace='standalone-disclosures')),
url(r'^paying-for-college/repaying-student-debt/',
StandAloneView.as_view(template_name='repay_student_debt.html'),
name='standalone-pfc-repay'),
url(r'^paying-for-college/choosing-a-student-loan/$',
StandAloneView.as_view(template_name='choose_a_loan.html'),
name='standalone-pfc-choose'),
url(r'^paying-for-college/managing-college-money/$',
StandAloneView.as_view(template_name='manage_your_money.html'),
name='standalone-pfc-manage'),
]
| from django.conf.urls import url, include
from django.conf import settings
from paying_for_college.views import LandingView, StandAloneView
from django.contrib import admin
from django.conf import settings
try:
STANDALONE = settings.STANDALONE
except AttributeError: # pragma: no cover
STANDALONE = False
urlpatterns = [
url(r'^$',
LandingView.as_view(), name='pfc-landing'),
url(r'^understanding-financial-aid-offers/',
include('paying_for_college.disclosures.urls',
namespace='disclosures')),
url(r'^repaying-student-debt/$',
StandAloneView.as_view(template_name='repay_student_debt.html'),
name='pfc-repay'),
url(r'^choosing-a-student-loan/$',
StandAloneView.as_view(template_name='choose_a_loan.html'),
name='pfc-choose'),
url(r'^managing-college-money/$',
StandAloneView.as_view(template_name='manage_your_money.html'),
name='pfc-manage'),
]
if STANDALONE:
urlpatterns += [
url(r'^paying-for-college/admin/', include(admin.site.urls)),
url(r'^paying-for-college/$',
LandingView.as_view(), name='standalone:pfc-landing'),
url(r'^paying-for-college/compare-financial-aid-and-college-cost/',
include('paying_for_college.disclosures.urls',
namespace='standalone-disclosures')),
url(r'^paying-for-college/repaying-student-debt/',
StandAloneView.as_view(template_name='repay_student_debt.html'),
name='standalone-pfc-repay'),
url(r'^paying-for-college/choosing-a-student-loan/$',
StandAloneView.as_view(template_name='choose_a_loan.html'),
name='standalone-pfc-choose'),
url(r'^paying-for-college/managing-college-money/$',
StandAloneView.as_view(template_name='manage_your_money.html'),
name='standalone-pfc-manage'),
]
| Python | 0 |
43b4910e004e7096addb3d50e8a0a6c307a669c6 | Remove dead get_body_parameter_name_override | lepo/apidef/operation/openapi.py | lepo/apidef/operation/openapi.py | from lepo.apidef.operation.base import Operation
from lepo.apidef.parameter.openapi import OpenAPI3BodyParameter, OpenAPI3Parameter
from lepo.utils import maybe_resolve
class OpenAPI3Operation(Operation):
parameter_class = OpenAPI3Parameter
body_parameter_class = OpenAPI3BodyParameter
def _get_body_parameter(self):
for source in (
self.path.mapping.get('requestBody'),
self.data.get('requestBody'),
):
if source:
source = maybe_resolve(source, self.api.resolve_reference)
body_parameter = self.body_parameter_class(data=source, operation=self, api=self.api)
# TODO: Document x-lepo-body-name
body_parameter.name = self.data.get('x-lepo-body-name', body_parameter.name)
return body_parameter
def get_parameter_dict(self):
parameter_dict = super().get_parameter_dict()
for parameter in parameter_dict.values():
if parameter.in_body: # pragma: no cover
raise ValueError('Regular parameter declared to be in body while parsing OpenAPI 3')
body_parameter = self._get_body_parameter()
if body_parameter:
parameter_dict[body_parameter.name] = body_parameter
return parameter_dict
| from lepo.apidef.operation.base import Operation
from lepo.apidef.parameter.openapi import OpenAPI3BodyParameter, OpenAPI3Parameter
from lepo.utils import maybe_resolve
class OpenAPI3Operation(Operation):
parameter_class = OpenAPI3Parameter
body_parameter_class = OpenAPI3BodyParameter
def _get_body_parameter(self):
for source in (
self.path.mapping.get('requestBody'),
self.data.get('requestBody'),
):
if source:
source = maybe_resolve(source, self.api.resolve_reference)
body_parameter = self.body_parameter_class(data=source, operation=self, api=self.api)
# TODO: Document x-lepo-body-name
body_parameter.name = self.data.get('x-lepo-body-name', body_parameter.name)
return body_parameter
def get_body_parameter_name_override(self):
return
def get_parameter_dict(self):
parameter_dict = super().get_parameter_dict()
for parameter in parameter_dict.values():
if parameter.in_body: # pragma: no cover
raise ValueError('Regular parameter declared to be in body while parsing OpenAPI 3')
body_parameter = self._get_body_parameter()
if body_parameter:
parameter_dict[body_parameter.name] = body_parameter
return parameter_dict
| Python | 0.000064 |
b5a6d540f5fdef37b1d58fc45921737e3c77ae96 | fix user autocomplete | let_me_app/views/autocomplete.py | let_me_app/views/autocomplete.py | from dal import autocomplete
from slugify import slugify
from let_me_auth.models import User
from let_me_app.models import Equipment, StaffRole
from let_me_auth.social.pipeline import ABSENT_MAIL_HOST
import re
class UserAutocomplete(autocomplete.Select2QuerySetView):
create_field = 'username'
def create_object(self, text):
cell_phone = re.findall(r'\+?(\d{9,12})', text)
if cell_phone:
cell_phone = cell_phone[0]
text = re.sub(r'\+?(\d{9,12})', '', text).strip()
parts = text.split(' ', 1)
first_name = parts[0].strip()
email_parts = [slugify(first_name)]
defaults = {'first_name': first_name}
if len(parts) > 1:
last_name = parts[1].strip()
defaults['last_name'] = last_name
email_parts.append(slugify(last_name))
email = '@'.join(['.'.join(email_parts), ABSENT_MAIL_HOST])
if cell_phone:
required = {'cell_phone': cell_phone}
defaults.update({'email': email})
else:
required = {'email': email}
user, _ = User.objects.get_or_create(defaults=defaults, **required)
return user
def get_queryset(self):
# Don't forget to filter out results depending on the visitor !
if not self.request.user.is_authenticated():
return User.objects.none()
qs = User.objects.all()
if self.q:
qs = User.objects.filter(first_name__istartswith=self.q)
qs = qs | User.objects.filter(last_name__istartswith=self.q)
qs = qs | User.objects.filter(email__istartswith=self.q)
return qs
class EquipmentAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
# Don't forget to filter out results depending on the visitor !
if not self.request.user.is_authenticated():
return Equipment.objects.none()
qs = Equipment.objects.all()
if self.q:
qs = Equipment.objects.filter(name__istartswith=self.q)
return qs
class StaffRoleAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
# Don't forget to filter out results depending on the visitor !
if not self.request.user.is_authenticated():
return StaffRole.objects.none()
qs = StaffRole.objects.all()
if self.q:
qs = StaffRole.objects.filter(name__istartswith=self.q)
return qs
| from dal import autocomplete
from slugify import slugify
from let_me_auth.models import User
from let_me_app.models import Equipment, StaffRole
class UserAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
# Don't forget to filter out results depending on the visitor !
if not self.request.user.is_authenticated():
return User.objects.none()
qs = User.objects.all()
if self.q:
qs = User.objects.filter(first_name__istartswith=self.q)
qs = qs | User.objects.filter(last_name__istartswith=self.q)
qs = qs | User.objects.filter(email__istartswith=self.q)
return qs
class EquipmentAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
# Don't forget to filter out results depending on the visitor !
if not self.request.user.is_authenticated():
return Equipment.objects.none()
qs = Equipment.objects.all()
if self.q:
qs = Equipment.objects.filter(name__istartswith=self.q)
return qs
class StaffRoleAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
# Don't forget to filter out results depending on the visitor !
if not self.request.user.is_authenticated():
return StaffRole.objects.none()
qs = StaffRole.objects.all()
if self.q:
qs = StaffRole.objects.filter(name__istartswith=self.q)
return qs
| Python | 0.000066 |
060576768e02c0499282770dd22e35048d62b12e | Improve clarity of session finish function | tests/conftest.py | tests/conftest.py | from __future__ import print_function
import os
import boto
import pytest
from boto.s3.key import Key as S3Key
from boto.exception import NoAuthHandlerFound
from os.path import join
s3_bucket = "bokeh-travis"
s3 = "https://s3.amazonaws.com/%s" % s3_bucket
build_id = os.environ.get("TRAVIS_BUILD_ID")
# Can we make this not hard coded and read in the report location from pytest?
report_file = "tests/pytest-report.html"
def pytest_sessionfinish(session, exitstatus):
if os.environ.get("UPLOAD_PYTEST_HTML", "False") != "True":
return
if hasattr(session.config, 'slaveinput'):
# when slave nodes (xdist) finish, the report won't be ready
return
try:
conn = boto.connect_s3()
bucket = conn.get_bucket(s3_bucket)
with open(report_file, "r") as f:
html = f.read()
filename = join(build_id, "report.html")
key = S3Key(bucket, filename)
key.set_metadata("Content-Type", "text/html")
key.set_contents_from_string(html, policy="public-read")
print("\n%s Access report at: %s" % ("---", join(s3, filename)))
except NoAuthHandlerFound:
print("Upload was requested but could not connect to S3.")
except OSError:
print("Upload was requested but report was not generated.")
@pytest.fixture(scope="session")
def capabilities(capabilities):
capabilities["browserName"] = "firefox"
capabilities["tunnel-identifier"] = os.environ.get("TRAVIS_JOB_NUMBER")
return capabilities
| from __future__ import print_function
import os
import boto
import pytest
from boto.s3.key import Key as S3Key
from boto.exception import NoAuthHandlerFound
from os.path import join, isfile
s3_bucket = "bokeh-travis"
s3 = "https://s3.amazonaws.com/%s" % s3_bucket
build_id = os.environ.get("TRAVIS_BUILD_ID")
# Can we make this not hard coded and read in the report location from pytest?
report_file = "tests/pytest-report.html"
def pytest_sessionfinish(session, exitstatus):
try_upload = os.environ.get("UPLOAD_PYTEST_HTML", "False") == "True"
report_ready = isfile(report_file)
if try_upload and report_ready:
try:
conn = boto.connect_s3()
bucket = conn.get_bucket(s3_bucket)
upload = True
except NoAuthHandlerFound:
print("Upload was requested but could not connect to S3.")
upload = False
if upload is True:
with open(report_file, "r") as f:
html = f.read()
filename = join(build_id, "report.html")
key = S3Key(bucket, filename)
key.set_metadata("Content-Type", "text/html")
key.set_contents_from_string(html, policy="public-read")
print("\n%s Access report at: %s" % ("---", join(s3, filename)))
@pytest.fixture(scope="session")
def capabilities(capabilities):
capabilities["browserName"] = "firefox"
capabilities["tunnel-identifier"] = os.environ.get("TRAVIS_JOB_NUMBER")
return capabilities
| Python | 0 |
36a00bd6ece27b89843a856cd2b99d25a1d0e4d3 | Modify conftest.py to support Python 3.5 only | tests/conftest.py | tests/conftest.py | # -*- coding: utf-8 -*-
"""Used by pytest to do some preparation work before running tests."""
#
# (C) Pywikibot team, 2016-2020
#
# Distributed under the terms of the MIT license.
#
import sys
def pytest_configure(config):
"""Set the sys._test_runner_pytest flag to True, if pytest is used."""
sys._test_runner_pytest = True
| # -*- coding: utf-8 -*-
"""Used by pytest to do some preparation work before running tests."""
#
# (C) Pywikibot team, 2016-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import sys
def pytest_configure(config):
"""Set the sys._test_runner_pytest flag to True, if pytest is used."""
sys._test_runner_pytest = True
| Python | 0.000002 |
d3fbe9934329df1b1c5f752e4a43981b4fc8beae | Use pathlib.Path | tests/conftest.py | tests/conftest.py | import pathlib
import pytest
from _pytest.compat import LEGACY_PATH
from libvcs.shortcuts import create_repo_from_pip_url
from libvcs.util import run
@pytest.fixture(scope="function")
def tmpdir_repoparent(tmp_path: pathlib.Path):
"""Return temporary directory for repository checkout guaranteed unique."""
fn = tmp_path
return fn
@pytest.fixture
def git_repo_kwargs(tmpdir_repoparent: pathlib.Path, git_dummy_repo_dir):
"""Return kwargs for :func:`create_repo_from_pip_url`."""
repo_name = "repo_clone"
return {
"url": "git+file://" + git_dummy_repo_dir,
"parent_dir": str(tmpdir_repoparent),
"name": repo_name,
}
@pytest.fixture
def git_repo(git_repo_kwargs):
"""Create an git repository for tests. Return repo."""
git_repo = create_repo_from_pip_url(**git_repo_kwargs)
git_repo.obtain(quiet=True)
return git_repo
@pytest.fixture
def create_git_dummy_repo(tmpdir_repoparent):
def fn(repo_name, testfile_filename="testfile.test"):
repo_path = str(tmpdir_repoparent / repo_name)
run(["git", "init", repo_name], cwd=str(tmpdir_repoparent))
run(["touch", testfile_filename], cwd=repo_path)
run(["git", "add", testfile_filename], cwd=repo_path)
run(["git", "commit", "-m", "test file for %s" % repo_name], cwd=repo_path)
return repo_path
yield fn
@pytest.fixture
def git_dummy_repo_dir(tmpdir_repoparent, create_git_dummy_repo):
"""Create a git repo with 1 commit, used as a remote."""
return create_git_dummy_repo("dummyrepo")
@pytest.fixture
def config_dir(tmpdir: LEGACY_PATH):
conf_dir = tmpdir.join(".vcspull")
conf_dir.ensure(dir=True)
return conf_dir
| import pytest
from _pytest.compat import LEGACY_PATH
from libvcs.shortcuts import create_repo_from_pip_url
from libvcs.util import run
@pytest.fixture(scope="function")
def tmpdir_repoparent(tmpdir_factory):
"""Return temporary directory for repository checkout guaranteed unique."""
fn = tmpdir_factory.mktemp("repo")
return fn
@pytest.fixture
def git_repo_kwargs(tmpdir_repoparent, git_dummy_repo_dir):
"""Return kwargs for :func:`create_repo_from_pip_url`."""
repo_name = "repo_clone"
return {
"url": "git+file://" + git_dummy_repo_dir,
"parent_dir": str(tmpdir_repoparent),
"name": repo_name,
}
@pytest.fixture
def git_repo(git_repo_kwargs):
"""Create an git repository for tests. Return repo."""
git_repo = create_repo_from_pip_url(**git_repo_kwargs)
git_repo.obtain(quiet=True)
return git_repo
@pytest.fixture
def create_git_dummy_repo(tmpdir_repoparent):
def fn(repo_name, testfile_filename="testfile.test"):
repo_path = str(tmpdir_repoparent.join(repo_name))
run(["git", "init", repo_name], cwd=str(tmpdir_repoparent))
run(["touch", testfile_filename], cwd=repo_path)
run(["git", "add", testfile_filename], cwd=repo_path)
run(["git", "commit", "-m", "test file for %s" % repo_name], cwd=repo_path)
return repo_path
yield fn
@pytest.fixture
def git_dummy_repo_dir(tmpdir_repoparent, create_git_dummy_repo):
"""Create a git repo with 1 commit, used as a remote."""
return create_git_dummy_repo("dummyrepo")
@pytest.fixture
def config_dir(tmpdir: LEGACY_PATH):
conf_dir = tmpdir.join(".vcspull")
conf_dir.ensure(dir=True)
return conf_dir
| Python | 0.000003 |
8913f5d6a06e0f25d1c8c1a45e0f5b4da8cbf421 | bump version | rodeo/__init__.py | rodeo/__init__.py | __version__ = "0.1.0"
|
__version__ = "0.0.2"
| Python | 0 |
56b1ef461cfce11ad5e08a031abf175ed73c2081 | Add radius2fov and imagexy_to_pixelXY functions. Clean import. | coordinate_transformations.py | coordinate_transformations.py | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 21 22:35:50 2017
@author: lauri.kangas
"""
import numpy as np
from numpy import sin,cos,arccos,arctan2,mod,pi
import projections
def rotate_RADEC(RAs, DECs, center_RA, center_DEC, output='xyz'):
# rotate RA,DEC coordinates to turn center_RA,center_DEC to origin
# RA can be rotated first
RArotated_RAs = mod(RAs - center_RA, 2*pi)
# convert to rectangular coordinates
RArotated_x, \
RArotated_y, \
RArotated_z = RADEC_to_xyz(RArotated_RAs, DECs)
# now we can rotate by center_DEC.
RADECrotated_x, \
RADECrotated_y, \
RADECrotated_z = tilt_xyz_y(RArotated_x, \
RArotated_y, \
RArotated_z, center_DEC)
if output.lower() == 'xyz':
return RADECrotated_x, RADECrotated_y, RADECrotated_z
elif output.lower() == 'radec':
# calculate RA/DEC again
return None
def RADEC_to_xyz(RA, DEC):
x = cos(RA)*cos(DEC)
y = sin(RA)*cos(DEC)
z = sin(DEC)
return x,y,z
def tilt_xyz_y(x, y, z, angle, x_only=False):
# tilt xyz coordinates along y_axis by amount angle
# x_only: if only radius matters, (for gsc region selection),
# don't calculate y and z
xx = x*cos(angle)+z*sin(angle)
if x_only:
return xx
yy = y
zz = -x*sin(angle)+z*cos(angle)
return xx,yy,zz
def xyz_radius_from_origin(x, *args):
return arccos(x)
def fov_radius(fov, projection=projections.stereographic):
# return half-diagonal radius of rectangular fov of given width/height
# with given projection
fov = np.radians(np.array(fov)) # if fov wasn't already array
half_fov_angle = fov/2
half_fov_imageplane = projection(half_fov_angle)
half_diagonal_imageplane = np.hypot(*half_fov_imageplane)
half_diagonal_radians = projection(half_diagonal_imageplane, inverse=True)
return np.degrees(half_diagonal_radians)
def radius2fov(radius, aspect_ratio, projection=projections.stereographic):
# aspect_ratio = height/width
half_diagonal_radians = np.radians(radius)
half_diagonal_imageplane = projection(half_diagonal_radians)
diagonal_imageplane = 2 * half_diagonal_imageplane
width_imageplane = diagonal_imageplane**2 / (1 + aspect_ratio**2)
height_imageplane = aspect_ratio*width_imageplane
fov_imageplane = np.array([width_imageplane, height_imageplane])
half_fov_imageplane = fov_imageplane/2
half_fov_radians = projection(half_fov_imageplane, inverse=True)
fov_radians = half_fov_radians*2
return np.degrees(fov_radians), np.array([width_imageplane, height_imageplane])
def xyz_to_imagexy(x, y, z, \
rotation=0, projection=projections.stereographic, include_R=False):
# project xyz coordinates on a sphere to image plane
# R can be returned for filtering GSR regions
# calculate angular distance from image center along sphere
R = xyz_radius_from_origin(x)
r = projection(R)
# polar angle of region coordinates in image plane
T = arctan2(z, y)
T += rotation
image_x = -r * cos(T)
image_y = r * sin(T)
if include_R:
return image_x, image_y, R
return image_x, image_y
# transform X/Y star locations from image plane coordinates to pixel coordinates (non-integer)
# in: X/Y stars, sensor dimensions, pixel counts
def imagexy_to_pixelXY(xy, sensor_size=None, resolution=None, pixel_scale=None, axis='ij'):
# x,y star locations on image plane to X,Y pixel coordinates (non-integer)
x, y = xy
if axis == 'ij':
y *= -1
else: # 'xy'
pass
sensor_width, sensor_height = sensor_size
pixels_x, pixels_y = resolution
X = (x+sensor_width)/sensor_width*pixels_x/2
Y = (y+sensor_height)/sensor_height*pixels_y/2
return X, Y
| # -*- coding: utf-8 -*-
"""
Created on Sat Oct 21 22:35:50 2017
@author: lauri.kangas
"""
import numpy as np
from numpy import sin,cos,arccos,arctan2,mod,pi
from projections import stereographic
def rotate_RADEC(RAs, DECs, center_RA, center_DEC, output='xyz'):
# rotate RA,DEC coordinates to turn center_RA,center_DEC to origin
# RA can be rotated first
RArotated_RAs = mod(RAs - center_RA, 2*pi)
# convert to rectangular coordinates
RArotated_x, \
RArotated_y, \
RArotated_z = RADEC_to_xyz(RArotated_RAs, DECs)
# now we can rotate by center_DEC.
RADECrotated_x, \
RADECrotated_y, \
RADECrotated_z = tilt_xyz_y(RArotated_x, \
RArotated_y, \
RArotated_z, center_DEC)
if output.lower() == 'xyz':
return RADECrotated_x, RADECrotated_y, RADECrotated_z
elif output.lower() == 'radec':
# calculate RA/DEC again
return None
def RADEC_to_xyz(RA, DEC):
x = cos(RA)*cos(DEC)
y = sin(RA)*cos(DEC)
z = sin(DEC)
return x,y,z
def tilt_xyz_y(x, y, z, angle, x_only=False):
# tilt xyz coordinates along y_axis by amount angle
# x_only: if only radius matters, (for gsc region selection),
# don't calculate y and z
xx = x*cos(angle)+z*sin(angle)
if x_only:
return xx
yy = y
zz = -x*sin(angle)+z*cos(angle)
return xx,yy,zz
def xyz_radius_from_origin(x, *args):
return arccos(x)
def fov_radius(fov, projection=stereographic):
# return half-diagonal radius of rectangular fov of given width/height
# with given projection
fov = np.radians(np.array(fov)) # if fov wasn't already array
half_fov_angle = fov/2
half_fov_imageplane = projection(half_fov_angle)
half_diagonal_imageplane = np.hypot(*half_fov_imageplane)
half_diagonal_radians = projection(half_diagonal_imageplane, inverse=True)
return np.degrees(half_diagonal_radians)
def xyz_to_imagexy(x, y, z, \
rotation=0, projection=stereographic, include_R=False):
# project xyz coordinates on a sphere to image plane
# R can be returned for filtering GSR regions
# calculate angular distance from image center along sphere
R = xyz_radius_from_origin(x)
r = projection(R)
# polar angle of region coordinates in image plane
T = arctan2(z, y)
T += rotation
image_x = -r * cos(T)
image_y = r * sin(T)
if include_R:
return image_x, image_y, R
return image_x, image_y
| Python | 0 |
7eb10376b585e56faad4672959f6654f2500a38d | Add `one` as shortcut to `dimensionless_unscaled` | astropy/units/__init__.py | astropy/units/__init__.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains classes and functions for defining and converting
between different physical units.
This code is adapted from the `pynbody
<http://code.google.com/p/pynbody/>`_ units module written by Andrew
Pontzen, who has granted the Astropy project permission to use the
code under a BSD license.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from .core import *
from .quantity import *
from . import si
from . import cgs
from . import astrophys
from .si import *
from .astrophys import *
from .cgs import *
from .physical import *
from .equivalencies import *
del bases
one = dimensionless_unscaled
# Enable the set of default units. This notably does *not* include
# Imperial units.
set_enabled_units([si, cgs, astrophys])
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains classes and functions for defining and converting
between different physical units.
This code is adapted from the `pynbody
<http://code.google.com/p/pynbody/>`_ units module written by Andrew
Pontzen, who has granted the Astropy project permission to use the
code under a BSD license.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from .core import *
from .quantity import *
from . import si
from . import cgs
from . import astrophys
from .si import *
from .astrophys import *
from .cgs import *
from .physical import *
from .equivalencies import *
del bases
# Enable the set of default units. This notably does *not* include
# Imperial units.
set_enabled_units([si, cgs, astrophys])
| Python | 0.999976 |
865940bd126c7c45b7c615f751244a46176aca4d | Update version to 2.3b2-dev | openslides/__init__.py | openslides/__init__.py | __author__ = 'OpenSlides Team <support@openslides.org>'
__description__ = 'Presentation and assembly system'
__version__ = '2.3b2-dev'
__license__ = 'MIT'
__url__ = 'https://openslides.org'
args = None
| __author__ = 'OpenSlides Team <support@openslides.org>'
__description__ = 'Presentation and assembly system'
__version__ = '2.3b1'
__license__ = 'MIT'
__url__ = 'https://openslides.org'
args = None
| Python | 0 |
8a9fa06c36a89e3fde93059cfbe827506d5b8b62 | Disable exception logging of status code 500 during testing. | orchard/errors/e500.py | orchard/errors/e500.py | # -*- coding: utf-8 -*-
"""
This module sets up the view for handling ``500 Internal Server Error`` errors.
"""
import datetime
import flask
import flask_classful
from orchard.errors import blueprint
class Error500View(flask_classful.FlaskView):
"""
View for ``500 Internal Server Error`` errors.
"""
trailing_slash = False
@blueprint.app_errorhandler(500)
@blueprint.app_errorhandler(Exception)
def index(self) -> str:
"""
Display the error page for internal errors and send a mail to all administrators
information them of this error.
:return: A page explaining the error.
"""
message = ('Time: {time}\n' +
'Request: {method} {path}\n' +
'Agent: {agent_platform} | {agent_browser} {agent_browser_version}\n' +
'Raw Agent: {agent}\n\n'
).format(time = datetime.datetime.now(),
method = flask.request.method,
path = flask.request.path,
agent_platform = flask.request.user_agent.platform,
agent_browser = flask.request.user_agent.browser,
agent_browser_version = flask.request.user_agent.version,
agent = flask.request.user_agent.string)
if not flask.current_app.testing: # pragma: no cover.
flask.current_app.logger.exception(message)
return flask.render_template('errors/500.html')
Error500View.register(blueprint)
| # -*- coding: utf-8 -*-
"""
This module sets up the view for handling ``500 Internal Server Error`` errors.
"""
import datetime
import flask
import flask_classful
from orchard.errors import blueprint
class Error500View(flask_classful.FlaskView):
"""
View for ``500 Internal Server Error`` errors.
"""
trailing_slash = False
@blueprint.app_errorhandler(500)
@blueprint.app_errorhandler(Exception)
def index(self) -> str:
"""
Display the error page for internal errors and send a mail to all administrators
information them of this error.
:return: A page explaining the error.
"""
message = ('Time: {time}\n' +
'Request: {method} {path}\n' +
'Agent: {agent_platform} | {agent_browser} {agent_browser_version}\n' +
'Raw Agent: {agent}\n\n'
).format(time = datetime.datetime.now(),
method = flask.request.method,
path = flask.request.path,
agent_platform = flask.request.user_agent.platform,
agent_browser = flask.request.user_agent.browser,
agent_browser_version = flask.request.user_agent.version,
agent = flask.request.user_agent.string)
flask.current_app.logger.exception(message)
return flask.render_template('errors/500.html')
Error500View.register(blueprint)
| Python | 0 |
b8d693a8fd2e0fb9fa8592b9672bc71e874547d3 | Bump version to 0.1.1 | fancypages/__init__.py | fancypages/__init__.py | import os
__version__ = (0, 1, 1, 'alpha', 1)
def get_fancypages_paths(path):
""" Get absolute paths for *path* relative to the project root """
return [os.path.join(os.path.dirname(os.path.abspath(__file__)), path)]
def get_apps():
return (
'django_extensions',
# used for image thumbnailing
'sorl.thumbnail',
# framework used for the internal API
'rest_framework',
# provides a convenience layer around model inheritance
# that makes lookup of nested models easier. This is used
# for the content block hierarchy.
'model_utils',
# static file compression and collection
'compressor',
# migration handling
'south',
# package used for twitter block
'twitter_tag',
# actual apps provided by fancypages
'fancypages.assets',
'fancypages',
)
| import os
__version__ = (0, 1, 0, 'alpha', 1)
def get_fancypages_paths(path):
""" Get absolute paths for *path* relative to the project root """
return [os.path.join(os.path.dirname(os.path.abspath(__file__)), path)]
def get_apps():
return (
'django_extensions',
# used for image thumbnailing
'sorl.thumbnail',
# framework used for the internal API
'rest_framework',
# provides a convenience layer around model inheritance
# that makes lookup of nested models easier. This is used
# for the content block hierarchy.
'model_utils',
# static file compression and collection
'compressor',
# migration handling
'south',
# package used for twitter block
'twitter_tag',
# actual apps provided by fancypages
'fancypages.assets',
'fancypages',
)
| Python | 0.000002 |
4234da1953121f6384e366966ec3ce59d604c00c | Add tests to verify softwarecontainer functionality | lib/oeqa/runtime/cases/softwarecontainer.py | lib/oeqa/runtime/cases/softwarecontainer.py | import os
from oeqa.oetest import oeRuntimeTest
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.utils.decorators import skipUnlessPassed
class SoftwareContainerTest(OERuntimeTestCase):
    """On-target checks for the softwarecontainer package and its D-Bus agent.

    Every test shells out to the device under test via ``self.target.run``
    and asserts on the command's exit status (0 == success).  Tests that
    depend on an earlier test are skipped when that prerequisite failed.
    """

    def test_softwarecontainer_install(self):
        # grep exits non-zero when no installed package matches the name.
        (status, _) = self.target.run('rpm -qa | grep softwarecontainer')
        self.assertEqual(status, 0, "softwarecontainer package is not installed")

    @skipUnlessPassed("test_softwarecontainer_install")
    def test_softwarecontainer_running(self):
        # Verify the agent process is alive on the target.
        (status, _) = self.target.run('ps -ef | grep softwarecontainer-agent')
        self.assertEqual(status, 0, msg="No softwarecontainer-agent process running, \
        ps output: %s" % self.target.run(oeRuntimeTest.pscmd)[1])

    @skipUnlessPassed("test_softwarecontainer_running")
    def test_softwarecontainer_restart(self):
        # The systemd unit must restart cleanly.
        (status, output) = self.target.run('systemctl restart softwarecontainer-agent')
        self.assertEqual(status, 0, "Couldn't restart softwarecontainer: %d %s" % (status, output))

    @skipUnlessPassed("test_softwarecontainer_install")
    def test_lib_exists(self):
        # NOTE(review): the library version is hard-coded in this path; it
        # must be kept in sync with the packaged softwarecontainer version.
        lib_file = "/usr/lib/libsoftwarecontainer.so.0.18.0"
        (status, output) = self.target.run('stat %s' % lib_file)
        self.assertEqual(status, 0, "Couldn't find %s" % lib_file)
        self.assertTrue("file" in output, "%s is not a file" % lib_file)

    @skipUnlessPassed("test_softwarecontainer_install")
    def test_dbus_config_exists(self):
        # The agent's D-Bus policy file must be installed and be a regular file.
        dbus_config_file = "/etc/dbus-1/system.d/softwarecontainer-agent.conf"
        (status, output) = self.target.run('stat %s' % dbus_config_file)
        self.assertEqual(status, 0, "Couldn't find %s" % dbus_config_file)
        self.assertTrue("file" in output, "%s is not a file" % dbus_config_file)

    @skipUnlessPassed("test_softwarecontainer_install")
    def test_softwarecontainer_agent(self):
        # End-to-end exercise of the agent's D-Bus API:
        # Create -> BindMount -> Execute -> Destroy, checking the on-disk
        # side effects under /tmp/container/SC-0 after each step.
        # 1- Create a software container
        (status, output) = self.target.run('dbus-send --system --print-reply \
                            --dest=com.pelagicore.SoftwareContainerAgent \
                            /com/pelagicore/SoftwareContainerAgent \
                            com.pelagicore.SoftwareContainerAgent.Create \
                            string:\'[{"writeBufferEnabled": false}]\'')
        self.assertEqual(status, 0, "Create: %d %s" % (status, output))
        container_path = "/tmp/container/SC-0/gateways"
        (status, output) = self.target.run('stat %s' % container_path)
        self.assertEqual(status, 0, "Couldn't find %s" % container_path)
        self.assertTrue("directory" in output, "%s is not a directory" % container_path)

        # 2- Bind mount a host directory on to container directory
        swc_dir = "/home/root/softwarecontainer"
        (status, _) = self.target.run('mkdir %s' % swc_dir)
        self.assertEqual(status, 0, "Couldn't create %s" % swc_dir)
        (status, output) = self.target.run('dbus-send --system --print-reply \
                            --dest=com.pelagicore.SoftwareContainerAgent \
                            /com/pelagicore/SoftwareContainerAgent \
                            com.pelagicore.SoftwareContainerAgent.BindMount \
                            int32:0 string:\'/home/root/softwarecontainer\' \
                            string:\'/app\' boolean:false')
        bindmount_path = "/tmp/container/SC-0/gateways/app"
        (status, output) = self.target.run('stat %s' % bindmount_path)
        self.assertEqual(status, 0, "Couldn't find %s" % bindmount_path)
        self.assertTrue("directory" in output, "%s is not a directory" % bindmount_path)

        # 3- Execute a command that creates a file inside the container
        (status, output) = self.target.run('dbus-send --system --print-reply \
                            --dest=com.pelagicore.SoftwareContainerAgent \
                            /com/pelagicore/SoftwareContainerAgent \
                            com.pelagicore.SoftwareContainerAgent.Execute \
                            int32:0 string:\'touch test.txt\' string:\'/app\' \
                            string:\'\' dict:string:string:\'\'')
        test_file = "/tmp/container/SC-0/gateways/app/test.txt"
        (status, output) = self.target.run('stat %s' % test_file)
        self.assertEqual(status, 0, "Couldn't find %s" % test_file)
        self.assertTrue("file" in output, "%s is not a file" % test_file)

        # 4- Destroy the container
        (status, output) = self.target.run('dbus-send --system --print-reply \
                            --dest=com.pelagicore.SoftwareContainerAgent \
                            /com/pelagicore/SoftwareContainerAgent \
                            com.pelagicore.SoftwareContainerAgent.Destroy int32:0')
        container_path = "/tmp/container/SC-0/gateways"
        # After Destroy, stat must fail because the gateway dir is gone.
        (status, output) = self.target.run('stat %s' % container_path)
        self.assertNotEqual(status, 0, "Failed to remove %s" % container_path)
| import os
from oeqa.oetest import oeRuntimeTest
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.utils.decorators import skipUnlessPassed
class SoftwareContainerTest(OERuntimeTestCase):
def test_softwarecontainer_installed(self):
(status, _) = self.target.run('rpm -qa | grep softwarecontainer')
self.assertEqual(status, 0, "softwarecontainer package is not installed")
@skipUnlessPassed("test_softwarecontainer_installed")
def test_softwarecontainer_running(self):
(status, _) = self.target.run('ps -ef | grep softwarecontainer-agent')
self.assertEqual(status, 0, msg="No softwarecontainer-agent process running, ps output: %s" %
self.target.run(oeRuntimeTest.pscmd)[1])
@skipUnlessPassed("test_softwarecontainer_running")
def test_softwarecontainer_restart(self):
(status, output) = self.target.run('systemctl restart softwarecontainer-agent')
self.assertEqual(status, 0, "Couldn't restart softwarecontainer: %d %s" % (status, output))
@skipUnlessPassed("test_softwarecontainer_installed")
def test_lib_exists(self):
lib_file = "/usr/lib/libsoftwarecontainer.so.0"
status = os.path.isfile(lib_file)
self.assertEqual(status, 0, "Couldn't find libsoftwarecontainer.so.0")
status = os.access(lib_file, os.R_OK)
self.assertEqual(status, 0, "Couldn't access libsoftwarecontainer.so.0")
@skipUnlessPassed("test_softwarecontainer_installed")
def test_dbus_config_exists(self):
dbus_config_file = "/etc/dbus-1/system.d/softwarecontainer-agent.conf"
status = os.path.isfile(dbus_config_file)
self.assertEqual(status, 0, "Couldn't find %s" % dbus_config_file)
status = os.access(dbus_config_file, os.R_OK)
self.assertEqual(status, 0, "Couldn't access %s" % dbus_config_file)
| Python | 0 |
b72f3ce27034ba3f810f205d133445267847f667 | fix CSRF get request | mpweb_core/rester.py | mpweb_core/rester.py | # coding: utf-8
# https://github.com/materialsproject/pymatgen/blob/1eb2f2f/pymatgen/matproj/rest.py
from __future__ import division, unicode_literals
import os, requests, json, warnings, urlparse
class MPResterBase(object):
    """
    A base class to conveniently interface with a REST interface in the style of
    the Materials Project. For your own "rester", inherit from MPResterBase and
    add convenience functions which return the result of HTTP requests via
    `MPResterBase._make_request(<URL>, ..)`. The recommended way to use the
    resulting `MPCustomRester` is with the "with" context manager to ensure that
    sessions are properly closed after usage::

        with MPCustomRester("API_KEY") as m:
            m.do_something()

    MPResterBase uses the "requests" package, which provides for HTTP connection
    pooling.

    Args:
        api_key (str): A String API key for accessing the REST interface. If
            this is None, the code will check if there is a "MAPI_KEY"
            environment variable set. If so, it will use that environment
            variable. This makes it easier for heavy users to simply add this
            environment variable to their setups and MPResterBase can then be
            called without any arguments.
        endpoint (str): URL of endpoint to access the REST interface. Defaults
            to the standard Materials Project REST address, but can be changed
            to other urls implementing a similar interface.
    """
    def __init__(self, api_key=None,
                 endpoint="https://www.materialsproject.org/rest/v2"):
        if api_key is not None:
            self.api_key = api_key
        else:
            # Fall back to the MAPI_KEY environment variable (empty if unset).
            self.api_key = os.environ.get("MAPI_KEY", "")
        self.preamble = endpoint
        # One pooled session is reused for all requests; the API key travels
        # in an "x-api-key" header on every call.
        self.session = requests.Session()
        self.session.headers = {"x-api-key": self.api_key}

    def __enter__(self):
        """Support for "with" context."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Support for "with" context: close the pooled HTTP session."""
        self.session.close()

    def _make_request(self, sub_url, payload=None, method="GET"):
        """Issue a GET or POST to ``self.preamble + sub_url`` and return the
        decoded "response" field of the JSON reply.

        :param sub_url: path appended to the endpoint preamble.
        :param payload: query params (GET) or form data (POST).
        :param method: "GET" or "POST".
        :raises MPResterError: on an invalid response payload, a status code
            other than 200/400, or any transport/decoding failure.
        """
        response = None
        url = self.preamble + sub_url
        try:
            # Django's CSRF protection requires a csrftoken cookie before a
            # POST is accepted; fetch it once from the browserid CSRF view.
            if self.session.cookies.get('csrftoken') is None:
                from django.core.urlresolvers import reverse
                uri = urlparse.urlparse(self.preamble)
                domain = '{uri.scheme}://{uri.netloc}'.format(uri=uri)
                site_url = uri.path.split('/')[1]  # test_site/
                browserid_csrf = reverse('browserid.csrf')
                # Only prepend the site prefix when reverse() did not already
                # include it, to avoid doubling it in the URL.
                if site_url[:-1] not in browserid_csrf:
                    domain += '/' + site_url
                domain += browserid_csrf
                self.session.get(domain)
            headers = {"X-CSRFToken": self.session.cookies.get('csrftoken')}
            response = self.session.post(url, data=payload, headers=headers) \
                if method == "POST" else self.session.get(url, params=payload)
            # 400 is included because the API returns structured errors with
            # that status; those are surfaced as MPResterError below.
            if response.status_code in [200, 400]:
                data = json.loads(response.text)
                if data["valid_response"]:
                    if data.get("warning"):
                        warnings.warn(data["warning"])
                    return data["response"]
                else:
                    raise MPResterError(data["error"])
            raise MPResterError(
                "REST query returned with error status code {}"
                .format(response.status_code)
            )
        except Exception as ex:
            # Include the raw body (when available) to ease debugging.
            msg = "{}. Content: {}".format(str(ex), repr(response.content)) \
                if hasattr(response, "content") else str(ex)
            raise MPResterError(msg)
class MPResterError(Exception):
    """Raised by MPResterBase when a REST query fails, e.g. a bad query
    format or an error response from the server."""
| # coding: utf-8
# https://github.com/materialsproject/pymatgen/blob/1eb2f2f/pymatgen/matproj/rest.py
from __future__ import division, unicode_literals
import os, requests, json, warnings, urlparse
class MPResterBase(object):
"""
A base class to conveniently interface with a REST interface in the style of
the Materials Project. For your own "rester", inherit from MPResterBase and
add convenience functions which return the result of HTTP requests via
`MPResterBase._make_request(<URL>, ..)`. The recommended way to use the
resulting `MPCustomRester` is with the "with" context manager to ensure that
sessions are properly closed after usage::
with MPCustomRester("API_KEY") as m:
m.do_something()
MPResterBase uses the "requests" package, which provides for HTTP connection
pooling.
Args:
api_key (str): A String API key for accessing the REST interface. If
this is None, the code will check if there is a "MAPI_KEY"
environment variable set. If so, it will use that environment
variable. This makes it easier for heavy users to simply add this
environment variable to their setups and MPResterBase can then be
called without any arguments.
endpoint (str): URL of endpoint to access the REST interface. Defaults
to the standard Materials Project REST address, but can be changed
to other urls implementing a similar interface.
"""
def __init__(self, api_key=None,
endpoint="https://www.materialsproject.org/rest/v2"):
if api_key is not None:
self.api_key = api_key
else:
self.api_key = os.environ.get("MAPI_KEY", "")
self.preamble = endpoint
self.session = requests.Session()
self.session.headers = {"x-api-key": self.api_key}
def __enter__(self):
"""Support for "with" context."""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Support for "with" context."""
self.session.close()
def _make_request(self, sub_url, payload=None, method="GET"):
response = None
url = self.preamble + sub_url
try:
if self.session.cookies.get('csrftoken') is None:
from django.core.urlresolvers import reverse
uri = urlparse.urlparse(self.preamble)
domain = '{uri.scheme}://{uri.netloc}/'.format(uri=uri)
domain += uri.path.split('/')[1] # test_site/
domain += reverse('browserid.csrf')
self.session.get(domain)
headers = {"X-CSRFToken": self.session.cookies.get('csrftoken')}
response = self.session.post(url, data=payload, headers=headers) \
if method == "POST" else self.session.get(url, params=payload)
if response.status_code in [200, 400]:
data = json.loads(response.text)
if data["valid_response"]:
if data.get("warning"):
warnings.warn(data["warning"])
return data["response"]
else:
raise MPResterError(data["error"])
raise MPResterError(
"REST query returned with error status code {}"
.format(response.status_code)
)
except Exception as ex:
msg = "{}. Content: {}".format(str(ex), repr(response.content)) \
if hasattr(response, "content") else str(ex)
raise MPResterError(msg)
class MPResterError(Exception):
"""
Exception class for MPResterBase.
Raised when the query has problems, e.g., bad query format.
"""
pass
| Python | 0 |
622d3ae0d6434ff84a96d5e51b1d14ac70364c28 | make this a module that can be imported for testing | multigrid/mg_test.py | multigrid/mg_test.py | #!/usr/bin/env python
"""
an example of using the multigrid class to solve Laplace's equation. Here, we
solve
u_xx + u_yy = -2[(1-6x**2)y**2(1-y**2) + (1-6y**2)x**2(1-x**2)]
u = 0 on the boundary
this is the example from page 64 of the book `A Multigrid Tutorial, 2nd Ed.'
The analytic solution is u(x,y) = (x**2 - x**4)(y**4 - y**2)
"""
from __future__ import print_function
import numpy as np
import MG
import matplotlib.pyplot as plt
import compare
# the analytic solution
def true(x, y):
    """Analytic solution of the test problem: u(x, y) = (x**2 - x**4)(y**4 - y**2)."""
    # Factored form: x^2 - x^4 = x^2 (1 - x^2) and y^4 - y^2 = y^2 (y^2 - 1).
    x_part = x ** 2 * (1.0 - x ** 2)
    y_part = y ** 2 * (y ** 2 - 1.0)
    return x_part * y_part
# the L2 error norm
def error(myg, r):
    """Return the cell-area-normalized L2 norm of r over the interior of grid myg."""
    interior = r[myg.ilo:myg.ihi + 1, myg.jlo:myg.jhi + 1]
    squared = interior ** 2
    # Multiplying by dx*dy makes the norm independent of grid resolution.
    return np.sqrt(myg.dx * myg.dy * np.sum(squared.flat))
# the righthand side
def f(x, y):
    """Right-hand side of the Poisson test problem ('A Multigrid Tutorial', 2nd Ed., p. 64)."""
    term_x = (1.0 - 6.0 * x ** 2) * y ** 2 * (1.0 - y ** 2)
    term_y = (1.0 - 6.0 * y ** 2) * x ** 2 * (1.0 - x ** 2)
    return -2.0 * (term_x + term_y)
def test_poisson_dirichlet(N, store_bench=False, comp_bench=False,
                           make_plot=False):
    """Solve the Dirichlet Poisson test problem on an N x N grid.

    Optionally saves a plot, stores the solution as a benchmark, and/or
    compares against a stored benchmark.  Returns the comparison result
    (0 on match) when comp_bench is True, otherwise None.
    """
    # test the multigrid solver
    nx = N
    ny = nx

    # create the multigrid object with homogeneous Dirichlet BCs on all sides
    a = MG.CellCenterMG2d(nx, ny,
                          xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                          xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                          verbose=1)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    #a.smooth(a.nlevels-1,50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d, a.y2d)
    e = v - b

    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" % \
          (error(a.soln_grid, e), a.relative_error, a.num_cycles))

    # plot it
    if make_plot:
        plt.figure(num=1, figsize=(5.0,5.0), dpi=100, facecolor='w')
        plt.imshow(np.transpose(v[a.ilo:a.ihi+1,a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.savefig("mg_test.png")

    # store the output for later comparison
    bench = "mg_poisson_dirichlet"
    if store_bench:
        my_data = a.get_solution_object()
        my_data.write("tests/{}".format(bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "tests/{}".format(bench)
        # NOTE(review): `msg`, `patch`, and `sim` are not defined or imported
        # in this module -- this branch will raise NameError if exercised;
        # presumably `sim` should be the solution object from `a` and `msg`/
        # `patch` come from a util/patch module.  Confirm and fix imports.
        msg.warning("comparing to: %s " % (compare_file) )
        bench_grid, bench_data = patch.read(compare_file)
        result = compare.compare(sim.cc_data.grid, sim.cc_data, bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")
        return result

    return None
if __name__ == "__main__":
test_poisson_dirichlet(256)
| #!/usr/bin/env python
"""
an example of using the multigrid class to solve Laplace's equation. Here, we
solve
u_xx + u_yy = -2[(1-6x**2)y**2(1-y**2) + (1-6y**2)x**2(1-x**2)]
u = 0 on the boundary
this is the example from page 64 of the book `A Multigrid Tutorial, 2nd Ed.'
The analytic solution is u(x,y) = (x**2 - x**4)(y**4 - y**2)
"""
from __future__ import print_function
import numpy as np
import MG
import matplotlib.pyplot as plt
# the analytic solution
def true(x,y):
return (x**2 - x**4)*(y**4 - y**2)
# the L2 error norm
def error(myg, r):
# L2 norm of elements in r, multiplied by dx to
# normalize
return np.sqrt(myg.dx*myg.dy*np.sum((r[myg.ilo:myg.ihi+1,myg.jlo:myg.jhi+1]**2).flat))
# the righthand side
def f(x,y):
return -2.0*((1.0-6.0*x**2)*y**2*(1.0-y**2) + (1.0-6.0*y**2)*x**2*(1.0-x**2))
# test the multigrid solver
nx = 256
ny = nx
# create the multigrid object
a = MG.CellCenterMG2d(nx, ny,
xl_BC_type="dirichlet", yl_BC_type="dirichlet",
xr_BC_type="dirichlet", yr_BC_type="dirichlet",
verbose=1)
# initialize the solution to 0
a.init_zeros()
# initialize the RHS using the function f
rhs = f(a.x2d, a.y2d)
a.init_RHS(rhs)
# solve to a relative tolerance of 1.e-11
a.solve(rtol=1.e-11)
# alternately, we can just use smoothing by uncommenting the following
#a.smooth(a.nlevels-1,50000)
# get the solution
v = a.get_solution()
# compute the error from the analytic solution
b = true(a.x2d,a.y2d)
e = v - b
print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" % \
(error(a.soln_grid, e), a.relative_error, a.num_cycles))
# plot it
plt.figure(num=1, figsize=(5.0,5.0), dpi=100, facecolor='w')
plt.imshow(np.transpose(v[a.ilo:a.ihi+1,a.jlo:a.jhi+1]),
interpolation="nearest", origin="lower",
extent=[a.xmin, a.xmax, a.ymin, a.ymax])
plt.xlabel("x")
plt.ylabel("y")
plt.savefig("mg_test.png")
# store the output for later comparison
my_data = a.get_solution_object()
my_data.write("mg_test")
| Python | 0.000001 |
1506dda66814b8f51ec2dcbf2e632bdafa98bf75 | add root node info to form | arches/app/views/graph.py | arches/app/views/graph.py | '''
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from django.views.decorators.csrf import csrf_exempt
from django.db import transaction
from django.shortcuts import render
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.http import HttpResponseNotFound
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
from arches.app.utils.JSONResponse import JSONResponse
from arches.app.models.resource_graphs import ResourceGraph
from arches.app.models import models
@csrf_exempt
def manager(request, nodeid):
    """Render the graph manager page for the resource graph rooted at *nodeid*.

    Builds the graph itself, the list of available branches (each annotated
    with its root node and fully expanded graph), and the datatype list, then
    hands everything to the graph-manager template as serialized JSON.
    """
    graph = ResourceGraph(nodeid)
    branches = JSONSerializer().serializeToPython(models.BranchMetadata.objects.all())
    # Root nodes of branch graphs: top nodes that belong to some branch metadata.
    branch_nodes = models.Node.objects.filter(~Q(branchmetadata=None), istopnode=True)
    for branch in branches:
        rootnode = branch_nodes.get(branchmetadata_id=branch['branchmetadataid'])
        branch['rootnode'] = rootnode
        branch['graph'] = ResourceGraph(rootnode)
    datatypes = models.DDataType.objects.all()
    return render(request, 'graph-manager.htm', {
        'main_script': 'graph-manager',
        'graph': JSONSerializer().serialize(graph),
        'branches': JSONSerializer().serialize(branches),
        'datatypes': JSONSerializer().serialize(datatypes),
        'node_list': {
            'title': _('Node List'),
            'search_placeholder': _('Find a node in the graph')
        },
        'permissions_list': {
            'title': _('Permissions'),
            'search_placeholder': _('Find a group or user account')
        },
        'branch_list': {
            'title': _('Branch Library'),
            'search_placeholder': _('Find a graph branch')
        }
    })
@csrf_exempt
def node(request, nodeid):
    """Update (POST) or delete (DELETE) the node identified by *nodeid*.

    POST expects a JSON body of node fields and saves them atomically.
    DELETE removes the node's incoming edge and the whole subgraph rooted
    at the node.  Any other method (or a missing body) yields a 404.
    """
    if request.method == 'POST':
        data = JSONDeserializer().deserialize(request.body)
        if data:
            with transaction.atomic():
                node = models.Node.objects.get(nodeid=nodeid)
                node.name = data.get('name', '')
                node.description = data.get('description','')
                node.istopnode = data.get('istopnode','')
                node.crmclass = data.get('crmclass','')
                node.datatype = data.get('datatype','')
                node.status = data.get('status','')
                node.save()
                return JSONResponse(node)

    if request.method == 'DELETE':
        data = JSONDeserializer().deserialize(request.body)
        if data:
            with transaction.atomic():
                node = models.Node.objects.get(nodeid=nodeid)
                # Detach the node from its parent before tearing down the
                # subgraph below it.
                edge = models.Edge.objects.get(rangenode=node)
                edge.delete()
                graph = ResourceGraph(nodeid)
                for graph_edge in graph.edges:
                    graph_edge.delete()
                # Use a distinct loop variable so the outer `node` is not
                # shadowed inside the transaction.
                for graph_node in graph.nodes:
                    graph_node.delete()
                return JSONResponse({})

    # Fix: a Django view must return a response *instance*; the original
    # returned the HttpResponseNotFound class object itself.
    return HttpResponseNotFound()
@csrf_exempt
def appendbranch(request, nodeid, branchid):
    """Handle appending a branch to the node identified by *nodeid*.

    POST expects a JSON body of node fields and currently just updates the
    node, mirroring the POST handler in ``node``.
    NOTE(review): *branchid* is accepted but never used here -- presumably
    the actual branch-append logic is still to be implemented; confirm.
    """
    if request.method == 'POST':
        data = JSONDeserializer().deserialize(request.body)
        if data:
            with transaction.atomic():
                node = models.Node.objects.get(nodeid=nodeid)
                node.name = data.get('name', '')
                node.description = data.get('description','')
                node.istopnode = data.get('istopnode','')
                node.crmclass = data.get('crmclass','')
                node.datatype = data.get('datatype','')
                node.status = data.get('status','')
                node.save()
                return JSONResponse(node)

    # Fix: return an HttpResponseNotFound *instance* -- returning the class
    # itself is not a valid Django response.
    return HttpResponseNotFound()
| '''
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from django.views.decorators.csrf import csrf_exempt
from django.db import transaction
from django.shortcuts import render
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.http import HttpResponseNotFound
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
from arches.app.utils.JSONResponse import JSONResponse
from arches.app.models.resource_graphs import ResourceGraph
from arches.app.models import models
@csrf_exempt
def manager(request, nodeid):
graph = ResourceGraph(nodeid)
branches = JSONSerializer().serializeToPython(models.BranchMetadata.objects.all())
branch_nodes = models.Node.objects.filter(~Q(branchmetadata=None), istopnode=True)
for branch in branches:
branch['graph'] = ResourceGraph(branch_nodes.get(branchmetadata_id=branch['branchmetadataid']))
datatypes = models.DDataType.objects.all()
return render(request, 'graph-manager.htm', {
'main_script': 'graph-manager',
'graph': JSONSerializer().serialize(graph),
'branches': JSONSerializer().serialize(branches),
'datatypes': JSONSerializer().serialize(datatypes),
'node_list': {
'title': _('Node List'),
'search_placeholder': _('Find a node in the graph')
},
'permissions_list': {
'title': _('Permissions'),
'search_placeholder': _('Find a group or user account')
},
'branch_list': {
'title': _('Branch Library'),
'search_placeholder': _('Find a graph branch')
}
})
@csrf_exempt
def node(request, nodeid):
if request.method == 'POST':
data = JSONDeserializer().deserialize(request.body)
if data:
with transaction.atomic():
node = models.Node.objects.get(nodeid=nodeid)
node.name = data.get('name', '')
node.description = data.get('description','')
node.istopnode = data.get('istopnode','')
node.crmclass = data.get('crmclass','')
node.datatype = data.get('datatype','')
node.status = data.get('status','')
node.save()
return JSONResponse(node)
if request.method == 'DELETE':
data = JSONDeserializer().deserialize(request.body)
if data:
with transaction.atomic():
node = models.Node.objects.get(nodeid=nodeid)
edge = models.Edge.objects.get(rangenode=node)
edge.delete()
graph = ResourceGraph(nodeid)
for edge in graph.edges:
edge.delete()
for node in graph.nodes:
node.delete()
return JSONResponse({})
return HttpResponseNotFound
| Python | 0 |
b699f950eebbe10c400e9867ce8bead02d2f651c | Remove another thing. | src/txacme/interfaces.py | src/txacme/interfaces.py | # -*- coding: utf-8 -*-
"""
Interface definitions for txacme.
"""
from zope.interface import Interface
class ITLSSNI01Responder(Interface):
    """
    Configuration for a tls-sni-01 challenge responder.

    The actual responder may exist somewhere else, this interface is merely for
    an object that knows how to configure it.
    """
    # NOTE: zope.interface method declarations deliberately omit ``self``.
    def start_responding(server_name):
        """
        Start responding for a particular challenge.

        .. seealso:: `txacme.util.generate_tls_sni_01_cert`

        :param str server_name: The server name to respond to: ie.
            `u'<hex>.<hex>.acme.invalid'`.

        :rtype: `~twisted.internet.defer.Deferred`
        :return: A deferred firing when the given hostname is ready to respond
            with the given authorization.
        """

    def stop_responding(server_name):
        """
        Stop responding for a particular challenge.

        May be a noop if a particular responder does not need or implement
        explicit cleanup; implementations should not rely on this method always
        being called.

        :param str server_name: The server name to stop responding for: ie.
            `u'<hex>.<hex>.acme.invalid'`.
        """
__all__ = ['ITLSSNI01Responder']
| # -*- coding: utf-8 -*-
"""
Interface definitions for txacme.
"""
from zope.interface import Interface
class ITLSSNI01Responder(Interface):
"""
Configuration for a tls-sni-01 challenge responder.
The actual responder may exist somewhere else, this interface is merely for
an object that knows how to configure it.
"""
def start_responding(server_name):
"""
Start responding for a particular challenge.
.. seealso:: `txacme.util.generate_tls_sni_01_cert`
:param str server_name: The server name to respond to: ie.
`u'<hex>.<hex>.acme.invalid'`.
:rtype: `~twisted.internet.defer.Deferred`
:return: A deferred firing when the given hostname is ready to respond
with the given authorization.
"""
def stop_responding(server_name):
"""
Stop responding for a particular challenge.
May be a noop if a particular responder does not need or implement
explicit cleanup; implementations should not rely on this method always
being called.
:param str server_name: The server name to stop responding for: ie.
`u'<hex>.<hex>.acme.invalid'`.
"""
class ICertificateStore(Interface):
"""
A store of certificate/keys/chains.
"""
def get(self, server_name):
"""
Retrieve the current PEM objects for the given server name.
:param str server_name: The server name.
:raises KeyError: if the given name does not exist in the store.
:return: ``Deferred[List[:ref:`pem-objects`]]``
"""
def store(self, server_name, pem_objects):
"""
Store PEM objects for the given server name.
Implementations do not have to permit invoking this with a server name
that was not already present in the store.
:param str server_name: The server name to update.
:param pem_objects: A list of :ref:`pem-objects`; must contain exactly
one private key, a certificate corresponding to that private key,
and zero or more chain certificates.
:rtype: ``Deferred``
"""
def as_dict(self):
"""
Get all certificates in the store.
:rtype: ``Deferred[Dict[str, List[:ref:`pem-objects`]]]``
:return: A deferred firing with a dict mapping server names to
:ref:`pem-objects`.
"""
__all__ = ['ITLSSNI01Responder']
| Python | 0.000001 |
358729ade26e9a8a101bd77d574d9f5e1f065b0d | Delete single question | relier/api/question.py | relier/api/question.py | from flask import abort, request, make_response
from relier.models import Event, Question, Answer
from relier.api import AuthenticatedResource
from datetime import datetime
from flask import g
class QuestionResource(AuthenticatedResource):
    """Create questions on an event."""

    def post(self, event_id):
        """Create a question on event *event_id* from a JSON body {'content': ...}.

        Responds 403 when the user may not ask, 400 on a missing/empty body,
        404 when the event does not exist, 500 on a storage failure, and 201
        (with a Location header) on success.
        """
        if not g.user.can_ask:
            abort(403)

        try:
            body = request.json
            content = body['content'].encode('utf-8')
        except Exception:
            abort(400)

        if not content:
            abort(400)

        # Fix: peewee's Model.get raises DoesNotExist instead of returning
        # None, so the previous `if not event: abort(404)` could never fire
        # (a missing event produced a 500).  Catch the exception, matching
        # the pattern already used in QuestionInstance.delete.
        try:
            event = Event.get(Event.id == event_id)
        except Event.DoesNotExist:
            abort(404)

        try:
            question = Question.create(created=datetime.now(),
                content=content, event=event)
        except Exception:
            abort(500)

        response = make_response('', 201)
        response.headers['Location'] = '/events/{id_}/questions/{question_id_}'.format(id_ = event.id, question_id_ = question.id)
        return response
class QuestionInstance(AuthenticatedResource):
    """Retrieve or delete a single question."""

    # Retrieve single Question
    def get(self, event_id, question_id):
        # NOTE(review): event_id comes from the route but is not used in the
        # lookup -- questions are fetched by primary key alone; confirm that
        # cross-event access is intended.
        if Question.select().where(Question.id == question_id).count() == 0:
            abort(404)
        question = Question.get(Question.id == question_id)
        return QuestionInstance.question_to_json(question)

    def delete(self, event_id, question_id):
        # Admin-only: remove the question together with any attached answers.
        if not g.user.is_admin:
            abort(403)
        question = None
        try:
            question = Question.get(Question.id == question_id)
        except Question.DoesNotExist:
            abort(404)
        # Delete dependent answers first to avoid dangling references.
        answer_delete_query = Answer.delete().where(Answer.question == question)
        answer_delete_query.execute()
        question.delete_instance();
        response = make_response('', 204)
        return response

    @staticmethod
    def question_to_json(question):
        """Serialize a Question (plus its answer, if any) to a JSON-friendly dict."""
        # An unanswered question serializes with an empty-string 'answer'.
        answer_json = ''
        try:
            answer = Answer.get(Answer.question == question)
            answer_json = answer.JSON()
        except Exception:
            pass
        return {
            'id': question.id,
            'content': question.content,
            'created': question.created.strftime('%Y-%m-%d %H:%M'),
            'updated': question.updated.strftime('%Y-%m-%d %H:%M') if question.updated else '',
            'answer': answer_json
        }
class AnswerResource(AuthenticatedResource):
def post(self, event_id, question_id):
if not g.user.can_answer:
abort(403)
try:
body = request.json
content = body['content'].encode('utf-8')
except Exception as e:
print e
abort(400)
question = Question.get(Question.id == question_id)
if not question:
abort(400)
answer = Answer.create( question = question,
created = datetime.now(),
content = content)
response = make_response('', 201)
response.headers['Location'] = '/events/{id_}/questions/{question_id_}/answers/{answer_id_}'.format(id_ = event_id, question_id_ = question.id, answer_id_ = answer.id)
return response
| from flask import abort, request, make_response
from relier.models import Event, Question, Answer
from relier.api import AuthenticatedResource
from datetime import datetime
from flask import g
class QuestionResource(AuthenticatedResource):
def post(self, event_id):
if not g.user.can_ask:
abort(403)
event = None
try:
body = request.json
content = body['content'].encode('utf-8')
except Exception:
abort(400)
if not content:
abort(400)
event = Event.get(Event.id == event_id)
if not event:
abort(404)
try:
question = Question.create(created=datetime.now(),
content=content, event=event)
except Exception:
abort(500)
response = make_response('', 201)
response.headers['Location'] = '/events/{id_}/questions/{question_id_}'.format(id_ = event.id, question_id_ = question.id)
return response
class QuestionInstance(AuthenticatedResource):
# Retrieve single Question
def get(self, event_id, question_id):
if Question.select().where(Question.id == question_id).count() == 0:
abort(404)
question = Question.get(Question.id == question_id)
return QuestionInstance.question_to_json(question)
def delete(self, event_id, question_id):
pass
@staticmethod
def question_to_json(question):
answer_json = ''
try:
answer = Answer.get(Answer.question == question)
answer_json = answer.JSON()
except Exception:
pass
return {
'id': question.id,
'content': question.content,
'created': question.created.strftime('%Y-%m-%d %H:%M'),
'updated': question.updated.strftime('%Y-%m-%d %H:%M') if question.updated else '',
'answer': answer_json
}
class AnswerResource(AuthenticatedResource):
def post(self, event_id, question_id):
if not g.user.can_answer:
abort(403)
try:
body = request.json
content = body['content'].encode('utf-8')
except Exception as e:
print e
abort(400)
question = Question.get(Question.id == question_id)
if not question:
abort(400)
answer = Answer.create( question = question,
created = datetime.now(),
content = content)
response = make_response('', 201)
response.headers['Location'] = '/events/{id_}/questions/{question_id_}/answers/{answer_id_}'.format(id_ = event_id, question_id_ = question.id, answer_id_ = answer.id)
return response
| Python | 0.999999 |
8112440223e2e8e4f5d8cb93b28fd846dd59418b | Add logout view. | repocracy/repo/urls.py | repocracy/repo/urls.py | from django.conf.urls.defaults import *
from django.conf import settings
import os
urlpatterns = patterns('repocracy.repo.views',
url(r'^$', 'home', name='home'),
url(r'^claim/(?P<pk>\d+)/(?P<claim_hash>[a-fA-F\d]{40})/$', 'repo_claim', name='repo_claim'),
url(r'^users/(?P<name>[\-_\d\w\\\.]+)/$', 'repo_owner', name='repo_owner'),
url(r'^repos/(?P<name>[/\-_\d\w\\\.]+)/$', 'repo_detail', name='repo_detail'),
url(r'^post-receive/(?P<pk>\d+)/$', 'post_receive', name='post_receive'),
url(r'^status/(?P<pk>\d+)/$', 'repo_status', name='repo_status'),
)
urlpatterns += patterns('',
# Not a smart way to serve repos (very slow).
# Serve with nginx using static http, or preferably the CGI hgwebdir script
url(r'^hg(?P<path>.*)$', 'django.views.static.serve',
{'show_indexes': True, 'document_root': os.path.join(settings.REPOCRACY_BASE_REPO_PATH, 'public_hg')}),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'redirect_field_name': 'next'}),
)
| from django.conf.urls.defaults import *
from django.conf import settings
import os
urlpatterns = patterns('repocracy.repo.views',
url(r'^$', 'home', name='home'),
url(r'^claim/(?P<pk>\d+)/(?P<claim_hash>[a-fA-F\d]{40})/$', 'repo_claim', name='repo_claim'),
url(r'^users/(?P<name>[\-_\d\w\\\.]+)/$', 'repo_owner', name='repo_owner'),
url(r'^repos/(?P<name>[/\-_\d\w\\\.]+)/$', 'repo_detail', name='repo_detail'),
url(r'^post-receive/(?P<pk>\d+)/$', 'post_receive', name='post_receive'),
url(r'^status/(?P<pk>\d+)/$', 'repo_status', name='repo_status'),
)
urlpatterns += patterns('',
# Not a smart way to serve repos (very slow).
# Serve with nginx using static http, or preferably the CGI hgwebdir script
url(r'^hg(?P<path>.*)$', 'django.views.static.serve',
{'show_indexes': True, 'document_root': os.path.join(settings.REPOCRACY_BASE_REPO_PATH, 'public_hg')}),
)
| Python | 0 |
8b008968e92cabf1022dff6edb37f38c3aaa5214 | Update merge_filter.py | uf_examples/courses/merge_filter.py | uf_examples/courses/merge_filter.py | #!/usr/bin/env/python
"""
merge_filter.py -- find the courses in VIVO, and match them to the courses in the source. They
must match on ccn
There are two inputs:
1. Courses in VIVO. Keyed by ccn
2. UF courses in the source. Keyed the same.
There are three cases
1. Course in VIVO and in Source => Update VIVO from source
1. Course in VIVO, not in source => nothing to do
1. Course not in VIVO, is in source => Add to VIVO
See CHANGELOG.md for history
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2016 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.02"
import sys
from pump.vivopump import read_csv_fp, write_csv_fp, get_vivo_ccn, get_parms
# Read source courses from stdin and look up the courses already in VIVO.
parms = get_parms()
data_in = read_csv_fp(sys.stdin)
print >>sys.stderr, len(data_in)
data_out = {}
vivo_courses = get_vivo_ccn(parms)  # dictionary of course uri keyed by ccn
print >>sys.stderr, 'VIVO courses', len(vivo_courses)
# Merge on ccn: rows already in VIVO carry that course's uri (update case),
# rows not in VIVO get an empty uri (add case, per the module docstring).
for row, data in data_in.items():
    new_data = dict(data)
    if data['ccn'] in vivo_courses:  # ccn is in vivo and source
        new_data['uri'] = vivo_courses[data['ccn']]
    else:  # key is in source, not in vivo
        new_data['uri'] = ''
    data_out[row] = new_data
print >>sys.stderr, 'data out', len(data_out)
write_csv_fp(sys.stdout, data_out)
| #!/usr/bin/env/python
"""
merge_filter.py -- find the courses in VIVO, and match them to the courses in the source. They
must match on ccn
There are two inputs:
1. Courses in VIVO. Keyed by ccn
2. UF courses in the source. Keyed the same.
There are three cases
1. Course in VIVO and in Source => Update VIVO from source
1. Course in VIVO, not in source => nothing to do
1. Course not in VIVO, is in source => Add to VIVO
See CHANGELOG.md for history
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2015 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.02"
import sys
from pump.vivopump import read_csv_fp, write_csv_fp, get_vivo_ccn, get_parms
parms = get_parms()
data_in = read_csv_fp(sys.stdin)
print >>sys.stderr, len(data_in)
data_out = {}
vivo_courses = get_vivo_ccn(parms) # get dictionary of course uri keyed by ccn
print >>sys.stderr, 'VIVO courses', len(vivo_courses)
for row, data in data_in.items():
new_data = dict(data)
if data['ccn'] in vivo_courses: # ccn is in vivo and source
new_data['uri'] = vivo_courses[data['ccn']]
else: # key is in source, not in vivo
new_data['uri'] = ''
data_out[row] = new_data
print >>sys.stderr, 'data out', len(data_out)
write_csv_fp(sys.stdout, data_out)
| Python | 0.000001 |
672876c172d9bba9e2f29707f9fdd95e0ff10f9f | put data early in Redis at hourly recache | hortiradar/website/refresh_cache.py | hortiradar/website/refresh_cache.py | import argparse
from datetime import datetime
import flask
import ujson as json
from app import app, get_period
from hortiradar import time_format
from processing import get_cache_key, get_process_top_params, process_details, process_top, redis
def main():
    """Recompute the hortiradar analytics caches and store them in Redis.

    For each keyword group the top-keywords payload is cached, then the
    per-keyword detail payloads for the past week are cached, and finally
    a human-readable "sync_time" window is recorded.
    """
    parser = argparse.ArgumentParser(description="Refresh the cache for hortiradar analytics.")
    parser.add_argument("--verbose", "-v", action="store_true")
    args = parser.parse_args()

    # bigger than usual time for when the hourly recache is too slow
    cache_time = 120 * 60
    groups = ["bloemen", "groente_en_fruit"]
    get_time = lambda: datetime.now().strftime("%H:%M")
    start_time = get_time()
    max_amount = 10

    group_data = []
    for group in groups:
        if args.verbose:
            print("Caching group: {}".format(group))
        arguments = (group, max_amount, get_process_top_params(group))
        key = get_cache_key(process_top, *arguments)
        data = process_top(*arguments, force_refresh=True, cache_time=cache_time)
        group_data.append((key, data))
        # Write each group's result to Redis as soon as it is computed.
        redis.set(key, json.dumps(data), ex=cache_time)

    # A request context is needed so get_period can read flask.request.
    with app.test_request_context("/?period=week"):
        _, start, end, _ = get_period(flask.request, "week")
        params = {"start": start.strftime(time_format), "end": end.strftime(time_format)}
        for (_, group) in group_data:
            for keyword in group:
                prod = keyword["label"]
                if args.verbose:
                    print("Caching keyword: {}".format(prod))
                key = get_cache_key(process_details, prod, params)
                data = process_details(prod, params, force_refresh=True, cache_time=cache_time)
                redis.set(key, json.dumps(data), ex=cache_time)

    end_time = get_time()
    # Record a single time or a "start - end" window if the run took a while.
    sync_time = "{} - {}".format(start_time, end_time) if start_time != end_time else start_time
    redis.set("sync_time", sync_time)
if __name__ == "__main__":
main()
| import argparse
from datetime import datetime
import flask
import ujson as json
from app import app, get_period
from hortiradar import time_format
from processing import get_cache_key, get_process_top_params, process_details, process_top, redis
def main():
parser = argparse.ArgumentParser(description="Refresh the cache for hortiradar analytics.")
parser.add_argument("--verbose", "-v", action="store_true")
args = parser.parse_args()
# bigger than usual time for when the hourly recache is too slow
cache_time = 120 * 60
groups = ["bloemen", "groente_en_fruit"]
get_time = lambda: datetime.now().strftime("%H:%M")
start_time = get_time()
max_amount = 10
group_data = []
for group in groups:
if args.verbose:
print("Caching group: {}".format(group))
arguments = (group, max_amount, get_process_top_params(group))
key = get_cache_key(process_top, *arguments)
data = process_top(*arguments, force_refresh=True, cache_time=cache_time)
group_data.append((key, data))
with app.test_request_context("/?period=week"):
_, start, end, _ = get_period(flask.request, "week")
params = {"start": start.strftime(time_format), "end": end.strftime(time_format)}
keyword_data = []
for (_, group) in group_data:
for keyword in group:
prod = keyword["label"]
if args.verbose:
print("Caching keyword: {}".format(prod))
key = get_cache_key(process_details, prod, params)
data = process_details(prod, params, force_refresh=True, cache_time=cache_time)
keyword_data.append((key, data))
end_time = get_time()
# Now populate the cache with the new data
for (key, data) in group_data + keyword_data:
redis.set(key, json.dumps(data), ex=cache_time)
sync_time = "{} - {}".format(start_time, end_time) if start_time != end_time else start_time
redis.set("sync_time", sync_time)
if __name__ == "__main__":
main()
| Python | 0 |
e77243ebd39eea6033b14d53ddeea870893548ae | Create tomato.py | tomato.py | tomato.py | #!/bin/python2.7
import argparse
import re
import random
import struct
from itertools import chain
#################
### ARGUMENTS ###
#################
# Command-line interface: input/output files, a glitch mode, and two numeric
# parameters (-c / -n) whose meaning depends on the selected mode.
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument("file", help="input file")
parser.add_argument("-o", "--output", help="output file")
parser.add_argument('-m', "--mode", action='store', dest='modevalue',help='choose mode')
parser.add_argument('-c', action='store', dest='countframes',help='var1', default=1)
parser.add_argument('-n', action='store', dest='positframes',help='var2', default=1)
parser.add_argument('-s', action='store', dest='simple_value',
    help='Store a simple value')
args = parser.parse_args()

filein = args.file
fileout = args.output
mode = args.modevalue
countframes = args.countframes
positframes = args.positframes

####################
### OPENING FILE ###
####################
# open .avi file as binary
with open (filein, 'rb') as f:
    ## split the content at "idx1" (start of the AVI index chunk)
    a = f.read().split('idx1', 1)
    a1 = a[1]
    ## get the length of the index and store it
    a1, idxl = a1[4:], a1[:4]
    ## get the first iframe and store it (entries are 16 bytes each)
    n = 16
    iframe, a1 = a1[:n], a1[n:]
    ## put all frames in array
    b = [a1[i:i+n] for i in range(0, len(a1), n)]
    ## drop all of the sound-frame entries ("01wb" audio chunks)
    sframeregex = re.compile(b'01wb\x10\x00\x00\x00.{8}')
    b = [x for x in b if not re.match(sframeregex,x)]
    ## calculate number of frames
    c = len(b)

    #########################
    ### OPERATIONS TO IDX ###
    #########################

    ### MODE - SHUFFLE: random permutation of all index entries
    if mode == "shuffle":
        g = random.sample(b,c)

    ### MODE - DELETE IFRAMES: drop "00dc" video entries
    if mode == "ikill":
        iframeregex = re.compile(b'00dc\x10\x00\x00\x00.{8}')
        g = [x for x in b if not re.match(iframeregex,x)]

    ### MODE - BLOOM: repeat one frame many times at a chosen position
    if mode == "bloom":
        ## bloom options (-c = repeat count, -n = frame position)
        repeat = int(countframes)
        frame = int(positframes)
        ## split list
        lista = b[:frame]
        listb = b[frame:]
        ## rejoin list with the repeated frame inserted
        g = lista + ([b[frame]]*repeat) + listb

    ### MODE - P PULSE: periodically repeat frames (-c = length, -n = rhythm)
    if mode == "pulse":
        pulselen = int(countframes)
        pulseryt = int(positframes)
        d = [[x for j in range(pulselen)] if not i%pulseryt else x for i,x in enumerate(b)]
        e = [item for sublist in d for item in sublist]
        f = ''.join(e)
        g = [f[i:i+n] for i in range(0, len(f), n)]

    ## NOTE(review): leftover experiment from the bloom mode, kept disabled
    #d = random.sample(d,c + repeat)

    ########################
    ### FIX INDEX LENGTH ###
    ########################
    print "old index size : " + str(c + 1) + " frames"
    hey = len(g)*16
    print "new index size : " + str((hey/16) + 1) + " frames"
    ## convert the new byte length to packed little-endian data
    idxl = struct.pack('<I',hey)

    ###################
    ### SAVING FILE ###
    ###################
    ## rejoin the whole thing: header + "idx1" + new length + iframe + entries
    data = ''.join(a[0] + "idx1" + idxl + iframe + ''.join(g))
    f = open(fileout, 'wb')
    f.write(data)
    f.close()
| #!/bin/python2.7
import argparse
import re
import random
import struct
from itertools import chain
#################
### ARGUMENTS ###
#################
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument("file", help="input file")
parser.add_argument("-o", "--output", help="output file")
parser.add_argument('-m', "--mode", action='store', dest='modevalue',help='choose mode')
#parser.add_argument("-m", "--mode", type=string, choices=["ikill","iswap","bloom","pulse","shuffle"],help="defines script mode", default=False)
args = parser.parse_args()
filein = args.file
fileout = args.output
mode = args.modevalue
####################
### OPENING FILE ###
####################
#open .avi file as binary
with open (filein, 'rb') as f:
## split the content at "idx1"
a = f.read().split('idx1', 1)
a1 = a[1]
## get the length of the index and store it
a1, idxl = a1[4:], a1[:4]
## get the first iframe and store it
n = 16
iframe, a1 = a1[:n], a1[n:]
## put all frames in array
b = [a1[i:i+n] for i in range(0, len(a1), n)]
## take out all of the sound frames cuz who gives a fuck
sframeregex = re.compile(b'01wb\x10\x00\x00\x00.{8}')
b = [x for x in b if not re.match(sframeregex,x)]
## calculate number of frames
c = len(b)
#########################
### OPERATIONS TO IDX ###
#########################
### MODE - SHUFFLE
#####################
if mode == "shuffle":
g = random.sample(b,c)
### MODE - DELETE IFRAMES
###########################
if mode == "ikill":
iframeregex = re.compile(b'00dc\x10\x00\x00\x00.{8}')
g = [x for x in b if not re.match(iframeregex,x)]
### MODE - BLOOM
##################
if mode == "bloom":
## bloom options
frame = 150
repeat = 500
## split list
lista = b[:frame]
listb = b[frame:]
## rejoin list with bloom
g = lista + ([b[frame]]*repeat) + listb
### MODE - P PULSE
##################
if mode == "pulse":
pulselen = 20
pulseryt = 100
d = [[x for j in range(pulselen)] if not i%pulseryt else x for i,x in enumerate(b)]
e = [item for sublist in d for item in sublist]
f = ''.join(e)
g = [f[i:i+n] for i in range(0, len(f), n)]
##just having fun by adding this at the end of the bloom
#d = random.sample(d,c + repeat)
########################
### FIX INDEX LENGTH ###
########################
print "old index size : " + str(c + 1) + " frames"
hey = len(g)*16
print "new index size : " + str((hey/16) + 1) + " frames"
## convert it to packed data
idxl = struct.pack('<I',hey)
###################
### SAVING FILE ###
###################
## rejoin the whole thing
data = ''.join(a[0] + "idx1" + idxl + iframe + ''.join(g))
f = open(fileout, 'wb')
f.write(data)
f.close()
| Python | 0.999536 |
2e39edbfab0d1d70ca527a024210a15d357842b7 | Fix non-existent import | elaboratecharts/__init__.py | elaboratecharts/__init__.py | from os import path
from shutil import copytree, rmtree
from flask.ext.assets import Environment, Bundle
from .views import elaboratecharts
def rel(p):
    """Resolve *p* relative to the directory containing this module."""
    base = path.dirname(__file__)
    return path.join(base, p)
class ElaborateCharts(object):
    """Flask extension that registers the elaboratecharts blueprint and its assets."""

    def __init__(self, app=None, url_prefix=None):
        # Support both immediate and deferred (init_app) initialization.
        if app is not None:
            self.init_app(app, url_prefix=url_prefix)

    def init_app(self, app, url_prefix=None):
        """Register the blueprint on *app* and wire up its asset bundles."""
        app.register_blueprint(elaboratecharts, url_prefix=url_prefix)
        self.init_assets(app, url_prefix=url_prefix)

    def init_assets(self, app, url_prefix=None):
        """Configure webassets JS/CSS bundles and copy font files into static/."""
        blueprint = app.blueprints['elaboratecharts']
        env = Environment(app)
        # Serve compiled assets from the blueprint's own static path/folder.
        env.url = (url_prefix or '') + blueprint.static_url_path
        env.directory = blueprint.static_folder
        env.load_path = map(rel, [
            'scss',
            'coffee',
            'bower_components',
        ])
        # Third-party scripts plus the compiled CoffeeScript entry point.
        js_bundle = Bundle(
            Bundle(
                'jquery/dist/jquery.js',
                'bootstrap-sass-official/assets/javascripts/bootstrap.js',
                'moment/min/moment-with-locales.js',
                'highstock-release/highstock.src.js',
                'ladda-bootstrap/dist/spin.js',
                'ladda-bootstrap/dist/ladda.js',
                'bluebird/js/browser/bluebird.js',
                'lodash/dist/lodash.js',
                ('history.js/scripts/bundled-uncompressed/html4+html5/'
                 'jquery.history.js'),
                'jquery.finger/dist/jquery.finger.js',
                output='js_requirements.js'),
            Bundle(
                'index.coffee',
                filters=['coffeescript'],
                output='js_index.js'))
        css_bundle = Bundle(
            'all.scss',
            filters=['scss'],
            output='css_all.css')
        # Extra include paths for the scss compiler.
        env.config['sass_load_paths'] = map(rel, [
            'bower_components/bootstrap-sass-official/assets/stylesheets/',
            'bower_components/ladda-bootstrap/css/',
            'bower_components/font-awesome/scss/',
        ])
        # Copy fonts to static folder (replace any stale copy first).
        static_fonts = path.join(env.directory, 'fonts')
        try:
            rmtree(static_fonts)
        except OSError:
            pass
        copytree(rel('bower_components/font-awesome/fonts'),
                 static_fonts)
        env.register('js_all', js_bundle)
        env.register('css_all', css_bundle)
| from os import path
from shutil import copytree, rmtree
from flask.ext.assets import Environment, Bundle
from . import assets
from .views import elaboratecharts
def rel(p):
return path.join(path.dirname(__file__), p)
class ElaborateCharts(object):
def __init__(self, app=None, url_prefix=None):
if app is not None:
self.init_app(app, url_prefix=url_prefix)
def init_app(self, app, url_prefix=None):
app.register_blueprint(elaboratecharts, url_prefix=url_prefix)
self.init_assets(app, url_prefix=url_prefix)
def init_assets(self, app, url_prefix=None):
blueprint = app.blueprints['elaboratecharts']
env = Environment(app)
env.url = (url_prefix or '') + blueprint.static_url_path
env.directory = blueprint.static_folder
env.load_path = map(rel, [
'scss',
'coffee',
'bower_components',
])
js_bundle = Bundle(
Bundle(
'jquery/dist/jquery.js',
'bootstrap-sass-official/assets/javascripts/bootstrap.js',
'moment/min/moment-with-locales.js',
'highstock-release/highstock.src.js',
'ladda-bootstrap/dist/spin.js',
'ladda-bootstrap/dist/ladda.js',
'bluebird/js/browser/bluebird.js',
'lodash/dist/lodash.js',
('history.js/scripts/bundled-uncompressed/html4+html5/'
'jquery.history.js'),
'jquery.finger/dist/jquery.finger.js',
output='js_requirements.js'),
Bundle(
'index.coffee',
filters=['coffeescript'],
output='js_index.js'))
css_bundle = Bundle(
'all.scss',
filters=['scss'],
output='css_all.css')
env.config['sass_load_paths'] = map(rel, [
'bower_components/bootstrap-sass-official/assets/stylesheets/',
'bower_components/ladda-bootstrap/css/',
'bower_components/font-awesome/scss/',
])
# Copy fonts to static folder
static_fonts = path.join(env.directory, 'fonts')
try:
rmtree(static_fonts)
except OSError:
pass
copytree(rel('bower_components/font-awesome/fonts'),
static_fonts)
env.register('js_all', js_bundle)
env.register('css_all', css_bundle)
| Python | 0.999866 |
b6098d5b4578547fea192fe96998dbc43ef9dcb0 | upgrade values check | http_lazy_headers/fields/upgrade.py | http_lazy_headers/fields/upgrade.py | # -*- coding: utf-8 -*-
from ..shared.utils import constraints
from ..shared.utils import assertions
from ..shared import bases
def upgrade(name, version=None):
    """Build a (protocol-name, protocol-version) pair; version may be None."""
    pair = (name, version)
    return pair
class ProtocolName:
    """Well-known HTTP upgrade token names.

    See the IANA HTTP upgrade token registry:
    http://www.iana.org/assignments/http-upgrade-tokens/http-upgrade-tokens.xml
    """
    http = 'HTTP'
    tls = 'TLS'
    web_socket = 'WebSocket'
    h2c = 'h2c'
class Upgrade(bases.MultiHeaderBase):
    """
    The ``Upgrade`` header field is intended to provide a simple
    mechanism for transitioning from HTTP/1.1 to some other protocol
    on the same connection. A client MAY send a list of protocols in
    the Upgrade header field of a request to invite the server to
    switch to one or more of those protocols, in order of descending
    preference, before sending the final response. A server MAY
    ignore a received Upgrade header field if it wishes to continue
    using the current protocol on that connection. Upgrade cannot be
    used to insist on a protocol change.

    Example::

        Upgrade([
            upgrade(ProtocolName.http, '2.0')
        ])

        Upgrade([
            upgrade(ProtocolName.web_socket)
        ])

        Upgrade([
            ('HTTP', '2.0'),
            ('SHTTP', '1.3'),
            ('IRC', '6.9'),
            ('RTA', 'x11')
        ])

    `Ref. <http://httpwg.org/specs/rfc7230.html#header.upgrade>`_
    """

    name = 'upgrade'

    def check_value(self, value):
        """Assert that *value* is a (protocol, version) 2-tuple of tokens."""
        assertions.must_be_tuple_of(value, 2)
        protocol, version = value
        assertions.must_be_token(protocol)
        # Short-circuit idiom: only validate the version when one was given.
        version is None or assertions.must_be_token(version)

    def value_str(self, value):
        """Render one (protocol, version) pair as "name/version" or just "name"."""
        protocol, version = value
        if version:
            return '{}/{}'.format(protocol, version)
        return protocol

    def values_str(self, values):
        """Render all pairs as a comma-separated header value."""
        return ', '.join(
            self.value_str(v)
            for v in values)

    def clean_value(self, raw_value):
        """Parse one raw token into a validated (name, version-or-None) tuple."""
        try:
            protocol_name, protocol_version = raw_value.split('/', 1)
        except ValueError:
            constraints.must_be_token(raw_value)  # Just name, no "/version"
            return raw_value, None
        else:
            constraints.must_be_token(protocol_name)
            constraints.must_be_token(protocol_version)
            return protocol_name, protocol_version
| # -*- coding: utf-8 -*-
from ..shared.utils import constraints
from ..shared import bases
def upgrade(name, version=None):
return name, version
class ProtocolName:
# http://www.iana.org/assignments/http-upgrade-tokens/http-upgrade-tokens.xml
http = 'HTTP'
tls = 'TLS'
web_socket = 'WebSocket'
h2c = 'h2c'
class Upgrade(bases.MultiHeaderBase):
"""
The ``Upgrade`` header field is intended to\
provide a simple mechanism for transitioning\
from HTTP/1.1 to some other protocol on the\
same connection. A client MAY send a list of\
protocols in the Upgrade header field of a\
request to invite the server to switch to\
one or more of those protocols, in order of\
descending preference, before sending the\
final response. A server MAY ignore a\
received Upgrade header field if it wishes\
to continue using the current protocol on\
that connection. Upgrade cannot be used to\
insist on a protocol change.
Example::
Upgrade([
upgrade(ProtocolName.http, '2.0')
])
Upgrade([
upgrade(ProtocolName.web_socket)
])
Upgrade([
('HTTP', '2.0'),
('SHTTP', '1.3'),
('IRC', '6.9'),
('RTA', 'x11')
])
`Ref. <http://httpwg.org/specs/rfc7230.html#header.upgrade>`_
"""
name = 'upgrade'
def value_str(self, value):
protocol, version = value
if version:
return '{}/{}'.format(protocol, version)
return protocol
def values_str(self, values):
return ', '.join(
self.value_str(v)
for v in values)
def clean_value(self, raw_value):
try:
protocol_name, protocol_version = raw_value.split('/', 1)
except ValueError:
constraints.must_be_token(raw_value) # Just name
return raw_value, None
else:
constraints.must_be_token(protocol_name)
constraints.must_be_token(protocol_version)
return protocol_name, protocol_version
| Python | 0.000001 |
471e0f4e91eb4513315193ce2b2b0f13e2c9724c | remove stray " | corehq/util/datadog/gauges.py | corehq/util/datadog/gauges.py | from functools import wraps
from celery.task import periodic_task
from corehq.util.datadog import statsd
from corehq.util.soft_assert import soft_assert
def datadog_gauge_task(name, fn, run_every, enforce_prefix='commcare'):
    """
    Helper for easily registering datadog gauges to run periodically.

    To update a datadog gauge on a schedule based on the result of a function
    just add to your app's tasks.py:

        my_calculation = datadog_gauge_task('my.datadog.metric', my_calculation_function,
                                            run_every=crontab(minute=0))

    """
    # Nudge callers toward the expected metric-name prefix; passing
    # enforce_prefix=None skips the check.
    soft_assert(fail_if_debug=True).call(
        not enforce_prefix or name.split('.')[0] == enforce_prefix,
        "Did you mean to call your gauge 'commcare.{}'? "
        "If you're sure you want to forgo the prefix, you can "
        "pass enforce_prefix=None".format(name))

    datadog_gauge = _DatadogGauge(name, fn, run_every)
    return datadog_gauge.periodic_task()
class _DatadogGauge(object):
    """Wraps a callable so its result is reported as a datadog gauge on a schedule."""

    def __init__(self, name, fn, run_every):
        self.name = name  # datadog metric name
        self.fn = fn  # callable producing the gauge value
        self.run_every = run_every  # schedule passed through to periodic_task

    def periodic_task(self):
        """Build and return the periodic task that records the gauge."""
        @periodic_task('background_queue', run_every=self.run_every,
                       acks_late=True, ignore_result=True)
        @wraps(self.fn)
        def inner(*args, **kwargs):
            statsd.gauge(self.name, self.fn(*args, **kwargs))

        return inner
| from functools import wraps
from celery.task import periodic_task
from corehq.util.datadog import statsd
from corehq.util.soft_assert import soft_assert
def datadog_gauge_task(name, fn, run_every, enforce_prefix='commcare'):
""""
helper for easily registering datadog gauges to run periodically
To update a datadog gauge on a schedule based on the result of a function
just add to your app's tasks.py:
my_calculation = datadog_gauge_task('my.datadog.metric', my_calculation_function,
run_every=crontab(minute=0))
"""
soft_assert(fail_if_debug=True).call(
not enforce_prefix or name.split('.')[0] == enforce_prefix,
"Did you mean to call your gauge 'commcare.{}'? "
"If you're sure you want to forgo the prefix, you can "
"pass enforce_prefix=None".format(name))
datadog_gauge = _DatadogGauge(name, fn, run_every)
return datadog_gauge.periodic_task()
class _DatadogGauge(object):
def __init__(self, name, fn, run_every):
self.name = name
self.fn = fn
self.run_every = run_every
def periodic_task(self):
@periodic_task('background_queue', run_every=self.run_every,
acks_late=True, ignore_result=True)
@wraps(self.fn)
def inner(*args, **kwargs):
statsd.gauge(self.name, self.fn(*args, **kwargs))
return inner
| Python | 0.000042 |
213ddc9ffbb171c17c051c6394baa0499abfc820 | fix UnboundLocalError | corehq/util/tests/test_log.py | corehq/util/tests/test_log.py | from __future__ import absolute_import
from __future__ import unicode_literals
import six
from django.test import SimpleTestCase
from ..log import clean_exception
class TestLogging(SimpleTestCase):
    """Tests for clean_exception()."""

    def test_bad_traceback(self):
        # clean_exception must strip the sensitive payload (a fake SSN) from
        # the exception message, while the original exception still carries it.
        result = "JJackson's SSN: 555-55-5555"
        exception = None
        try:
            # copied from couchdbkit/client.py
            assert isinstance(result, dict), 'received an invalid ' \
                'response of type %s: %s' % (type(result), repr(result))
        except AssertionError as e:
            exception = e
        self.assertIn(result, six.text_type(exception))
        self.assertNotIn(result, six.text_type(clean_exception(exception)))

    def test_that_I_didnt_break_anything(self):
        # For a plain exception, clean_exception keeps the class and message.
        exception = AssertionError("foo")
        cleaned_exception = clean_exception(exception)
        self.assertEqual(exception.__class__, cleaned_exception.__class__)
        self.assertEqual(six.text_type(exception), six.text_type(cleaned_exception))
| from __future__ import absolute_import
from __future__ import unicode_literals
import six
from django.test import SimpleTestCase
from ..log import clean_exception
class TestLogging(SimpleTestCase):
def test_bad_traceback(self):
result = "JJackson's SSN: 555-55-5555"
try:
# copied from couchdbkit/client.py
assert isinstance(result, dict), 'received an invalid ' \
'response of type %s: %s' % (type(result), repr(result))
except AssertionError as e:
pass
self.assertIn(result, six.text_type(e))
self.assertNotIn(result, six.text_type(clean_exception(e)))
def test_that_I_didnt_break_anything(self):
exception = AssertionError("foo")
cleaned_exception = clean_exception(exception)
self.assertEqual(exception.__class__, cleaned_exception.__class__)
self.assertEqual(six.text_type(exception), six.text_type(cleaned_exception))
| Python | 0.000002 |
24b85059dcc5c17d21011bc7d1975f519e09837d | Improve formatting | netsecus/__init__.py | netsecus/__init__.py | #!/usr/bin/env python
from __future__ import unicode_literals
import imaplib
import logging
import time
import helper
import rules
# useful for debugging: $ openssl s_client -crlf -connect imap.gmail.com:993
#
# core functions
#
def main():
    """Log in via IMAP and process mail rules, using IDLE when the server supports it."""
    # Patch imaplib's command table so MOVE/IDLE/DONE are accepted in these states.
    imaplib.Commands["MOVE"] = ("SELECTED",)
    imaplib.Commands["IDLE"] = ("AUTH", "SELECTED",)
    imaplib.Commands["DONE"] = ("AUTH", "SELECTED",)

    helper.setupLogging()

    imapmail = loginIMAP(
        helper.getConfigValue("login", "imapmail_server"),
        helper.getConfigValue("login", "mail_address"),
        helper.getConfigValue("login", "mail_password"))

    imapmail._command("IDLE")

    if "idling" in imapmail.readline().decode("utf-8"):
        logging.debug("Server supports IDLE.")
        firstRun = True
        while True:
            # Run once immediately, then again whenever the server reports
            # new mail ("EXISTS") while we are idling.
            if firstRun or "EXISTS" in imapmail.readline().decode("utf-8"):
                imapmail._command("DONE")
                imapmail.readline()
                ruleLoop(imapmail)
                imapmail._command("IDLE")
                logging.debug("Entering IDLE state.")
            firstRun = False
    else:
        # No IDLE support: poll on a fixed delay instead.
        logging.debug("Server lacks support for IDLE... Falling back to delay.")
        while True:
            ruleLoop(imapmail)
            time.sleep(helper.getConfigValue("settings", "delay"))
def ruleLoop(imapmail):
    """Run every configured rule once against the given IMAP connection."""
    configured_rules = helper.getConfigValue("rules")
    for current_rule in configured_rules:
        processRule(imapmail, current_rule)
def processRule(imapmail, rule):
    """Run one rule: feed the mail list through each configured step in order."""
    logging.debug("**** rule: '%s'" % rule["title"])
    mails = []
    for step in rule["steps"]:
        # step[0] names a function in the rules module; the remaining entries
        # are passed through as its extra arguments.
        logging.debug("* exec: %s" % step[0])
        mails = getattr(rules, step[0])(imapmail, mails, *step[1:])
        # Normalize single-mail results to a list for the next step.
        if not isinstance(mails, list):
            mails = [mails]
        if not mails:
            # A step returned nothing to work on -- stop the chain early.
            logging.debug("* ret no mails")
            break
        logging.debug("* ret %d mail(s)" % len(mails))
    logging.debug("**** done: '%s'" % rule["title"])
def loginIMAP(server, address, password):
    """Open an SSL IMAP connection, authenticate, and select the default mailbox."""
    connection = imaplib.IMAP4_SSL(server)
    connection.login(address, password)
    connection.select()
    logging.info("IMAP login (%s on %s)" % (address, server))
    return connection
if __name__ == "__main__":
main()
| #!/usr/bin/env python
from __future__ import unicode_literals
import imaplib
import logging
import time
import helper
import rules
# useful for debugging: $ openssl s_client -crlf -connect imap.gmail.com:993
#
# core functions
#
def main():
# patching imaplib
imaplib.Commands["MOVE"] = ("SELECTED",)
imaplib.Commands["IDLE"] = ("AUTH", "SELECTED",)
imaplib.Commands["DONE"] = ("AUTH", "SELECTED",)
helper.setupLogging()
imapmail = loginIMAP(helper.getConfigValue("login", "imapmail_server"), helper.getConfigValue(
"login", "mail_address"), helper.getConfigValue("login", "mail_password"))
imapmail._command("IDLE")
if "idling" in imapmail.readline().decode("utf-8"):
logging.debug("Server supports IDLE.")
firstRun = True
while True:
if firstRun or "EXISTS" in imapmail.readline().decode("utf-8"):
imapmail._command("DONE")
imapmail.readline()
ruleLoop(imapmail)
imapmail._command("IDLE")
logging.debug("Entering IDLE state.")
firstRun = False
else:
logging.debug("Server lacks support for IDLE... Falling back to delay.")
while True:
ruleLoop(imapmail)
time.sleep(helper.getConfigValue("settings", "delay"))
def ruleLoop(imapmail):
for rule in helper.getConfigValue("rules"):
processRule(imapmail, rule)
def processRule(imapmail, rule):
logging.debug("**** rule: '%s'" % rule["title"])
mails = []
for step in rule["steps"]:
logging.debug("* exec: %s" % step[0])
mails = getattr(rules, step[0])(imapmail, mails, *step[1:])
if not isinstance(mails, list):
mails = [mails]
if not mails:
logging.debug("* ret no mails")
break
logging.debug("* ret %d mail(s)" % len(mails))
logging.debug("**** done: '%s'" % rule["title"])
def loginIMAP(server, address, password):
imapmail = imaplib.IMAP4_SSL(server)
imapmail.login(address, password)
imapmail.select()
logging.info("IMAP login (%s on %s)" % (address, server))
return imapmail
if __name__ == "__main__":
main()
| Python | 0.99985 |
c492c42639f7a487dc27a95a5a785dd9c62ecdb7 | Change project status formatting | clowder/utility/print_utilities.py | clowder/utility/print_utilities.py | """Print utilities"""
import os
from termcolor import colored
from clowder.utility.git_utilities import (
git_current_sha,
git_current_branch,
git_is_detached,
git_is_dirty
)
def print_project_status(root_directory, path, name):
    """Print the status lines for one project's git repository.

    Does nothing when the project directory has no .git folder.
    """
    repo_path = os.path.join(root_directory, path)
    # Skip projects that have not been cloned yet.
    if not os.path.isdir(os.path.join(repo_path, '.git')):
        return
    dirty = git_is_dirty(repo_path)
    color = 'red' if dirty else 'green'
    symbol = '*' if dirty else ''
    project_output = colored(symbol + name, color)
    # Show the checked-out ref: SHA when detached, branch name otherwise.
    if git_is_detached(repo_path):
        ref_text = '(HEAD @ ' + git_current_sha(repo_path) + ')'
    else:
        ref_text = '(' + git_current_branch(repo_path) + ')'
    current_ref_output = colored(ref_text, 'magenta')
    path_output = colored(path, 'cyan')
    print(project_output)
    print(current_ref_output + ' ' + path_output)
| """Print utilities"""
import os
from termcolor import colored
from clowder.utility.git_utilities import (
git_current_sha,
git_current_branch,
git_is_detached,
git_is_dirty
)
def print_project_status(root_directory, path, name):
"""Print repo status"""
repo_path = os.path.join(root_directory, path)
git_path = os.path.join(repo_path, '.git')
if not os.path.isdir(git_path):
return
if git_is_dirty(repo_path):
color = 'red'
symbol = '*'
else:
color = 'green'
symbol = ''
project_output = colored(symbol + name, color)
if git_is_detached(repo_path):
current_ref = git_current_sha(repo_path)
current_ref_output = colored('(HEAD @ ' + current_ref + ')', 'magenta')
else:
current_branch = git_current_branch(repo_path)
current_ref_output = colored('(' + current_branch + ')', 'magenta')
path_output = colored(path, 'cyan')
print(project_output + ' @ ' + path_output)
print(current_ref_output)
| Python | 0 |
b82cc5adba91610093621fefc5121393d7a8bd35 | Split ignored_paths | coalib/parsing/DefaultArgParser.py | coalib/parsing/DefaultArgParser.py | """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
from coalib.misc.i18n import _
# Command-line interface definition for coala.
# NOTE(review): most options use nargs=1, which parses into one-element
# lists rather than scalars -- confirm that callers unwrap them.
default_arg_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__)
# --- File selection -------------------------------------------------------
default_arg_parser.add_argument('-f', '--files', nargs='+', metavar='FILE', dest='allowed_files',
                                help=_('Files that should be checked'))
default_arg_parser.add_argument('-D', '--flat-dirs', nargs='+', metavar='DIR', dest='flat_directories',
                                help=_('Directories of files that should be checked, excluding sub directories'))
default_arg_parser.add_argument('-d', '--rec-dirs', nargs='+', metavar='DIR', dest='recursive_directories',
                                help=_('Directories of files that should be checked, including sub directories'))
default_arg_parser.add_argument('-t', '--allowed', nargs='+', metavar='TYPE', dest='allowed_file_types',
                                help=_('File types of files to be checked'))
default_arg_parser.add_argument('-F', '--forbidden', nargs='+', metavar='TYPE', dest='forbidden_file_types',
                                help=_('File types not to be checked'))
default_arg_parser.add_argument('-i', '--ignored-files', nargs='+', metavar='PATH', dest='ignored_files',
                                help=_('Files that should be ignored'))
default_arg_parser.add_argument('-p', '--ignored-dirs', nargs='+', metavar='PATH', dest='ignored_dirs',
                                help=_('Directories that should be ignored'))
# --- Bear selection -------------------------------------------------------
# BUG FIX: dest was 'bear-directories'; a hyphenated dest cannot be read as
# an attribute of the parsed namespace (args.bear_directories was impossible).
default_arg_parser.add_argument('-B', '--bear-dirs', nargs='+', metavar='DIR', dest='bear_directories',
                                help=_('Directories to look in for bears'))
default_arg_parser.add_argument('-b', '--bears', nargs='+', metavar='NAME', dest='bears',
                                help=_('Names of bears to use'))
default_arg_parser.add_argument('-I', '--ignored_bears', nargs='+', metavar='REGEX', dest='ignored_bears',
                                help=_('Names of bears not to use'))
default_arg_parser.add_argument('-r', '--regex-bears', nargs='+', metavar='REGEX', dest='regex_bears',
                                help=_('Regular expressions matching bears to use'))
# --- Logging and output ---------------------------------------------------
default_arg_parser.add_argument('-l', '--log', nargs=1, choices=['CONSOLE', 'TXT', 'HTML'], metavar='ENUM',
                                dest='log_type', help=_("Enum('CONSOLE','TXT','HTML') to determine type of logging"))
default_arg_parser.add_argument('-L', '--log_level', nargs=1, choices=['ERR', 'WARN', 'INFO', 'DEBUG'],
                                metavar='ENUM', dest='log_level',
                                help=_("Enum('ERR','WARN','INFO','DEBUG') to set level of log output"))
# BUG FIX: help text read 'Location of lot output'.
default_arg_parser.add_argument('-o', '--output', nargs=1, metavar='FILE', dest='output',
                                help=_('Location of log output'))
# --- Configuration handling -----------------------------------------------
default_arg_parser.add_argument('-c', '--config', nargs='+', metavar='FILE', dest='config',
                                help=_('Configuration file to be used'))
default_arg_parser.add_argument('-s', '--save', nargs='?', const=True, metavar='FILE', dest='save',
                                help=_('Filename of file to be saved to, defaults to config file'))
# --- Execution behaviour --------------------------------------------------
default_arg_parser.add_argument('-j', '--job-count', nargs=1, type=int, metavar='INT', dest='job_count',
                                help=_('Number of processes to be allowed to run at once'))
default_arg_parser.add_argument('-C', '--apply-changes', nargs=1, choices=['YES', 'NO', 'ASK'], metavar='ENUM',
                                dest='apply_changes', help=_("Enum('YES','NO','ASK') to set whether to apply changes"))
| """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
from coalib.misc.i18n import _
default_arg_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__)
default_arg_parser.add_argument('-f', '--files', nargs='+', metavar='FILE', dest='allowed_files',
help=_('Files that should be checked'))
default_arg_parser.add_argument('-D', '--flat-dirs', nargs='+', metavar='DIR', dest='flat_directories',
help=_('Directories of files that should be checked, excluding sub directories'))
default_arg_parser.add_argument('-d', '--rec-dirs', nargs='+', metavar='DIR', dest='recursive_directories',
help=_('Directories of files that should be checked, including sub directories'))
default_arg_parser.add_argument('-t', '--allowed', nargs='+', metavar='TYPE', dest='allowed_file_types',
help=_('File types of files to be checked'))
default_arg_parser.add_argument('-F', '--forbidden', nargs='+', metavar='TYPE', dest='forbidden_file_types',
help=_('File types not to be checked'))
default_arg_parser.add_argument('-i', '--ignored', nargs='+', metavar='PATH', dest='ignored_paths',
help=_('Files or directories that should be ignored'))
default_arg_parser.add_argument('-B', '--bear-dirs', nargs='+', metavar='DIR', dest='bear-directories',
help=_('Directories to look in for bears'))
default_arg_parser.add_argument('-b', '--bears', nargs='+', metavar='NAME', dest='bears',
help=_('Names of bears to use'))
default_arg_parser.add_argument('-I', '--ignored_bears', nargs='+', metavar='REGEX', dest='ignored_bears',
help=_('Names of bears not to use'))
default_arg_parser.add_argument('-r', '--regex-bears', nargs='+', metavar='REGEX', dest='regex_bears',
help=_('Regular expressions matching bears to use'))
default_arg_parser.add_argument('-l', '--log', nargs=1, choices=['CONSOLE', 'TXT', 'HTML'], metavar='ENUM',
dest='log_type', help=_("Enum('CONSOLE','TXT','HTML') to determine type of logging"))
default_arg_parser.add_argument('-L', '--log_level', nargs=1, choices=['ERR', 'WARN', 'INFO', 'DEBUG'],
metavar='ENUM', dest='log_level',
help=_("Enum('ERR','WARN','INFO','DEBUG') to set level of log output"))
default_arg_parser.add_argument('-o', '--output', nargs=1, metavar='FILE', dest='output',
help=_('Location of lot output'))
default_arg_parser.add_argument('-c', '--config', nargs='+', metavar='FILE', dest='config',
help=_('Configuration file to be used'))
default_arg_parser.add_argument('-s', '--save', nargs='?', const=True, metavar='FILE', dest='save',
help=_('Filename of file to be saved to, defaults to config file'))
default_arg_parser.add_argument('-j', '--job-count', nargs=1, type=int, metavar='INT', dest='job_count',
help=_('Number of processes to be allowed to run at once'))
default_arg_parser.add_argument('-C', '--apply-changes', nargs=1, choices=['YES', 'NO', 'ASK'], metavar='ENUM',
dest='apply_changes', help=_("Enum('YES','NO','ASK') to set whether to apply changes"))
| Python | 0 |
d4f86c8b9ced020f842a4321b4108c9372d7b4ec | Add the staticfiles app. | coda/coda_project/settings/base.py | coda/coda_project/settings/base.py | # Base settings for coda_project
import os
import json
from datetime import timedelta
from django.core.exceptions import ImproperlyConfigured
# Absolute path to the settings module
SETTINGS_ROOT = os.path.dirname(__file__)
# Absolute path to the project
PROJECT_ROOT = os.path.dirname(SETTINGS_ROOT)
# Absolute path to the site directory
SITE_ROOT = os.path.dirname(PROJECT_ROOT)
# PEP 8 (E731): use def instead of assigning a lambda, so the helpers get
# proper names in tracebacks and can carry docstrings.
def project_path(path):
    """Compose a path from the project root."""
    return os.path.join(PROJECT_ROOT, path)


def site_path(path):
    """Compose a path from the site root."""
    return os.path.join(SITE_ROOT, path)
# Get our secrets from a file outside of version control.
# This helps to keep the settings files generic.
with open(os.path.join(SETTINGS_ROOT, "secrets.json")) as f:
secrets = json.loads(f.read())
def get_secret(setting, secrets=secrets):
    """Return the secret named *setting* from the secrets mapping.

    Raises ImproperlyConfigured with a descriptive message when the
    secret is missing, so misconfiguration fails loudly at startup.
    """
    if setting in secrets:
        return secrets[setting]
    raise ImproperlyConfigured("The {0} secret is not set.".format(setting))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
MAINTENANCE_MSG = get_secret("MAINTENANCE_MSG")
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(SITE_ROOT, 'static')]
SECRET_KEY = get_secret("SECRET_KEY")
DATABASES = {
'default': {
'NAME': get_secret("DB_NAME"),
'USER': get_secret("DB_USER"),
'ENGINE': 'django.db.backends.mysql',
'PASSWORD': get_secret("DB_PASSWORD"),
'HOST': get_secret("DB_HOST"),
'PORT': get_secret("DB_PORT"),
}
}
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',)
ROOT_URLCONF = 'coda_project.urls'
TEMPLATE_DIRS = (
site_path('templates'),)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',)
# Let the view know if we are in "proxy mode" or not.
# this uses the coda instance as a reverse proxy for the archival storage nodes
# setting to false sends requests directly to the archival servers.
CODA_PROXY_MODE = False
DJANGO_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.admindocs',
'django.contrib.admin',
'django.contrib.humanize',)
THIRD_PARTY_APPS = (
'premis_event_service',
)
LOCAL_APPS = (
'coda_mdstore',
'coda_replication',
'coda_oaipmh',
'coda_validate',)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
VALIDATION_PERIOD = timedelta(days=365)
| # Base settings for coda_project
import os
import json
from datetime import timedelta
from django.core.exceptions import ImproperlyConfigured
# Absolute path to the settings module
SETTINGS_ROOT = os.path.dirname(__file__)
# Absolute path to the project
PROJECT_ROOT = os.path.dirname(SETTINGS_ROOT)
# Absolute path to the site directory
SITE_ROOT = os.path.dirname(PROJECT_ROOT)
# Compose a path from the project root
project_path = lambda path: os.path.join(PROJECT_ROOT, path)
# Compose path from the site root
site_path = lambda path: os.path.join(SITE_ROOT, path)
# Get our secrets from a file outside of version control.
# This helps to keep the settings files generic.
with open(os.path.join(SETTINGS_ROOT, "secrets.json")) as f:
secrets = json.loads(f.read())
def get_secret(setting, secrets=secrets):
try:
return secrets[setting]
except KeyError:
error_msg = "The {0} secret is not set.".format(setting)
raise ImproperlyConfigured(error_msg)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
MAINTENANCE_MSG = get_secret("MAINTENANCE_MSG")
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
MEDIA_ROOT = site_path('media')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/admin/'
SECRET_KEY = get_secret("SECRET_KEY")
DATABASES = {
'default': {
'NAME': get_secret("DB_NAME"),
'USER': get_secret("DB_USER"),
'ENGINE': 'django.db.backends.mysql',
'PASSWORD': get_secret("DB_PASSWORD"),
'HOST': get_secret("DB_HOST"),
'PORT': get_secret("DB_PORT"),
}
}
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',)
ROOT_URLCONF = 'coda_project.urls'
TEMPLATE_DIRS = (
site_path('templates'),)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',)
# Let the view know if we are in "proxy mode" or not.
# this uses the coda instance as a reverse proxy for the archival storage nodes
# setting to false sends requests directly to the archival servers.
CODA_PROXY_MODE = False
DJANGO_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.admindocs',
'django.contrib.admin',
'django.contrib.humanize',)
THIRD_PARTY_APPS = (
'premis_event_service',
)
LOCAL_APPS = (
'coda_mdstore',
'coda_replication',
'coda_oaipmh',
'coda_validate',)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
VALIDATION_PERIOD = timedelta(days=365)
| Python | 0 |
6565627e44566f1230ea8c3176678d604c020050 | Generate network with bias properly | network_simulator.py | network_simulator.py | import numpy as np
from scipy import stats
def exp_cosh(H, beta=1.0):
    """Glauber transition probability 0.5 * exp(beta*H) / cosh(beta*H).

    Rewritten via the identity 0.5*exp(x)/cosh(x) == 0.5*(1 + tanh(x)):
    the same function, but numerically stable. The original form computes
    inf/inf = nan once beta*H exceeds ~710 (both exp and cosh overflow),
    whereas tanh saturates cleanly at +/-1 for any magnitude of H.

    :param H: scalar or numpy.ndarray of local fields
    :param beta: inverse temperature
    :return: probabilities in (0, 1), same shape as H
    """
    return 0.5 * (1.0 + np.tanh(beta * H))
def kinetic_ising_model(S, J):
    """Return the probability that each neuron fires at the next time step.

    :param S: numpy.ndarray (T,N)
        Binary data where an entry is either 1 ('spike') or -1 ('silence').
    :param J: numpy.ndarray (N, N)
        Coupling matrix
    :return: numpy.ndarray (T,N)
        Probabilities that at time point t+1 neuron n fires
    """
    # Local fields induced by the current spike patterns, squashed through
    # the Glauber transition probability.
    fields = np.dot(S, J)
    return exp_cosh(fields)
def spike_and_slab(ro, N, bias=0, v_s=1.0):
    """Draw a coupling matrix from a spike-and-slab prior.

    :param ro: sparsity (probability that a coupling is active)
    :param N: number of neurons
    :param bias: 1 if bias is included in the model, 0 otherwise
    :param v_s: standard deviation of the slab (Gaussian) component
    :return: numpy.ndarray (N + bias, N) of couplings
    """
    shape = (N + bias, N)
    # Bernoulli mask decides which couplings are active ...
    mask = stats.bernoulli.rvs(p=ro, size=shape)
    if bias:
        # ... except the bias row, which is always kept active.
        mask[N, :] = 1
    # ... and the Gaussian slab supplies their magnitudes.
    slab = np.random.normal(0.0, v_s, shape)
    return mask * slab
def generate_spikes(N, T, S0, J, bias=0, no_spike=-1, save=False):
    """Generate spike data according to a kinetic Ising model.

    :param N: int
        Number of neurons.
    :param T: int
        Length of the trajectory that is generated.
    :param S0: numpy.ndarray
        Initial pattern that sampling starts from; must fill a full row of
        the state matrix (length N + bias). Only used when no_spike == -1.
    :param J: numpy.ndarray (N + bias, N)
        Coupling matrix.
    :param bias: 1 if a bias unit is included in the model, 0 otherwise.
    :param no_spike: value representing 'no spike'; -1 (default) gives
        +/-1 coding, any other value gives 1/0 coding.
    :param save: unused here; kept for backward compatibility with callers.
    :return: numpy.ndarray (T, N + bias)
        Spike data; when bias is enabled the last column is constantly 1.
    """
    S = np.empty([T, N + bias])
    # Start from S0 for +/-1 coding, from the all-silent pattern otherwise.
    S[0] = S0 if no_spike == -1 else np.zeros(N + bias)
    if bias:
        # The bias column is clamped to 1 at every time step.
        S[:, N] = 1
    # Pre-draw all uniform random numbers used for spike thresholding.
    X = np.random.rand(T - 1, N)
    for i in range(1, T):
        # Firing probabilities given the previous pattern.
        p = kinetic_ising_model(np.array([S[i - 1]]), J)
        fired = X[i - 1] < p
        if no_spike == -1:
            S[i, :N] = 2 * fired - 1   # spike -> +1, silence -> -1
        else:
            # Original wrote 2 * fired / 2.0, which is just fired as float.
            S[i, :N] = fired           # spike -> 1.0, silence -> 0.0
    # (Removed a no-op 'S = S' left over from earlier editing.)
    return S
| import numpy as np
from scipy import stats
def exp_cosh(H, beta=1.0):
return 0.5 * np.exp(beta * H) / np.cosh(beta * H)
def kinetic_ising_model(S, J):
""" Returns probabilities of S[t+1,:] being one.
:param S: numpy.ndarray (T,N)
Binary data where an entry is either 1 ('spike') or -1 ('silence').
:param J: numpy.ndarray (N, N)
Coupling matrix
:return: numpy.ndarray (T,N)
Probabilities that at time point t+1 neuron n fires
"""
# compute fields
H = np.dot(S, J)
# compute probabilities
p = exp_cosh(H)
# return
return p
def spike_and_slab(ro, N, bias=0, v_s=1.0):
    """Generate couplings from a spike and slab prior.

    :param ro: sparsity (probability that a coupling is active)
    :param N: number of neurons
    :param bias: 1 if bias is included in the model, 0 otherwise
    :param v_s: standard deviation of the slab (Gaussian) component
    :return: numpy.ndarray (N + bias, N) of couplings
    """
    gamma = stats.bernoulli.rvs(p=ro, size=(N + bias, N))
    if bias:
        # BUG FIX: the bias row must never be zeroed by the sparsity mask;
        # previously the bias weights were dropped with probability 1 - ro,
        # so the generated network's bias was frequently absent.
        gamma[N, :] = 1
    normal_dist = np.random.normal(0.0, v_s, (N + bias, N))
    return gamma * normal_dist
def generate_spikes(N, T, S0, J, bias=0, no_spike=-1, save=False):
""" Generates spike data according to kinetic Ising model
with a spike and slab prior.
:param J: numpy.ndarray (N, N)
Coupling matrix.
:param T: int
Length of trajectory that is generated.
:param S0: numpy.ndarray (N)
Initial pattern that sampling started from.
:param bias: 1 if bias is included in the model. 0 other wise.
:param no_spike: what number should represent 'no_spike'. Default is -1.
:return: numpy.ndarray (T, N)
Binary data where an entry is either 1 ('spike') or -1 ('silence'). First row is only ones for external fields.
"""
# Initialize array for data
S = np.empty([T, N + bias])
# Set initial spike pattern
S[0] = S0 if no_spike == -1 else np.zeros(N + bias)
# Last column in the activity matrix is of the bias and should be 1 at all times
if bias:
S[:, N] = 1
# Generate random numbers
X = np.random.rand(T - 1, N)
# Iterate through all time points
for i in range(1, T):
# Compute probabilities of neuron firing
p = kinetic_ising_model(np.array([S[i - 1]]), J)
# Check if spike or not
if no_spike == -1:
S[i, :N] = 2 * (X[i - 1] < p) - 1
else:
S[i, :N] = 2 * (X[i - 1] < p) / 2.0
S = S
return S
| Python | 0.999129 |
70021d5df6beb0e8eb5b78a6484cbb650a7a1fb6 | fix docs | cupyx/distributed/__init__.py | cupyx/distributed/__init__.py | from cupyx.distributed._init import init_process_group # NOQA
from cupyx.distributed._comm import Backend # NOQA
from cupyx.distributed._nccl_comm import NCCLBackend # NOQA
| from cupyx.distributed._init import init_process_group # NOQA
from cupyx.distributed._nccl_comm import NCCLBackend # NOQA
| Python | 0.000001 |
3061affd313aff39f722e6e5846a3191d6592a7d | fix FaqQuestionSitemap URLs | fluent_faq/sitemaps.py | fluent_faq/sitemaps.py | from django.contrib.sitemaps import Sitemap
from django.core.urlresolvers import NoReverseMatch
from fluent_faq.models import FaqCategory, FaqQuestion
from fluent_faq.urlresolvers import faq_reverse
def _url_patterns_installed():
    """Tell whether the FAQ URL patterns can currently be resolved.

    This module can use normal Django urls.py URLs, or mount the "FaqPage"
    in the page tree. Probe a single reverse() so `sitemap.xml` generation
    can degrade gracefully instead of raising when the URLs are missing;
    this issue will pop up elsewhere too, so no error is raised here.
    """
    try:
        faq_reverse('faqcategory_detail', kwargs={'slug': 'category'}, ignore_multiple=True)
    except NoReverseMatch:
        return False
    return True
class FaqQuestionSitemap(Sitemap):
    """
    Sitemap listing all published FAQ questions.
    """
    def items(self):
        # Without installed URL patterns no locations can be built.
        if not _url_patterns_installed():
            return None
        # select_related avoids one extra query per question in location().
        return FaqQuestion.objects.published().select_related('category')

    def lastmod(self, question):
        """Return the last modification of the object."""
        return question.modification_date

    def location(self, question):
        """Return url of an question."""
        url_kwargs = {
            'cat_slug': question.category.slug,
            'slug': question.slug,
        }
        return faq_reverse('faqquestion_detail', kwargs=url_kwargs, ignore_multiple=True)
class FaqCategorySitemap(Sitemap):
    """
    Sitemap listing all published FAQ categories.
    """
    def items(self):
        # Without installed URL patterns no locations can be built.
        return None if not _url_patterns_installed() else FaqCategory.objects.published()

    def lastmod(self, category):
        """Return the last modification of the object."""
        return category.modification_date

    def location(self, category):
        """Return url of an category."""
        url_kwargs = {'slug': category.slug}
        return faq_reverse('faqcategory_detail', kwargs=url_kwargs, ignore_multiple=True)
| from django.contrib.sitemaps import Sitemap
from django.core.urlresolvers import NoReverseMatch
from fluent_faq.models import FaqCategory, FaqQuestion
from fluent_faq.urlresolvers import faq_reverse
def _url_patterns_installed():
# This module can use normal Django urls.py URLs, or mount the "FaqPage" in the page tree.
# Check whether the URLs are installed, so the `sitemap.xml` can be generated nevertheless.
# This issue will pop up elsewhere too, so there is no need to raise an error here.
try:
faq_reverse('faqcategory_detail', kwargs={'slug': 'category'}, ignore_multiple=True)
except NoReverseMatch:
return False
else:
return True
class FaqQuestionSitemap(Sitemap):
    """
    Sitemap for FAQ questions.
    """
    def items(self):
        if not _url_patterns_installed():
            return None
        # select_related avoids one extra query per question when
        # location() dereferences question.category.
        return FaqQuestion.objects.published().select_related('category')

    def lastmod(self, question):
        """Return the last modification of the object."""
        return question.modification_date

    def location(self, question):
        """Return the url of a question.

        BUG FIX: this previously reversed 'faqcategory_detail' with the
        question's slug, so every question entry pointed at a (usually
        non-existent) category URL. NOTE(review): assumes the
        'faqquestion_detail' route takes cat_slug + slug -- confirm
        against the URL configuration.
        """
        return faq_reverse('faqquestion_detail', kwargs={'cat_slug': question.category.slug, 'slug': question.slug}, ignore_multiple=True)
class FaqCategorySitemap(Sitemap):
"""
Sitemap for FAQ categories.
"""
def items(self):
if not _url_patterns_installed():
return None
return FaqCategory.objects.published()
def lastmod(self, category):
"""Return the last modification of the object."""
return category.modification_date
def location(self, category):
"""Return url of an category."""
return faq_reverse('faqcategory_detail', kwargs={'slug': category.slug}, ignore_multiple=True)
| Python | 0.000164 |
79e7ef509e4757c29d6fa0bd9161410aadbd305a | fix os.path.expand [sic] typo, and refactor | salt/utils/xdg.py | salt/utils/xdg.py | # -*- coding: utf-8 -*-
'''
Create an XDG function to get the config dir
'''
import os
def xdg_config_dir(config_dir=None):
    '''
    Return the salt config dir, preferring the XDG location.

    Uses ``$XDG_CONFIG_HOME/salt`` (default ``~/.config/salt``) when that
    directory exists; otherwise falls back to a hidden directory in the
    user's home (``~/.`` or ``~/.<config_dir>``).
    '''
    xdg_base = os.getenv('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
    salt_xdg_dir = os.path.join(xdg_base, 'salt')
    if os.path.isdir(salt_xdg_dir):
        return salt_xdg_dir
    # Legacy fallback: dotted directory directly under $HOME.
    fallback = '~/.' if config_dir is None else os.path.join('~/.', config_dir)
    return os.path.expanduser(fallback)
| # -*- coding: utf-8 -*-
'''
Create an XDG function to get the config dir
'''
import os
def xdg_config_dir(config_dir=None):
    '''
    Check xdg locations for config files.

    Returns ``$XDG_CONFIG_HOME/salt`` (default ``~/.config/salt``) when it
    exists, otherwise a hidden fallback directory under the user's home.
    '''
    xdg_config = os.getenv('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
    xdg_salt_dir = os.path.join(xdg_config, 'salt')
    if os.path.isdir(xdg_salt_dir):
        return xdg_salt_dir
    if config_dir is None:
        return os.path.expanduser('~/.')
    # BUG FIX: os.path.expand does not exist; expanduser is the intended
    # call (the old code raised AttributeError whenever config_dir was given).
    return os.path.expanduser(os.path.join('~/.', config_dir))
| Python | 0.000004 |
237fcac11a12a1fb0eba32a4fc516cb449f15577 | Fix bounce test | froide/bounce/tests.py | froide/bounce/tests.py | import os
import unittest
from datetime import datetime, timedelta
from django.test import TestCase
from django.db import connection
from froide.helper.email_utils import EmailParser
from froide.foirequest.tests.factories import UserFactory
from .models import Bounce
from .utils import (
make_bounce_address, add_bounce_mail, check_user_deactivation,
get_recipient_address_from_bounce
)
TEST_DATA_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'testdata'))
def p(path):
return os.path.join(TEST_DATA_ROOT, path)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class BounceTest(TestCase):
    """Tests for bounce-address round-tripping, bounce mail parsing and
    bounce-based user deactivation.

    Skipped on non-PostgreSQL backends (see the class decorator);
    presumably the bounce storage relies on PostgreSQL-specific
    features -- confirm against the Bounce model.
    """
    def setUp(self):
        # An address that has no matching user account.
        self.email = 'nonexistant@example.org'
    def test_bounce_address(self):
        """Bounce addresses are lower-cased and round-trip to the recipient."""
        email = 'Upper_Case@example.org'
        bounce_address = make_bounce_address(email)
        self.assertEqual(bounce_address, bounce_address.lower())
        recovered_email, status = get_recipient_address_from_bounce(bounce_address)
        self.assertEqual(recovered_email, email.lower())
        # status flags whether the bounce address verified correctly.
        self.assertTrue(status)
    def test_bounce_parsing(self):
        """A hard-bounce fixture is parsed and stored for the recipient."""
        parser = EmailParser()
        with open(p("bounce_001.txt"), 'rb') as f:
            email = parser.parse(f)
        bounce_address = make_bounce_address(self.email)
        # Redirect the parsed fixture mail to our generated bounce address.
        email.to = [('', bounce_address)]
        bounce_info = email.bounce_info
        self.assertTrue(bounce_info.is_bounce)
        self.assertEqual(bounce_info.bounce_type, 'hard')
        # Expected DSN status code (5.5.3) of the bounce_001.txt fixture.
        self.assertEqual(bounce_info.status, (5, 5, 3))
        add_bounce_mail(email)
        bounce = Bounce.objects.get(email=self.email)
        self.assertEqual(bounce.email, self.email)
        # No user account exists for this address.
        self.assertIsNone(bounce.user)
        self.assertEqual(len(bounce.bounces), 1)
    def test_bounce_parsing_2(self):
        """A second hard-bounce fixture yields DSN status 5.1.1."""
        parser = EmailParser()
        with open(p("bounce_002.txt"), 'rb') as f:
            email = parser.parse(f)
        bounce_address = make_bounce_address(self.email)
        email.to = [('', bounce_address)]
        bounce_info = email.bounce_info
        self.assertTrue(bounce_info.is_bounce)
        self.assertEqual(bounce_info.bounce_type, 'hard')
        self.assertEqual(bounce_info.status, (5, 1, 1))
    def test_bounce_handling(self):
        """Deactivation triggers only for enough hard bounces of a known user."""
        def days_ago(days):
            # ISO timestamp *days* days in the past.
            return (datetime.now() - timedelta(days=days)).isoformat()
        def bounce_factory(days, bounce_type='hard'):
            # One bounce entry per backwards day-offset in *days*.
            return [{
                'is_bounce': True, 'bounce_type': bounce_type,
                'timestamp': days_ago(day)}
                for day in days
            ]
        # No user attached: nothing to deactivate -> None.
        bounce = Bounce(user=None, email='a@example.org',
                        bounces=bounce_factory([1, 5]))
        result = check_user_deactivation(bounce)
        self.assertIsNone(result)
        # Two hard bounces are below the deactivation threshold.
        user = UserFactory()
        bounce = Bounce(user=user, email=user.email,
                        bounces=bounce_factory([1, 5]))
        result = check_user_deactivation(bounce)
        self.assertFalse(result)
        # Three hard bounces within 12 days deactivate the user.
        user = UserFactory()
        bounce = Bounce(user=user, email=user.email,
                        bounces=bounce_factory([1, 5, 12]))
        result = check_user_deactivation(bounce)
        self.assertTrue(result)
        # Soft bounces never deactivate, regardless of count.
        user = UserFactory()
        bounce = Bounce(
            user=user, email=user.email,
            bounces=bounce_factory([1, 5, 12], bounce_type='soft'))
        result = check_user_deactivation(bounce)
        self.assertFalse(result)
| import os
import unittest
from datetime import datetime, timedelta
from django.test import TestCase
from django.db import connection
from froide.helper.email_utils import EmailParser
from froide.foirequest.tests.factories import UserFactory
from .models import Bounce
from .utils import (
make_bounce_address, add_bounce_mail, check_user_deactivation,
get_recipient_address_from_bounce
)
TEST_DATA_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'testdata'))
def p(path):
return os.path.join(TEST_DATA_ROOT, path)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class BounceTest(TestCase):
def setUp(self):
self.email = 'nonexistant@example.org'
def test_bounce_address(self):
email = 'Upper_Case@example.org'
bounce_address = make_bounce_address(email)
self.assertEqual(bounce_address, bounce_address.lower())
recovered_email, status = get_recipient_address_from_bounce(bounce_address)
self.assertEqual(recovered_email, email.lower())
self.assertTrue(status)
def test_bounce_parsing(self):
parser = EmailParser()
with open(p("bounce_001.txt"), 'rb') as f:
email = parser.parse(f)
bounce_address = make_bounce_address(self.email)
email.to = [('', bounce_address)]
bounce_info = email.bounce_info
self.assertTrue(bounce_info.is_bounce)
self.assertEqual(bounce_info.bounce_type, 'hard')
self.assertEqual(bounce_info.status, (5, 0, 0))
add_bounce_mail(email)
bounce = Bounce.objects.get(email=self.email)
self.assertEqual(bounce.email, self.email)
self.assertIsNone(bounce.user)
self.assertEqual(len(bounce.bounces), 1)
def test_bounce_parsing_2(self):
parser = EmailParser()
with open(p("bounce_002.txt"), 'rb') as f:
email = parser.parse(f)
bounce_address = make_bounce_address(self.email)
email.to = [('', bounce_address)]
bounce_info = email.bounce_info
self.assertTrue(bounce_info.is_bounce)
self.assertEqual(bounce_info.bounce_type, 'hard')
self.assertEqual(bounce_info.status, (5, 1, 1))
def test_bounce_handling(self):
def days_ago(days):
return (datetime.now() - timedelta(days=days)).isoformat()
def bounce_factory(days, bounce_type='hard'):
return [{
'is_bounce': True, 'bounce_type': bounce_type,
'timestamp': days_ago(day)}
for day in days
]
bounce = Bounce(user=None, email='a@example.org',
bounces=bounce_factory([1, 5]))
result = check_user_deactivation(bounce)
self.assertIsNone(result)
user = UserFactory()
bounce = Bounce(user=user, email=user.email,
bounces=bounce_factory([1, 5]))
result = check_user_deactivation(bounce)
self.assertFalse(result)
user = UserFactory()
bounce = Bounce(user=user, email=user.email,
bounces=bounce_factory([1, 5, 12]))
result = check_user_deactivation(bounce)
self.assertTrue(result)
user = UserFactory()
bounce = Bounce(
user=user, email=user.email,
bounces=bounce_factory([1, 5, 12], bounce_type='soft'))
result = check_user_deactivation(bounce)
self.assertFalse(result)
| Python | 0.000002 |
e6db95cce0239d9e8ce33aec5cf21aa1bd19df03 | Add __str__ method | imagersite/imager_profile/models.py | imagersite/imager_profile/models.py | import six
from django.db import models
from django.contrib.auth.models import User
@six.python_2_unicode_compatible
class ImagerProfile(models.Model):
    """Extra profile data attached one-to-one to a Django auth user."""
    # The auth user this profile belongs to.
    user = models.OneToOneField(User)
    # The user's favourite camera.
    fav_camera = models.CharField(max_length=30)
    address = models.CharField(max_length=100)
    # Personal website URL.
    web_url = models.URLField()
    # Preferred type/genre of photography.
    type_photography = models.CharField(max_length=30)
    def __str__(self):
        """Human-readable representation, e.g. "alice's profile"."""
        return "{}'s profile".format(self.user.username)
|
from django.db import models
from django.contrib.auth.models import User
import six
@six.python_2_unicode_compatible
class ImagerProfile(models.Model):
user = models.OneToOneField(User)
fav_camera = models.CharField(max_length=30)
address = models.CharField(max_length=100)
web_url = models.URLField()
type_photography = models.CharField(max_length=30)
| Python | 0.999825 |
e9f986a0ade7d08a56157641efb49366f3c54bcc | Add a create column migration for canonical_bug_link | mysite/search/migrations/0016_add_looks_closed_column.py | mysite/search/migrations/0016_add_looks_closed_column.py |
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
    """South migration adding the 'looks_closed' and 'canonical_bug_link'
    columns to the search_bug table."""
    def forwards(self, orm):
        """Apply the migration."""
        # Adding fields 'Bug.looks_closed' and 'Bug.canonical_bug_link'
        db.add_column('search_bug', 'looks_closed', orm['search.bug:looks_closed'])
        db.add_column('search_bug', 'canonical_bug_link', orm['search.bug:canonical_bug_link'])
    def backwards(self, orm):
        """Revert the migration."""
        # Deleting fields 'Bug.looks_closed' and 'Bug.canonical_bug_link'
        db.delete_column('search_bug', 'looks_closed')
        db.delete_column('search_bug', 'canonical_bug_link')
    # Frozen model definitions used by South's fake ORM at migration time.
    models = {
        'search.bug': {
            'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'last_polled': ('django.db.models.fields.DateTimeField', [], {}),
            'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
            'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'people_involved': ('django.db.models.fields.IntegerField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'search.project': {
            'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'icon': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
        }
    }
    complete_apps = ['search']
|
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
    """South schema migration: adds the boolean ``looks_closed`` column to Bug."""
    def forwards(self, orm):
        # Adding field 'Bug.looks_closed'
        db.add_column('search_bug', 'looks_closed', orm['search.bug:looks_closed'])
    def backwards(self, orm):
        # Deleting field 'Bug.looks_closed'
        db.delete_column('search_bug', 'looks_closed')
    # Frozen ORM snapshot used by South to reconstruct models at migration
    # time; auto-generated, do not edit by hand.
    models = {
        'search.bug': {
            'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'last_polled': ('django.db.models.fields.DateTimeField', [], {}),
            'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
            'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'people_involved': ('django.db.models.fields.IntegerField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'search.project': {
            'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'icon': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
        }
    }
    complete_apps = ['search']
| Python | 0.000001 |
18bb441017c26b850eeb84fda576c613a08238b1 | Fix the case when Paths.CWD is None | sc2/sc2process.py | sc2/sc2process.py | import sys
import signal
import time
import asyncio
import os.path
import shutil
import tempfile
import subprocess
import portpicker
import websockets
import logging
logger = logging.getLogger(__name__)
from .paths import Paths
from .protocol import Protocol
from .controller import Controller
class kill_switch(object):
    """Process registry used to guarantee cleanup of launched SC2 clients."""

    # Class-level list shared by all callers: everything registered here
    # is cleaned up together by kill_all().
    _to_kill = []

    @classmethod
    def add(cls, value):
        """Register an object whose _clean() must run at shutdown."""
        cls._to_kill.append(value)

    @classmethod
    def kill_all(cls):
        """Invoke _clean() on every registered object."""
        logger.info("kill_switch: Process cleanup")
        for registered in cls._to_kill:
            registered._clean()
class SC2Process(object):
    """Async context manager that launches a local SC2 client process.

    Entering the context starts the game executable, waits until its
    websocket API accepts connections, and returns a Controller bound to
    that connection.  Exiting (or a SIGINT) tears everything down.
    """
    def __init__(self, host="127.0.0.1", port=None, fullscreen=False):
        assert isinstance(host, str)
        assert isinstance(port, int) or port is None
        self._fullscreen = fullscreen
        self._host = host
        if port is None:
            # No explicit port requested: grab any free one.
            self._port = portpicker.pick_unused_port()
        else:
            self._port = port
        # Private scratch directory handed to the game; removed in _clean().
        self._tmp_dir = tempfile.mkdtemp(prefix="SC2_")
        self._process = None
        self._ws = None
    async def __aenter__(self):
        # Ensure this process is cleaned up even on Ctrl-C.
        kill_switch.add(self)
        def signal_handler(signal, frame):
            kill_switch.kill_all()
        signal.signal(signal.SIGINT, signal_handler)
        try:
            self._process = self._launch()
            self._ws = await self._connect()
        except:
            # Bare except is deliberate: clean up on *any* failure
            # (including KeyboardInterrupt) and re-raise.
            self._clean()
            raise
        return Controller(self._ws)
    async def __aexit__(self, *args):
        kill_switch.kill_all()
        # Restore the default SIGINT behaviour installed in __aenter__.
        signal.signal(signal.SIGINT, signal.SIG_DFL)
    @property
    def ws_url(self):
        # URL of the SC2 client's websocket API endpoint.
        return f"ws://{self._host}:{self._port}/sc2api"
    def _launch(self):
        # Spawn the game client listening on our chosen host/port.
        return subprocess.Popen([
            str(Paths.EXECUTABLE),
            "-listen", self._host,
            "-port", str(self._port),
            "-displayMode", "1" if self._fullscreen else "0",
            "-dataDir", str(Paths.BASE),
            "-tempDir", self._tmp_dir
        ],
            cwd=(str(Paths.CWD) if Paths.CWD else None),
            #, env=run_config.env
        )
    async def _connect(self):
        # Poll the websocket endpoint for up to ~30 seconds while the game
        # client boots; ConnectionRefusedError just means "not up yet".
        for _ in range(30):
            await asyncio.sleep(1)
            try:
                ws = await websockets.connect(self.ws_url, timeout=120)
                return ws
            except ConnectionRefusedError:
                pass
        raise TimeoutError("Websocket")
    def _clean(self):
        # Tear down websocket, process and temp dir; safe to call twice
        # (attributes are reset to None and the dir-exists check guards
        # the rmtree).
        logger.info("Cleaning up...")
        if self._ws is not None:
            self._ws.close()
        if self._process is not None:
            if self._process.poll() is None:
                # Ask nicely up to three times, then kill hard.
                for _ in range(3):
                    self._process.terminate()
                    time.sleep(2)
                    if self._process.poll() is not None:
                        break
                else:
                    self._process.kill()
                    self._process.wait()
                    logger.error("KILLED")
        if os.path.exists(self._tmp_dir):
            shutil.rmtree(self._tmp_dir)
        self._process = None
        self._ws = None
        logger.info("Cleanup complete")
| import sys
import signal
import time
import asyncio
import os.path
import shutil
import tempfile
import subprocess
import portpicker
import websockets
import logging
logger = logging.getLogger(__name__)
from .paths import Paths
from .protocol import Protocol
from .controller import Controller
class kill_switch(object):
    """Registry of SC2 processes to clean up on interrupt or exit."""
    # Shared, class-level list of everything registered for cleanup.
    _to_kill = []
    @classmethod
    def add(cls, value):
        # Register an object exposing _clean() for later teardown.
        cls._to_kill.append(value)
    @classmethod
    def kill_all(cls):
        # Invoke _clean() on every registered object.
        logger.info("kill_switch: Process cleanup")
        for p in cls._to_kill:
            p._clean()
class SC2Process(object):
    """Async context manager that launches a local SC2 client process.

    Entering the context starts the game executable, waits until its
    websocket API accepts connections, and returns a Controller bound to
    that connection.  Exiting (or a SIGINT) tears everything down.
    """
    def __init__(self, host="127.0.0.1", port=None, fullscreen=False):
        assert isinstance(host, str)
        assert isinstance(port, int) or port is None
        self._fullscreen = fullscreen
        self._host = host
        if port is None:
            # No explicit port requested: grab any free one.
            self._port = portpicker.pick_unused_port()
        else:
            self._port = port
        # Private scratch directory handed to the game; removed in _clean().
        self._tmp_dir = tempfile.mkdtemp(prefix="SC2_")
        self._process = None
        self._ws = None
    async def __aenter__(self):
        # Ensure this process is cleaned up even on Ctrl-C.
        kill_switch.add(self)
        def signal_handler(signal, frame):
            kill_switch.kill_all()
        signal.signal(signal.SIGINT, signal_handler)
        try:
            self._process = self._launch()
            self._ws = await self._connect()
        except:
            # Bare except is deliberate: clean up on *any* failure
            # (including KeyboardInterrupt) and re-raise.
            self._clean()
            raise
        return Controller(self._ws)
    async def __aexit__(self, *args):
        kill_switch.kill_all()
        # Restore the default SIGINT behaviour installed in __aenter__.
        signal.signal(signal.SIGINT, signal.SIG_DFL)
    @property
    def ws_url(self):
        # URL of the SC2 client's websocket API endpoint.
        return f"ws://{self._host}:{self._port}/sc2api"
    def _launch(self):
        # Spawn the game client listening on our chosen host/port.
        return subprocess.Popen([
            str(Paths.EXECUTABLE),
            "-listen", self._host,
            "-port", str(self._port),
            "-displayMode", "1" if self._fullscreen else "0",
            "-dataDir", str(Paths.BASE),
            "-tempDir", self._tmp_dir
        ],
            # Fix: Paths.CWD may be None; str(None) would hand Popen the
            # literal directory name "None".  Pass None instead so the
            # child simply inherits our working directory.
            cwd=(str(Paths.CWD) if Paths.CWD else None),
            #, env=run_config.env
        )
    async def _connect(self):
        # Poll the websocket endpoint for up to ~30 seconds while the game
        # client boots; ConnectionRefusedError just means "not up yet".
        for _ in range(30):
            await asyncio.sleep(1)
            try:
                ws = await websockets.connect(self.ws_url, timeout=120)
                return ws
            except ConnectionRefusedError:
                pass
        raise TimeoutError("Websocket")
    def _clean(self):
        # Tear down websocket, process and temp dir; safe to call twice.
        logger.info("Cleaning up...")
        if self._ws is not None:
            self._ws.close()
        if self._process is not None:
            if self._process.poll() is None:
                # Ask nicely up to three times, then kill hard.
                for _ in range(3):
                    self._process.terminate()
                    time.sleep(2)
                    if self._process.poll() is not None:
                        break
                else:
                    self._process.kill()
                    self._process.wait()
                    logger.error("KILLED")
        if os.path.exists(self._tmp_dir):
            shutil.rmtree(self._tmp_dir)
        self._process = None
        self._ws = None
        logger.info("Cleanup complete")
| Python | 1 |
93d75e45a277cbdbc551831d6e0462e3e5c430fb | Set keys public again on S3 caching. | funsize/cache/cache.py | funsize/cache/cache.py | """
funsize.database.cache
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is currently a stub file that contains function prototypes for the
caching layer core
"""
import os
from boto.s3.connection import S3Connection
import funsize.utils.oddity as oddity
class Cache(object):
    """Thin wrapper around an Amazon S3 bucket used as the funsize cache.

    Keys are stored under "files/<category>/<identifier>"; identifiers
    are expected to be hashes (presumably hex-encoded SHA512s -- TODO
    confirm against the callers).
    """
    def __init__(self, _bucket=os.environ.get('FUNSIZE_S3_UPLOAD_BUCKET')):
        """_bucket : bucket name to use for S3 resources.

        NOTE: the default is read from the environment once, at import
        time, because default arguments are evaluated at definition time.
        """
        if not _bucket:
            raise oddity.CacheError("Amazon S3 bucket not set")
        # open a connection and get the bucket
        self.conn = S3Connection()
        self.bucket = self.conn.get_bucket(_bucket)
    def _get_cache_internals(self, identifier, category):
        """Return the S3 key name "files/<category>/<identifier>".

        Raises CacheError for a falsy identifier or unknown category.
        """
        if not identifier:
            raise oddity.CacheError('Save object failed without identifier')
        if category not in ('diff', 'partial', 'patch'):
            raise oddity.CacheError("Category failed for S3 uploading")
        bucket_key = "files/%s/%s" % (category, identifier)
        return bucket_key
    def _create_new_bucket_key(self, identifier, category):
        """Create (and return) a fresh key object in the bucket."""
        _key = self._get_cache_internals(identifier, category)
        return self.bucket.new_key(_key)
    def _get_bucket_key(self, identifier, category):
        """Look up an existing key; falsy result means it does not exist."""
        _key = self._get_cache_internals(identifier, category)
        return self.bucket.get_key(_key)
    def save(self, string, identifier, category, isfile=False):
        """Upload data under the given identifier/category.

        *string* is treated as a local file path when *isfile* is true,
        otherwise as the raw content.  The key is stored with STANDARD
        storage and made publicly readable.  Returns None.
        """
        # FIXME: What should the behaviour be when we try to save to a
        # pre-existing key?
        key = self._create_new_bucket_key(identifier, category)
        key.change_storage_class("STANDARD")
        if isfile:
            key.set_contents_from_filename(string)
        else:
            key.set_contents_from_string(string)
        key.set_acl('public-read')
    def save_blank_file(self, identifier, category):
        """Upload an empty object marking a partial as "in progress"."""
        key = self._create_new_bucket_key(identifier, category)
        key.change_storage_class("STANDARD")
        key.set_contents_from_string('')
        key.set_acl('public-read')
    def is_blank_file(self, identifier, category):
        """Return True if the key exists and is empty, False otherwise.

        Used to ensure no second triggering is done for the same partial.
        """
        key = self._get_bucket_key(identifier, category)
        if not key:
            return False
        return key.size == 0
    def find(self, identifier, category):
        """Return True if a file with the given identifier/category exists."""
        key = self._get_bucket_key(identifier, category)
        return bool(key)
    def retrieve(self, identifier, category, output_file=None):
        """Fetch a cached file.

        Writes it to *output_file* when given (returning None), otherwise
        returns the content as a binary string.
        """
        key = self._get_bucket_key(identifier, category)
        if output_file:
            key.get_contents_to_filename(output_file)
        else:
            return key.get_contents_as_string()
    def delete_from_cache(self, identifier, category):
        """Remove a file from the cache."""
        key = self._get_bucket_key(identifier, category)
        key.delete()
| """
funsize.database.cache
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is currently a stub file that contains function prototypes for the
caching layer core
"""
import os
from boto.s3.connection import S3Connection
import funsize.utils.oddity as oddity
class Cache(object):
    """Thin wrapper around an Amazon S3 bucket used as the funsize cache.

    Keys are stored under "files/<category>/<identifier>"; identifiers
    are expected to be hashes (presumably hex-encoded SHA512s -- TODO
    confirm against the callers).  Saved keys are made publicly readable
    so cached artifacts can be fetched straight from S3.
    """
    def __init__(self, _bucket=os.environ.get('FUNSIZE_S3_UPLOAD_BUCKET')):
        """_bucket : bucket name to use for S3 resources.

        NOTE: the default is read from the environment once, at import
        time, because default arguments are evaluated at definition time.
        """
        if not _bucket:
            raise oddity.CacheError("Amazon S3 bucket not set")
        # open a connection and get the bucket
        self.conn = S3Connection()
        self.bucket = self.conn.get_bucket(_bucket)
    def _get_cache_internals(self, identifier, category):
        """Return the S3 key name "files/<category>/<identifier>".

        Raises CacheError for a falsy identifier or unknown category.
        """
        if not identifier:
            raise oddity.CacheError('Save object failed without identifier')
        if category not in ('diff', 'partial', 'patch'):
            raise oddity.CacheError("Category failed for S3 uploading")
        bucket_key = "files/%s/%s" % (category, identifier)
        return bucket_key
    def _create_new_bucket_key(self, identifier, category):
        """Create (and return) a fresh key object in the bucket."""
        _key = self._get_cache_internals(identifier, category)
        return self.bucket.new_key(_key)
    def _get_bucket_key(self, identifier, category):
        """Look up an existing key; falsy result means it does not exist."""
        _key = self._get_cache_internals(identifier, category)
        return self.bucket.get_key(_key)
    def save(self, string, identifier, category, isfile=False):
        """Upload data under the given identifier/category.

        *string* is treated as a local file path when *isfile* is true,
        otherwise as the raw content.  Returns None.
        """
        # FIXME: What should the behaviour be when we try to save to a
        # pre-existing key?
        key = self._create_new_bucket_key(identifier, category)
        # Fix: store with STANDARD storage and make the key publicly
        # readable so consumers can download it directly from S3.
        key.change_storage_class("STANDARD")
        if isfile:
            key.set_contents_from_filename(string)
        else:
            key.set_contents_from_string(string)
        key.set_acl('public-read')
    def save_blank_file(self, identifier, category):
        """Upload an empty object marking a partial as "in progress"."""
        key = self._create_new_bucket_key(identifier, category)
        key.change_storage_class("STANDARD")
        key.set_contents_from_string('')
        key.set_acl('public-read')
    def is_blank_file(self, identifier, category):
        """Return True if the key exists and is empty, False otherwise.

        Used to ensure no second triggering is done for the same partial.
        """
        key = self._get_bucket_key(identifier, category)
        if not key:
            return False
        return key.size == 0
    def find(self, identifier, category):
        """Return True if a file with the given identifier/category exists."""
        key = self._get_bucket_key(identifier, category)
        return bool(key)
    def retrieve(self, identifier, category, output_file=None):
        """Fetch a cached file.

        Writes it to *output_file* when given (returning None), otherwise
        returns the content as a binary string.
        """
        key = self._get_bucket_key(identifier, category)
        if output_file:
            key.get_contents_to_filename(output_file)
        else:
            return key.get_contents_as_string()
    def delete_from_cache(self, identifier, category):
        """Remove a file from the cache."""
        key = self._get_bucket_key(identifier, category)
        key.delete()
| Python | 0 |
37b994f96a7760ae962092e59d603455202f0985 | Document what the interface is for... | usingnamespace/traversal/Archive.py | usingnamespace/traversal/Archive.py | import logging
log = logging.getLogger(__name__)
from zope.interface import Interface
from zope.interface import implementer
from Entry import Entry
class IArchive(Interface):
    """Marker interface applied to all archive traversal contexts."""
@implementer(IArchive)
class ArchiveYear(object):
    """Traversal context for the archives of a single year."""
    def __init__(self, year):
        """Store the year, accepting either an int or a numeric string.

        :year: The year to list archives for
        """
        log.debug("Creating new ArchiveYear: {}".format(year))
        if isinstance(year, int):
            self.year = year
            self.__name__ = '{}'.format(year)
        if isinstance(year, basestring):
            self.__name__ = year
            try:
                self.year = int(year)
            except ValueError:
                raise ValueError('Year is not valid.')
    def __getitem__(self, key):
        """Resolve *key* to the next traversal context.

        :key: Path segment to resolve
        :returns: The child traversal context
        """
        child = None
        if key == 'page':
            pass
        # Fall back to interpreting the segment as a month.
        try:
            child = ArchiveYearMonth(key)
        except ValueError:
            child = None
        if child is None:
            raise KeyError
        # Wire the child into the traversal tree before returning it.
        child.__parent__ = self
        child._request = self._request
        return child
@implementer(IArchive)
class ArchiveYearMonth(object):
    """ArchiveYearMonth is the context for the year/month archives"""
    def __init__(self, month):
        """Initialises the context
        :month: The month we are getting archives for (int or numeric
            string); ValueError for a string outside 1-12
        """
        log.debug("Creating new ArchiveYearMonth: {}".format(month))
        if isinstance(month, int):
            # NOTE(review): integer input is not range-checked here.
            self.__name__ = '{}'.format(month)
            self.month = month
        if isinstance(month, basestring):
            self.__name__ = month
            try:
                self.month = int(month)
                # Out-of-range values reuse the except clause below.
                if self.month > 12 or self.month < 1:
                    raise ValueError
            except ValueError:
                raise ValueError('Month is not valid.')
    def __getitem__(self, key):
        """Return the next item in the traversal tree
        :key: The next item to look for
        :returns: The next traversal item
        """
        next_ctx = None
        if key == 'page':
            pass
        # Last resort, see if it is a valid day
        try:
            next_ctx = ArchiveYearMonthDay(key)
        except ValueError:
            next_ctx = None
        if next_ctx is None:
            raise KeyError
        else:
            # Wire the child into the traversal tree before returning it.
            next_ctx.__parent__ = self
            next_ctx._request = self._request
            return next_ctx
@implementer(IArchive)
class ArchiveYearMonthDay(object):
    """ArchiveYearMonthDay is the context for the year/month/day archives"""
    def __init__(self, day):
        """Initialises the context

        :day: The day we are getting archives for (int or numeric string)
        :raises ValueError: if a string *day* is not a number in 1-31
        """
        log.debug("Creating new ArchiveYearMonthDay: {}".format(day))
        if isinstance(day, int):
            # Bug fix: this used to format the undefined name ``month``,
            # raising NameError for any integer day.
            self.__name__ = '{}'.format(day)
            self.day = day
        if isinstance(day, basestring):
            self.__name__ = day
            try:
                self.day = int(day)
                if self.day > 31 or self.day < 1:
                    raise ValueError
            except ValueError:
                raise ValueError('Day is not valid.')
    def __getitem__(self, key):
        """Return the next item in the traversal tree

        :key: The next item to look for
        :returns: The next traversal item (an Entry built from the slug)
        """
        next_ctx = None
        if key == 'page':
            pass
        # Last resort, see if it is a valid slug
        try:
            next_ctx = Entry(key)
        except ValueError:
            next_ctx = None
        if next_ctx is None:
            raise KeyError
        else:
            next_ctx.__parent__ = self
            next_ctx._request = self._request
            return next_ctx
| import logging
log = logging.getLogger(__name__)
from zope.interface import Interface
from zope.interface import implementer
from Entry import Entry
class IArchive(Interface):
    """Marker interface applied to all archive traversal contexts."""
    pass
@implementer(IArchive)
class ArchiveYear(object):
    """ArchiveYear is the context for this years archives"""
    def __init__(self, year):
        """Initialises the context
        :year: The year we are trying to get archives for (int or
            numeric string; ValueError if the string is not a number)
        """
        log.debug("Creating new ArchiveYear: {}".format(year))
        if isinstance(year, int):
            self.__name__ = '{}'.format(year)
            self.year = year
        if isinstance(year, basestring):
            self.__name__ = year
            try:
                self.year = int(year)
            except ValueError:
                raise ValueError('Year is not valid.')
    def __getitem__(self, key):
        """Return the next item in the traversal tree
        :key: The next item to look for
        :returns: The next traversal item
        """
        next_ctx = None
        if key == 'page':
            pass
        # Last resort, see if it is a valid month
        try:
            next_ctx = ArchiveYearMonth(key)
        except ValueError, e:
            next_ctx = None
        if next_ctx is None:
            raise KeyError
        else:
            # Wire the child into the traversal tree before returning it.
            next_ctx.__parent__ = self
            next_ctx._request = self._request
            return next_ctx
@implementer(IArchive)
class ArchiveYearMonth(object):
    """ArchiveYearMonth is the context for the year/month archives"""
    def __init__(self, month):
        """Initialises the context
        :month: The month we are getting archives for (int or numeric
            string); ValueError for a string outside 1-12
        """
        log.debug("Creating new ArchiveYearMonth: {}".format(month))
        if isinstance(month, int):
            # NOTE(review): integer input is not range-checked here.
            self.__name__ = '{}'.format(month)
            self.month = month
        if isinstance(month, basestring):
            self.__name__ = month
            try:
                self.month = int(month)
                # Out-of-range values reuse the except clause below.
                if self.month > 12 or self.month < 1:
                    raise ValueError
            except ValueError:
                raise ValueError('Month is not valid.')
    def __getitem__(self, key):
        """Return the next item in the traversal tree
        :key: The next item to look for
        :returns: The next traversal item
        """
        next_ctx = None
        if key == 'page':
            pass
        # Last resort, see if it is a valid day
        try:
            next_ctx = ArchiveYearMonthDay(key)
        except ValueError:
            next_ctx = None
        if next_ctx is None:
            raise KeyError
        else:
            # Wire the child into the traversal tree before returning it.
            next_ctx.__parent__ = self
            next_ctx._request = self._request
            return next_ctx
@implementer(IArchive)
class ArchiveYearMonthDay(object):
    """ArchiveYearMonthDay is the context for the year/month/day archives"""
    def __init__(self, day):
        """Initialises the context

        :day: The day we are getting archives for (int or numeric string)
        :raises ValueError: if a string *day* is not a number in 1-31
        """
        log.debug("Creating new ArchiveYearMonthDay: {}".format(day))
        if isinstance(day, int):
            # Bug fix: this used to format the undefined name ``month``,
            # raising NameError for any integer day.
            self.__name__ = '{}'.format(day)
            self.day = day
        if isinstance(day, basestring):
            self.__name__ = day
            try:
                self.day = int(day)
                if self.day > 31 or self.day < 1:
                    raise ValueError
            except ValueError:
                raise ValueError('Day is not valid.')
    def __getitem__(self, key):
        """Return the next item in the traversal tree

        :key: The next item to look for
        :returns: The next traversal item (an Entry built from the slug)
        """
        next_ctx = None
        if key == 'page':
            pass
        # Last resort, see if it is a valid slug
        try:
            next_ctx = Entry(key)
        except ValueError:
            next_ctx = None
        if next_ctx is None:
            raise KeyError
        else:
            next_ctx.__parent__ = self
            next_ctx._request = self._request
            return next_ctx
| Python | 0 |
f930a0d8a52ef493098390c38d27622e882a8203 | fix published_by typo | openedx/core/djangoapps/content/course_overviews/connector.py | openedx/core/djangoapps/content/course_overviews/connector.py | import MySQLdb
import logging
from django.conf import settings
logger = logging.getLogger(__name__)
class EdevateDbConnector:
    """Direct MySQL access to the external Edevate database.

    Connection parameters come from Django settings.  All queries are
    parameterised so the driver performs value escaping (SQL-injection
    safe), instead of the previous str.format() interpolation.
    """
    host = settings.EDEVATE_MYSQL_HOST
    port = settings.EDEVATE_MYSQL_PORT
    user = settings.EDEVATE_MYSQL_USER
    passwd = settings.EDEVATE_MYSQL_PASSWD
    db = settings.EDEVATE_MYSQL_DB_NAME
    def __init__(self):
        self.connection = None
        self.cursor = None
        self.connect()
    def connect(self):
        """Open the MySQL connection and create a cursor."""
        self.connection = MySQLdb.connect(host=self.host,
                                          port=self.port,
                                          user=self.user,
                                          passwd=self.passwd,
                                          db=self.db)
        self.cursor = self.connection.cursor()
    def close(self):
        """Close the cursor and connection, if a connection was opened."""
        if self.connection:
            self.cursor.close()
            self.connection.close()
    def get_edevate_user_id(self, user_email):
        """Return the first column of the Edevate user row for *user_email*.

        NOTE(review): raises TypeError when no such user exists
        (fetchone() returns None) -- behaviour kept as-is.
        """
        self.cursor.execute(
            "SELECT * FROM users_customuser WHERE email=%s;",
            (user_email,))
        edevate_user = self.cursor.fetchone()
        logger.debug("Get edevate user: {!r}".format(edevate_user))
        return edevate_user[0]
    def get_verification_course(self, openedx_course_id, published_by):
        """Fetch the verification row for a course/publisher pair, or None."""
        self.cursor.execute(
            "SELECT * FROM openedx_edxcourseverification"
            " WHERE openedx_course_id=%s AND published_by_id=%s;",
            (openedx_course_id, published_by))
        verification_course = self.cursor.fetchone()
        logger.debug("Get verification course: {!r}".format(
            verification_course)
        )
        return verification_course
    def create_verification_course(self, openedx_course_id, published_by_id):
        """Insert a new 'reviewable' verification row; return its id."""
        self.cursor.execute(
            "INSERT INTO openedx_edxcourseverification"
            " (openedx_course_id, status, published_by_id)"
            " VALUES (%s, 'reviewable', %s);",
            (openedx_course_id, published_by_id))
        self.connection.commit()
        return self.cursor.lastrowid
    def update_verification_course(self, openedx_course_id, published_by_id):
        """Reset an existing row to 'reviewable'; return affected row count."""
        self.cursor.execute(
            "UPDATE openedx_edxcourseverification SET status='reviewable'"
            " WHERE openedx_course_id=%s AND published_by_id=%s;",
            (openedx_course_id, published_by_id))
        affected_rows = self.connection.affected_rows()
        self.connection.commit()
        return affected_rows
    def update_or_create_verification_course(self,
                                             openedx_course_id,
                                             course_author):
        """Mark the course 'reviewable', creating the row if necessary."""
        published_by_id = self.get_edevate_user_id(course_author)
        verification_course = self.get_verification_course(openedx_course_id,
                                                           published_by_id)
        if verification_course:
            affected_rows = self.update_verification_course(openedx_course_id,
                                                            published_by_id)
            logger.debug("Update verification course: {}".format(
                affected_rows)
            )
        else:
            affected_rows = self.create_verification_course(openedx_course_id,
                                                            published_by_id)
            logger.debug("Create verification course: {}".format(
                affected_rows)
            )
        return affected_rows
| import MySQLdb
import logging
from django.conf import settings
logger = logging.getLogger(__name__)
class EdevateDbConnector:
    """Direct MySQL access to the external Edevate database.

    Connection parameters come from Django settings.  All queries are
    parameterised so the driver performs value escaping (SQL-injection
    safe), instead of the previous str.format() interpolation.
    """
    host = settings.EDEVATE_MYSQL_HOST
    port = settings.EDEVATE_MYSQL_PORT
    user = settings.EDEVATE_MYSQL_USER
    passwd = settings.EDEVATE_MYSQL_PASSWD
    db = settings.EDEVATE_MYSQL_DB_NAME
    def __init__(self):
        self.connection = None
        self.cursor = None
        self.connect()
    def connect(self):
        """Open the MySQL connection and create a cursor."""
        self.connection = MySQLdb.connect(host=self.host,
                                          port=self.port,
                                          user=self.user,
                                          passwd=self.passwd,
                                          db=self.db)
        self.cursor = self.connection.cursor()
    def close(self):
        """Close the cursor and connection, if a connection was opened."""
        if self.connection:
            self.cursor.close()
            self.connection.close()
    def get_edevate_user_id(self, user_email):
        """Return the first column of the Edevate user row for *user_email*.

        NOTE(review): raises TypeError when no such user exists
        (fetchone() returns None) -- behaviour kept as-is.
        """
        self.cursor.execute(
            "SELECT * FROM users_customuser WHERE email=%s;",
            (user_email,))
        edevate_user = self.cursor.fetchone()
        logger.debug("Get edevate user: {!r}".format(edevate_user))
        return edevate_user[0]
    def get_verification_course(self, openedx_course_id, published_by):
        """Fetch the verification row for a course/publisher pair, or None.

        Bug fix: the WHERE clause previously filtered on 'published_by',
        but the column used everywhere else is 'published_by_id'.
        """
        self.cursor.execute(
            "SELECT * FROM openedx_edxcourseverification"
            " WHERE openedx_course_id=%s AND published_by_id=%s;",
            (openedx_course_id, published_by))
        verification_course = self.cursor.fetchone()
        logger.debug("Get verification course: {!r}".format(
            verification_course)
        )
        return verification_course
    def create_verification_course(self, openedx_course_id, published_by_id):
        """Insert a new 'reviewable' verification row; return its id."""
        self.cursor.execute(
            "INSERT INTO openedx_edxcourseverification"
            " (openedx_course_id, status, published_by_id)"
            " VALUES (%s, 'reviewable', %s);",
            (openedx_course_id, published_by_id))
        self.connection.commit()
        return self.cursor.lastrowid
    def update_verification_course(self, openedx_course_id, published_by_id):
        """Reset an existing row to 'reviewable'; return affected row count."""
        self.cursor.execute(
            "UPDATE openedx_edxcourseverification SET status='reviewable'"
            " WHERE openedx_course_id=%s AND published_by_id=%s;",
            (openedx_course_id, published_by_id))
        affected_rows = self.connection.affected_rows()
        self.connection.commit()
        return affected_rows
    def update_or_create_verification_course(self,
                                             openedx_course_id,
                                             course_author):
        """Mark the course 'reviewable', creating the row if necessary."""
        published_by_id = self.get_edevate_user_id(course_author)
        verification_course = self.get_verification_course(openedx_course_id,
                                                           published_by_id)
        if verification_course:
            affected_rows = self.update_verification_course(openedx_course_id,
                                                            published_by_id)
            logger.debug("Update verification course: {}".format(
                affected_rows)
            )
        else:
            affected_rows = self.create_verification_course(openedx_course_id,
                                                            published_by_id)
            logger.debug("Create verification course: {}".format(
                affected_rows)
            )
        return affected_rows
| Python | 0 |
d01d12a0cbe286d808c9870b71374b36c0585230 | simplify newline handling | weka_weca/__init__.py | weka_weca/__init__.py |
class Node:
    """One if-block of the generated decision-tree source."""

    def __init__(self, start, end, depth=0, indent='  '):
        """
        Wrap the opening and closing lines of a condition block.

        Parameters
        ----------
        :param start : string
            The start of the if-condition.
        :param end : string
            The end of the if-condition.
        :param depth : integer
            The indentation depth.
        :param indent : string
            The indentation style.
        """
        self.start = start
        self.end = end
        self.depth = depth
        self.indent = indent
        # Child nodes and/or literal lines nested inside this block.
        self.scope = []

    def __str__(self):
        pad = self.indent * self.depth
        inner = '\n'.join(str(child) for child in self.scope)
        return '\n'.join([pad + self.start, inner, pad + self.end])
def port(path, method_name='classify'):
    """
    Convert a single exported decision tree into a C-like function string.

    Parameters
    ----------
    :param path : string
        The path of the exported text file.
    :param method_name : string (default='classify')
        The name of the generated function.
    :return: string
        Source code of the generated classifier function.
    """
    # Load data (renamed from 'file' to avoid shadowing the builtin):
    with open(path, 'r') as tree_file:
        content = tree_file.readlines()
    # Create root node:
    root = Node('', '')
    atts = []
    # Construct tree:
    for line in content:
        line = line.strip()
        depth = line.count('|   ')
        # Get current node by walking down the most recently added branch:
        node = None
        d = depth
        if d > 0:
            while d > 0:
                node = root.scope[-1] if node is None else node.scope[-1]
                d -= 1
        else:
            node = root.scope
        # Get always the scope list:
        if type(node) is not list:
            node = node.scope
        # Build the condition:
        cond = line[(depth * len('|   ')):]
        has_return = line.count(':') == 1
        if has_return:
            # Leaf line: strip the class label and remember the attribute.
            cond = cond.split(':')[0]
            atts.append(cond.split(' ')[0])
        cond = Node('if (%s) {' % cond, '}', depth=depth + 1)
        # Set condition logic:
        if has_return:
            indent = cond.indent * (depth + 2)
            return_value = line[line.find(':') + 1 : line.find('(')].strip()
            return_value = indent + 'return %s;' % str(return_value)
            cond.scope.append(return_value)
        node.append(cond)
    # Merge the relevant attributes:
    atts = list(set(atts))
    atts.sort()
    atts = ', '.join(['float ' + a for a in atts])
    # Wrap function scope around built tree.
    # Bug fix: the signature used to be rendered as
    # 'int <name> function(...)', which is not a valid C declaration.
    result = ''.join(['int %s(%s) {' % (method_name, atts), str(root), '}'])
    return result
|
class Node:
    """A single if-block in the generated decision-tree code."""

    def __init__(self, start, end, depth=0, indent='  '):
        """
        Create a node wrapping the given opening/closing lines.

        Parameters
        ----------
        :param start : string
            The start of the if-condition.
        :param end : string
            The end of the if-condition.
        :param depth : integer
            The indentation depth.
        :param indent : string
            The indentation style.
        """
        self.start = start
        self.end = end
        self.scope = []  # nested child nodes / literal lines
        self.depth = depth
        self.indent = indent

    def __str__(self):
        prefix = self.depth * self.indent
        body = '\n'.join(map(str, self.scope))
        return '\n'.join([prefix + self.start, body, prefix + self.end])
def port(path, method_name='classify'):
    """
    Convert a single exported decision tree into a C-like function string.

    Parameters
    ----------
    :param path : string
        The path of the exported text file.
    :param method_name : string (default='classify')
        The name of the generated function.
    :return: string
        Source code of the generated classifier function.
    """
    # Load data (renamed from 'file' to avoid shadowing the builtin):
    with open(path, 'r') as tree_file:
        content = tree_file.readlines()
    # Create root node:
    root = Node('', '')
    atts = []
    # Construct tree:
    for line in content:
        line = line.strip()
        depth = line.count('|   ')
        # Get current node by walking down the most recently added branch:
        node = None
        d = depth
        if d > 0:
            while d > 0:
                node = root.scope[-1] if node is None else node.scope[-1]
                d -= 1
        else:
            node = root.scope
        # Get always the scope list:
        if type(node) is not list:
            node = node.scope
        # Build the condition:
        cond = line[(depth * len('|   ')):]
        has_return = line.count(':') == 1
        if has_return:
            # Leaf line: strip the class label and remember the attribute.
            cond = cond.split(':')[0]
            atts.append(cond.split(' ')[0])
        cond = Node('if (%s) {' % cond, '}', depth=depth + 1)
        # Set condition logic:
        if has_return:
            indent = cond.indent * (depth + 2)
            return_value = line[line.find(':') + 1 : line.find('(')].strip()
            return_value = indent + 'return %s;' % str(return_value)
            cond.scope.append(return_value)
        node.append(cond)
    # Merge the relevant attributes:
    atts = list(set(atts))
    atts.sort()
    atts = ', '.join(['float ' + a for a in atts])
    # Wrap function scope around built tree.
    # Bug fix: the signature used to be rendered as
    # 'int <name> function(...)', which is not a valid C declaration.
    result = ''.join(['int %s(%s) {' % (method_name, atts), str(root), '}'])
    return result
| Python | 0.000147 |
9a21c446f1236e1b89663c991ea354d8e473b3b9 | Fix a copyright and pep8 issues in lanzano_luzi_2019_test.py | openquake/hazardlib/tests/gsim/lanzano_luzi_2019_test.py | openquake/hazardlib/tests/gsim/lanzano_luzi_2019_test.py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2019 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Implements the tests for the set of GMPE classes included within the GMPE
of Lanzano and Luzi (2019). Test tables were created by an excel spreadsheet
that calculates expected values provided by the original authors.
"""
from openquake.hazardlib.gsim.lanzano_luzi_2019 import (LanzanoLuzi2019shallow,
LanzanoLuzi2019deep)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
# Discrepancy percentages to be applied to all tests
class LanzanoLuzi2019shallowTestCase(BaseGSIMTestCase):
    """
    Tests the Lanzano and Luzi (2019) GMPE for the case of shallow events.
    """
    GSIM_CLASS = LanzanoLuzi2019shallow
    # File containing the results for the Mean
    MEAN_FILE = "LL19/LanzanoLuzi2019shallow_MEAN.csv"
    # File containing the results for the Total Standard Deviation
    STD_FILE = "LL19/LanzanoLuzi2019shallow_STD_TOTAL.csv"
    def test_mean(self):
        # Allow up to 0.1% discrepancy against the expected-value table.
        self.check(self.MEAN_FILE,
                   max_discrep_percentage=0.1)
    def test_std_total(self):
        self.check(self.STD_FILE,
                   max_discrep_percentage=0.1)
class LanzanoLuzi2019deepTestCase(BaseGSIMTestCase):
    """
    Tests the Lanzano and Luzi (2019) GMPE for the case of deep events.
    """
    GSIM_CLASS = LanzanoLuzi2019deep
    # Expected mean values table
    MEAN_FILE = "LL19/LanzanoLuzi2019deep_MEAN.csv"
    # Expected total standard deviation table
    STD_FILE = "LL19/LanzanoLuzi2019deep_STD_TOTAL.csv"
    def test_mean(self):
        # Allow up to 0.1% discrepancy against the expected-value table.
        self.check(self.MEAN_FILE,
                   max_discrep_percentage=0.1)
    def test_std_total(self):
        self.check(self.STD_FILE,
                   max_discrep_percentage=0.1)
| # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2019 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Implements the tests for the set of GMPE classes included within the GMPE
of Lanzano and Luzi (2019). Test tables were created by an excel spreadsheet
that calculates expected values provided by the original authors.
"""
from openquake.hazardlib.gsim.lanzano_luzi_2019 import (LanzanoLuzi2019shallow,
LanzanoLuzi2019deep)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
# Discrepancy percentages to be applied to all tests
class LanzanoLuzi2019shallowTestCase(BaseGSIMTestCase):
"""
Tests the Lanzano and Luzi (2019) GMPE for the case of shallow events.
"""
GSIM_CLASS = LanzanoLuzi2019shallow
# File containing the results for the Mean
MEAN_FILE = "LL19/LanzanoLuzi2019shallow_MEAN.csv"
# File contaning the results for the Total Standard Deviation
STD_FILE = "LL19/LanzanoLuzi2019shallow_STD_TOTAL.csv"
def test_mean(self):
self.check(self.MEAN_FILE,
max_discrep_percentage=0.1)
def test_std_total(self):
self.check(self.STD_FILE,
max_discrep_percentage=0.1)
class LanzanoLuzi2019deepTestCase(BaseGSIMTestCase):
"""
Tests the Lanzano and Luzi (2019) GMPE for the case of deep events.
"""
GSIM_CLASS = LanzanoLuzi2019deep
MEAN_FILE = "LL19/LanzanoLuzi2019deep_MEAN.csv"
STD_FILE = "LL19/LanzanoLuzi2019deep_STD_TOTAL.csv"
def test_mean(self):
self.check(self.MEAN_FILE,
max_discrep_percentage=0.1)
def test_std_total(self):
self.check(self.STD_FILE,
max_discrep_percentage=0.1)
| Python | 0.001416 |
03ed43d7d8867ba066d9eea3b3fc7cfe557a31d9 | Use C++ | test/setup.py | test/setup.py | from distutils.core import setup, Extension
# C extension used to exercise the py3c compatibility headers.
test_py3c_module = Extension(
    'test_py3c',
    sources=['test_py3c.c'],
    include_dirs=['../include'],
    # Build the extension as C++ (C++0x/C++11 dialect).
    # NOTE(review): '-l mylib' is a *linker* option (and is normally spelled
    # '-lmylib' with no space); passing it via extra_compile_args is almost
    # certainly ineffective — confirm whether it belongs in
    # ``libraries=['mylib']`` instead.
    extra_compile_args = ['--std=c++0x', '-l mylib'],
)

setup_args = dict(
    name='test_py3c',
    version='0.0',
    description = '',
    ext_modules = [test_py3c_module]
)

if __name__ == '__main__':
    setup(**setup_args)
| from distutils.core import setup, Extension
test_py3c_module = Extension(
'test_py3c',
sources=['test_py3c.c'],
include_dirs=['../include'],
)
setup_args = dict(
name='test_py3c',
version='0.0',
description = '',
ext_modules = [test_py3c_module]
)
if __name__ == '__main__':
setup(**setup_args)
| Python | 0.000211 |
2543709c204f1dd6aca5d012e7c28193631bb74c | Use postgres standard env vars | gargbot_3000/config.py | gargbot_3000/config.py | #! /usr/bin/env python3.6
# coding: utf-8
import os
import datetime as dt
from pathlib import Path
import pytz
from dotenv import load_dotenv
# Load configuration from a local .env file (python-dotenv), then read the
# individual settings from the process environment.  os.environ[...] lookups
# raise KeyError when a setting is missing; only home_folder has a default.
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)

# Slack credentials and bot identity.
slack_verification_token = os.environ["slack_verification_token"]
slack_bot_user_token = os.environ["slack_bot_user_token"]
bot_id = os.environ["bot_id"]
bot_name = os.environ["bot_name"]

home = Path(os.getenv("home_folder", os.getcwd()))

# Postgres connection, using the standard libpq-style variable names.
db_name = os.environ["POSTGRES_DB"]
db_user = os.environ["POSTGRES_USER"]
db_password = os.environ["POSTGRES_PASSWORD"]
db_host = os.environ["POSTGRES_HOST"]

# Dropbox storage for pictures.
dropbox_token = os.environ["dropbox_token"]
dbx_pic_folder = os.environ["dbx_pic_folder"]

tz = pytz.timezone(os.environ["tz"])

# Slack channels and canned message templates.
test_channel = os.environ["test_channel"]
main_channel = os.environ["main_channel"]
countdown_message = os.environ["countdown_message"]
ongoing_message = os.environ["ongoing_message"]
finished_message = os.environ["finished_message"]
forum_url = os.environ["forum_url"]

# Countdown target: a unix timestamp, localized to the configured timezone.
countdown_date = dt.datetime.fromtimestamp(int(os.environ["countdown_date"]), tz=tz)
countdown_args = os.environ["countdown_args"].split(", ")
| #! /usr/bin/env python3.6
# coding: utf-8
import os
import datetime as dt
from pathlib import Path
import pytz
from dotenv import load_dotenv
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
slack_verification_token = os.environ["slack_verification_token"]
slack_bot_user_token = os.environ["slack_bot_user_token"]
bot_id = os.environ["bot_id"]
bot_name = os.environ["bot_name"]
home = Path(os.getenv("home_folder", os.getcwd()))
db_name = os.environ["db_name"]
db_user = os.environ["db_user"]
db_password = os.environ["db_password"]
db_host = os.environ["db_host"]
dropbox_token = os.environ["dropbox_token"]
dbx_pic_folder = os.environ["dbx_pic_folder"]
tz = pytz.timezone(os.environ["tz"])
test_channel = os.environ["test_channel"]
main_channel = os.environ["main_channel"]
countdown_message = os.environ["countdown_message"]
ongoing_message = os.environ["ongoing_message"]
finished_message = os.environ["finished_message"]
forum_url = os.environ["forum_url"]
countdown_date = dt.datetime.fromtimestamp(int(os.environ["countdown_date"]), tz=tz)
countdown_args = os.environ["countdown_args"].split(", ")
| Python | 0 |
58e234ad7961e955726152436bcb3c8f270564c4 | fix py33 error in setup.py | vendor.py | vendor.py | import subprocess
import os
from os import path
import re
import traceback
import sys
error_msg = """
This library depends on sources fetched when packaging that failed to be
retrieved.
This means that it will *not* work as expected. Errors encountered:
"""
def run(cmd):
sys.stdout.write('[vendoring] Running command: %s\n' % ' '.join(cmd))
try:
result = subprocess.Popen(
cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE
)
except Exception:
# if building with python2.5 this makes it compatible
_, error, _ = sys.exc_info()
print_error([], traceback.format_exc(error).split('\n'))
raise SystemExit(1)
if result.wait():
print_error(result.stdout.readlines(), result.stderr.readlines())
def print_error(stdout, stderr):
    """Print a framed error report (banner, message, captured output) to stderr.

    ``stdout``/``stderr`` are iterables of output lines from a failed
    subprocess; they may be ``str`` lines or (on Python 3) ``bytes``.
    """
    def _write(line):
        # subprocess pipes yield bytes on Python 3; decode so we do not
        # print repr()-style b'...' noise.
        if isinstance(line, bytes):
            line = line.decode('utf-8', 'replace')
        sys.stderr.write(line.rstrip('\n') + '\n')

    # One 80-character banner line.  The previous '*\n' * 80 emitted eighty
    # one-character lines, which was clearly not the intent — the closing
    # banner below has always been a single row of asterisks.
    sys.stderr.write('*' * 80 + '\n')
    sys.stderr.write(str(error_msg) + '\n')
    for line in stdout:
        _write(line)
    for line in stderr:
        _write(line)
    sys.stderr.write('*' * 80 + '\n')
def vendor_library(name, version, git_repo):
    """Clone *git_repo*, check out *version*, and install the package under
    ``remoto/lib/vendor/<name>``.

    If an already-vendored copy exists, its ``__version__`` (scraped from the
    package ``__init__.py``) is compared to *version*; a mismatch removes the
    stale copy so it is re-vendored.  The working directory is temporarily
    changed into the clone so ``git checkout`` runs inside the repository,
    and restored afterwards.
    """
    this_dir = path.dirname(path.abspath(__file__))
    vendor_dest = path.join(this_dir, 'remoto/lib/vendor/%s' % name)
    vendor_init = path.join(vendor_dest, '__init__.py')
    vendor_src = path.join(this_dir, name)        # temporary git clone
    vendor_module = path.join(vendor_src, name)   # package dir inside the clone
    current_dir = os.getcwd()

    # Always start from a clean clone directory.
    if path.exists(vendor_src):
        run(['rm', '-rf', vendor_src])

    if path.exists(vendor_init):
        # Scrape dunder metadata (e.g. __version__ = "x.y") from the vendored
        # package and drop the copy if it does not match the pinned version.
        module_file = open(vendor_init).read()
        metadata = dict(re.findall(r"__([a-z]+)__\s*=\s*['\"]([^'\"]*)['\"]", module_file))
        if metadata.get('version') != version:
            run(['rm', '-rf', vendor_dest])

    if not path.exists(vendor_dest):
        run(['git', 'clone', git_repo])
        os.chdir(vendor_src)
        run(['git', 'checkout', version])
        run(['mv', vendor_module, vendor_dest])

    os.chdir(current_dir)
def clean_vendor(name):
    """Remove any vendored copy of *name* from ``remoto/lib/vendor``.

    Used when packaging with vendoring disabled, to guarantee that no stale
    vendored sources ship in the build.
    """
    base_dir = path.dirname(path.abspath(__file__))
    target = path.join(base_dir, 'remoto/lib/vendor/%s' % name)
    run(['rm', '-rf', target])
def vendorize(vendor_requirements):
    """Vendor every requirement in *vendor_requirements*.

    Each entry is a ``(name, version, git_repo)`` tuple, e.g.::

        vendor_requirements = [
            ('foo', '0.0.1', 'https://example.com/git_repo'),
        ]
    """
    for name, version, repo in vendor_requirements:
        vendor_library(name, version, repo)
if __name__ == '__main__':
    # XXX define this in one place, so that we avoid making updates
    # in two places
    # Pinned vendored dependencies: (package name, git tag, repository URL).
    vendor_requirements = [
        ('execnet', '1.2.post1', 'https://github.com/alfredodeza/execnet'),
    ]
    vendorize(vendor_requirements)
| import subprocess
import os
from os import path
import re
import traceback
import sys
error_msg = """
This library depends on sources fetched when packaging that failed to be
retrieved.
This means that it will *not* work as expected. Errors encountered:
"""
def run(cmd):
sys.stdout.write('[vendoring] Running command: %s\n' % ' '.join(cmd))
try:
result = subprocess.Popen(
cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE
)
except Exception:
# if building with python2.5 this makes it compatible
_, error, _ = sys.exc_info()
print_error([], traceback.format_exc(error).split('\n'))
raise SystemExit(1)
if result.wait():
print_error(result.stdout.readlines(), result.stderr.readlines())
def print_error(stdout, stderr):
sys.stderr.write('*\n'*80)
sys.stderr.write(error_msg+'\n')
for line in stdout:
sys.stderr.write(line+'\n')
for line in stderr:
sys.stderr.write(line+'\n')
sys.stderr.write('*'*80+'\n')
def vendor_library(name, version, git_repo):
this_dir = path.dirname(path.abspath(__file__))
vendor_dest = path.join(this_dir, 'remoto/lib/vendor/%s' % name)
vendor_init = path.join(vendor_dest, '__init__.py')
vendor_src = path.join(this_dir, name)
vendor_module = path.join(vendor_src, name)
current_dir = os.getcwd()
if path.exists(vendor_src):
run(['rm', '-rf', vendor_src])
if path.exists(vendor_init):
module_file = open(vendor_init).read()
metadata = dict(re.findall(r"__([a-z]+)__\s*=\s*['\"]([^'\"]*)['\"]", module_file))
if metadata.get('version') != version:
run(['rm', '-rf', vendor_dest])
if not path.exists(vendor_dest):
run(['git', 'clone', git_repo])
os.chdir(vendor_src)
run(['git', 'checkout', version])
run(['mv', vendor_module, vendor_dest])
os.chdir(current_dir)
def clean_vendor(name):
"""
Ensure that vendored code/dirs are removed, possibly when packaging when
the environment flag is set to avoid vendoring.
"""
this_dir = path.dirname(path.abspath(__file__))
vendor_dest = path.join(this_dir, 'remoto/lib/vendor/%s' % name)
run(['rm', '-rf', vendor_dest])
def vendorize(vendor_requirements):
"""
This is the main entry point for vendorizing requirements. It expects
a list of tuples that should contain the name of the library and the
version.
For example, a library ``foo`` with version ``0.0.1`` would look like::
vendor_requirements = [
('foo', '0.0.1', 'https://example.com/git_repo'),
]
"""
for library in vendor_requirements:
name, version, repo = library
vendor_library(name, version, repo)
if __name__ == '__main__':
# XXX define this in one place, so that we avoid making updates
# in two places
vendor_requirements = [
('execnet', '1.2.post1', 'https://github.com/alfredodeza/execnet'),
]
vendorize(vendor_requirements)
| Python | 0 |
281fd926786186e8f0b1ebc7d8aeb1c362310fc1 | Remove unused variable | viewer.py | viewer.py | import sys
import pygame
import pygame.locals
pygame.init()
size = width, height = 575, 575
screen = pygame.display.set_mode(size)
label_lights = False
def up_row(x_offset):
    """Yield 20 (x, y) light positions zig-zagging upward from *x_offset*.

    Odd steps shift half a unit right; the height grows half a unit per step.
    """
    for step in range(20):
        yield x_offset + (step % 2) * 0.5, step * 0.5
def down_row(x_offset):
    """Yield 20 (x, y) light positions zig-zagging downward from *x_offset*.

    Even steps shift half a unit right; the height shrinks from 9.5 by half
    a unit per step (mirror of ``up_row`` for the return leg of a strip pair).
    """
    for step in range(20):
        yield x_offset + ((step + 1) % 2) * 0.5, 9.5 - step * 0.5
# Build the flat list of light positions: strips are laid out in pairs, one
# running up and the next running down (a serpentine layout of 10 strips of
# 20 lights each = 200 lights total).
pos_list = []
for strip_pair in range(5):
    pos_list += list(up_row(2 * strip_pair))
    pos_list += list(down_row(2 * strip_pair + 1))

# Map light index (0..199) to its (x, y) grid position.
positions = {i: v for i, v in enumerate(pos_list)}
def get_color(i):
    """Map light index *i* (0..199) onto a blue-to-red gradient (r, g, b)."""
    red_level = 255 * (i / 199.0)
    blue_level = 255 * ((199 - i) / 199.0)
    return (int(red_level), 0, int(blue_level))
def get_screen_pos(x, y):
    """Convert grid coordinates to screen pixel coordinates.

    Pygame's origin is the upper-left corner, so the y axis is flipped:
    grid (0, 0) lands near the lower-left of the window.  Scale is 50 px
    per grid unit with a 50 px margin; depends on the module-level ``width``.
    """
    pixel_x = int(50 * x + 50)
    pixel_y = int(width - 50 - 50 * y)
    return (pixel_x, pixel_y)
myfont = pygame.font.SysFont("monospace", 15)

import struct
# Raw frame stream: 3 bytes (R, G, B) per light, read sequentially.
data = open('Resources/video.bin', 'rb')

import time
while True:
    # Let the window close cleanly on a QUIT event.
    for event in pygame.event.get():
        if event.type == pygame.locals.QUIT:
            sys.exit(0)
    for k, v in positions.items():
        x, y = v
        pos = get_screen_pos(x, y)
        # Read this light's colour for the current frame.
        # NOTE(review): at end-of-file read(1) returns an empty string/bytes
        # and ord() raises — confirm the stream is expected to outlast the
        # viewer or whether looping/EOF handling is needed.
        r = ord(data.read(1))
        g = ord(data.read(1))
        b = ord(data.read(1))
        color = (r,g,b)
        pygame.draw.circle(screen, color, pos, 10)
        if label_lights:
            # Optionally overlay the light's index for debugging layout.
            label = myfont.render(str(k), 1, (255, 255, 255))
            screen.blit(label, pos)
    pygame.display.update()
    time.sleep(0.05)  # ~20 frames per second
| import sys
import pygame
import pygame.locals
pygame.init()
size = width, height = 575, 575
screen = pygame.display.set_mode(size)
label_lights = False
def up_row(x_offset):
for i in range(20):
x = x_offset + (i % 2) * 0.5
y = i * 0.5
yield x, y
def down_row(x_offset):
for i in range(20):
x = x_offset + ((i+ 1) % 2) * 0.5
y = 9.5 - (i * 0.5)
yield x, y
pos_list = []
for strip_pair in range(5):
pos_list += list(up_row(2 * strip_pair))
pos_list += list(down_row(2 * strip_pair + 1))
positions = {i: v for i, v in enumerate(pos_list)}
red = (255, 0, 0)
def get_color(i):
red = 255 * (i / 199.0)
green = 0
blue = 255 * ((199-i) / 199.0)
c = (int(red), int(green), int(blue))
return c
def get_screen_pos(x, y):
# upper_left is 0,0
# bottom left is 0, width
scaled_x = (int)(50*x+50)
scaled_y = (int)(width - 50 - (50*y))
return (scaled_x, scaled_y)
myfont = pygame.font.SysFont("monospace", 15)
import struct
data = open('Resources/video.bin', 'rb')
import time
while True:
for event in pygame.event.get():
if event.type == pygame.locals.QUIT:
sys.exit(0)
for k, v in positions.items():
x, y = v
pos = get_screen_pos(x, y)
r = ord(data.read(1))
g = ord(data.read(1))
b = ord(data.read(1))
color = (r,g,b)
pygame.draw.circle(screen, color, pos, 10)
if label_lights:
label = myfont.render(str(k), 1, (255, 255, 255))
screen.blit(label, pos)
pygame.display.update()
time.sleep(0.05)
| Python | 0.000015 |
30088e034e32209e32e65218974ee717c718b1ab | Add start and stop entry points to sims. | wmt/flask/api/sims.py | wmt/flask/api/sims.py | import os
from flask import Blueprint
from flask import json, url_for, current_app
from flask import g, request, abort, send_file
from flask.ext.login import current_user, login_required
from ..utils import as_resource, as_collection
from ..errors import InvalidFieldError, AuthorizationError
from ..services import sims, users
from ..core import deserialize_request
from ..tasks import exec_remote_wmt
sims_page = Blueprint('sims', __name__)
def assert_owner_or_raise(sim):
    """Raise AuthorizationError unless the logged-in user owns *sim*."""
    current = users.first(username=current_user.get_id())
    if current.id != sim.owner:
        raise AuthorizationError()
@sims_page.route('/')
def show():
sort = request.args.get('sort', 'id')
order = request.args.get('order', 'asc')
return sims.jsonify_collection(sims.all(sort=sort, order=order))
@sims_page.route('/', methods=['POST'])
@login_required
def new():
data = deserialize_request(request, fields=['name', 'model'])
user = users.first(username=current_user.get_id())
sim = sims.create(data['name'], data['model'], owner=user.id)
sim.create_stage_dir()
return sim.jsonify()
@sims_page.route('/<int:id>')
def sim(id):
return sims.get_or_404(id).jsonify()
@sims_page.route('/<int:id>', methods=['PATCH', 'PUT'])
@login_required
def update(id):
sim = sims.get_or_404(id)
assert_owner_or_raise(sim)
kwds = dict(fields=['status', 'message'])
if request.method == 'PATCH':
kwds['require'] = 'some'
data = deserialize_request(request, **kwds)
sims.update_status(id, **data) or abort(401)
return sim.jsonify()
@sims_page.route('/<int:id>', methods=['DELETE'])
@login_required
def delete(id):
sim = sims.get_or_404(id)
user = users.first(username=current_user.get_id())
if user.id != sim.owner:
raise AuthorizationError()
sims.delete(sim)
return "", 204
@sims_page.route('/<int:id>/files')
def files(id):
import tempfile, tarfile, shutil
format = request.args.get('format', 'gztar')
sim = sims.get_or_404(id)
try:
tmpdir = tempfile.mkdtemp(prefix='wmt', suffix='.d')
except:
raise
else:
archive = os.path.join(tmpdir, str(sim.uuid))
name = shutil.make_archive(archive, format,
current_app.config['STAGE_DIR'], sim.uuid)
return send_file(name, attachment_filename=os.path.basename(name),
as_attachment=True)
finally:
shutil.rmtree(tmpdir)
@sims_page.route('/<int:id>/start', methods=['POST'])
def start(id):
sim = sims.get_or_404(id)
data = deserialize_request(request, fields=['host',
'username',
'password'])
hosts = current_app.config['WMT_EXEC_HOSTS']
if data['host'] not in hosts:
raise InvalidFieldError('start', 'host')
else:
host_config = hosts[data['host']]
return exec_remote_wmt(data['host'], sim.uuid,
username=data['username'],
password=data['password'],
which_wmt_exe=host_config['which_wmt_exe'])
@sims_page.route('/<int:id>/stop', methods=['POST'])
def stop(id):
sim = sims.get_or_404(id)
stop_simulation(sim.uuid)
return '', 204
| import os
from flask import Blueprint
from flask import json, url_for, current_app
from flask import g, request, abort, send_file
from flask.ext.login import current_user, login_required
from ..utils import as_resource, as_collection
from ..errors import InvalidFieldError, AuthorizationError
from ..services import sims, users
from ..core import deserialize_request
sims_page = Blueprint('sims', __name__)
def assert_owner_or_raise(sim):
user = users.first(username=current_user.get_id())
if user.id != sim.owner:
raise AuthorizationError()
@sims_page.route('/')
def show():
sort = request.args.get('sort', 'id')
order = request.args.get('order', 'asc')
return sims.jsonify_collection(sims.all(sort=sort, order=order))
@sims_page.route('/', methods=['POST'])
@login_required
def new():
data = deserialize_request(request, fields=['name', 'model'])
user = users.first(username=current_user.get_id())
sim = sims.create(data['name'], data['model'], owner=user.id)
sim.create_stage_dir()
return sim.jsonify()
@sims_page.route('/<int:id>')
def sim(id):
return sims.get_or_404(id).jsonify()
@sims_page.route('/<int:id>', methods=['PATCH', 'PUT'])
@login_required
def update(id):
sim = sims.get_or_404(id)
assert_owner_or_raise(sim)
kwds = dict(fields=['status', 'message'])
if request.method == 'PATCH':
kwds['require'] = 'some'
data = deserialize_request(request, **kwds)
sims.update_status(id, **data) or abort(401)
return sim.jsonify()
@sims_page.route('/<int:id>', methods=['DELETE'])
@login_required
def delete(id):
sim = sims.get_or_404(id)
user = users.first(username=current_user.get_id())
if user.id != sim.owner:
raise AuthorizationError()
sims.delete(sim)
return "", 204
@sims_page.route('/<int:id>/files')
def files(id):
import tempfile, tarfile, shutil
format = request.args.get('format', 'gztar')
sim = sims.get_or_404(id)
try:
tmpdir = tempfile.mkdtemp(prefix='wmt', suffix='.d')
except:
raise
else:
archive = os.path.join(tmpdir, str(sim.uuid))
name = shutil.make_archive(archive, format,
current_app.config['STAGE_DIR'], sim.uuid)
return send_file(name, attachment_filename=os.path.basename(name),
as_attachment=True)
finally:
shutil.rmtree(tmpdir)
@sims_page.route('/<int:id>/actions', methods=['POST'])
def actions(id):
if request.method == 'POST':
data = deserialize_request(request, fields=['action'])
if data['action'] == 'start':
sims.start(id)
elif data['action'] == 'stop':
sims.stop(id)
else:
raise InvalidFieldError('sim', 'action')
| Python | 0 |
8c026f79c223091c06deb0589dbf0ee342f4f0e5 | Change skeleton #2. | wood_site/settings.py | wood_site/settings.py | """
Django settings for wood_site project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8c$$$u7^8%7uwjwqj4ob+8$@ao*)i^m@hwt^cx1c3ht)n@c(a0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'wood',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'wood_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wood_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'wood',
'USER': 'postgres',
'PASSWORD': '123',
'HOST': '',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'Wood/static'),
) | """
Django settings for wood_site project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8c$$$u7^8%7uwjwqj4ob+8$@ao*)i^m@hwt^cx1c3ht)n@c(a0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'wood',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'wood_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wood_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'wood',
'USER': 'postgres',
'PASSWORD': '123',
'HOST': '',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| Python | 0 |
2321dd5b0afedb9bb4a6e894149dd636174adf2c | Bump version to 4.0.1 | stix2elevator/version.py | stix2elevator/version.py | __version__ = "4.0.1"
| __version__ = "4.0.0"
| Python | 0 |
282ac04e49c6adef237ea30fa4dcae64e6f959d8 | Support for non-blank server roots | stronghold/middleware.py | stronghold/middleware.py | from django.contrib.auth.decorators import login_required
from stronghold import conf
class LoginRequiredMiddleware(object):
    """Require authentication for every view by default.

    A view is exempt when the ``@public`` decorator has tagged it
    (``STRONGHOLD_IS_PUBLIC``), or when the request path matches one of the
    compiled regexes listed in the ``STRONGHOLD_PUBLIC_URLS`` setting.
    """

    def __init__(self, *args, **kwargs):
        # Pre-compiled regexes of publicly reachable URLs (may be absent).
        self.public_view_urls = getattr(conf, 'STRONGHOLD_PUBLIC_URLS', ())

    def process_view(self, request, view_func, view_args, view_kwargs):
        # Authenticated requests pass straight through.
        if request.user.is_authenticated():
            return None

        # Views explicitly marked public are exempt.
        if getattr(view_func, 'STRONGHOLD_IS_PUBLIC', None):
            return None

        # Whitelisted URL patterns are exempt as well.
        for pattern in self.public_view_urls:
            if pattern.match(request.path_info):
                return None

        # Everything else must log in first.
        return login_required(view_func)(request, *view_args, **view_kwargs)
| from django.contrib.auth.decorators import login_required
from stronghold import conf
class LoginRequiredMiddleware(object):
"""
Force all views to use login required
View is deemed to be public if the @public decorator is applied to the view
View is also deemed to be Public if listed in in django settings in the
STRONGHOLD_PUBLIC_URLS dictionary
each url in STRONGHOLD_PUBLIC_URLS must be a valid regex
"""
def __init__(self, *args, **kwargs):
self.public_view_urls = getattr(conf, 'STRONGHOLD_PUBLIC_URLS', ())
def process_view(self, request, view_func, view_args, view_kwargs):
# if request is authenticated, dont process it
if request.user.is_authenticated():
return None
# if its a public view, don't process it
is_public = getattr(view_func, 'STRONGHOLD_IS_PUBLIC', None)
if is_public:
return None
# if this view matches a whitelisted regex, don't process it
for view_url in self.public_view_urls:
if view_url.match(request.path):
return None
return login_required(view_func)(request, *view_args, **view_kwargs)
| Python | 0 |
79893a76c0b438ab3885a9c09027842ff92a26d2 | Update multiplier constant / tweak brightness | wakeup.py | wakeup.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# The Pin. Use Broadcom numbers.
BRIGHT_PIN = 17
# User config
START_BRIGHT = 15 # Minutes before alarm to start lighting up
END_BRIGHT = -45 # Minutes after alarm to turn off
MAX_BRIGHT = 255 # Max brightness 1 - 255
# Other constants
BRIGHT_MULTI = MAX_BRIGHT / START_BRIGHT
SLEEP_TIME = 10
import os
import sys
import pigpio
import time
import datetime
import signal
from thread import start_new_thread
try:
alarmTime = datetime.datetime.strptime(sys.argv[1], '%H:%M')
alarmTime = alarmTime.replace(year=2000, month=1, day=1)
print(alarmTime)
except:
sys.stdout.write('Usage: %s HH:MM' % os.path.basename(sys.argv[0]))
print("")
quit()
bright = 0
oldbright = 0
abort = False
pi = pigpio.pi()
def setLights(pin, brightness):
    """Set the PWM duty cycle on *pin*, scaled by the global ``bright`` level.

    NOTE(review): the duty cycle is ``brightness * bright / 255``.  Since the
    call sites pass the global ``bright`` as *brightness*, the effective curve
    is quadratic (bright**2 / 255) — confirm whether that gamma-like scaling
    is intentional or whether *brightness* alone was meant.
    """
    realBrightness = int(int(brightness) * (float(bright) / 255.0))
    pi.set_PWM_dutycycle(pin, realBrightness)
def fadeLights(pin, brightness):
    """Ramp *pin* up to *brightness*, one PWM step every 0.1 s.

    If the target is below the current duty cycle the new value is applied
    immediately (there is no fade-down).
    """
    # print("FADE IN")
    newBrightness = brightness
    currentBrightness = pi.get_PWM_dutycycle(pin)
    if newBrightness < currentBrightness:
        # NOTE(review): this branch sets BRIGHT_PIN rather than the ``pin``
        # argument — harmless while the only caller passes BRIGHT_PIN, but
        # confirm before reusing with other pins.
        setLights(BRIGHT_PIN, brightness)
        return
    while currentBrightness < newBrightness:
        currentBrightness = currentBrightness + 1
        pi.set_PWM_dutycycle(pin, currentBrightness)
        time.sleep(0.1)
    # print("FADE OUT")
def sigterm_handler(_signo, _stack_frame):
    """Signal handler for SIGTERM/SIGINT: lights off, then exit cleanly."""
    # ``global`` is required here — without it the assignment below bound a
    # dead local and the main loop's ``abort`` flag was never actually set
    # (masked only because sys.exit() raises SystemExit in the main thread).
    global abort
    setLights(BRIGHT_PIN, 0)
    abort = True
    sys.exit(0)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
def checkTime():
    """Background thread: recompute the global ``bright`` level from the time
    remaining until the alarm.

    Brightness ramps up linearly over the START_BRIGHT minutes before the
    alarm (attenuated early in the ramp), holds at MAX_BRIGHT from the alarm
    until END_BRIGHT minutes after, and is 0 outside that window.  Runs
    forever, polling every SLEEP_TIME seconds.
    """
    global bright
    global oldbright
    global abort
    while True:
        # Normalise both times onto a fixed dummy date so only HH:MM matter.
        now = datetime.datetime.now()
        now = now.replace(year=2000, month=1, day=1)  # , hour=6, minute=25, second=0
        d1_ts = time.mktime(now.timetuple())
        d2_ts = time.mktime(alarmTime.timetuple())
        # Minutes until the alarm (negative once it has passed).
        minuteDiff = (d2_ts - d1_ts) / 60

        percDiff = 0
        if minuteDiff < START_BRIGHT and minuteDiff > END_BRIGHT:
            if minuteDiff < 0:
                # Alarm has fired: hold at full brightness.
                bright = MAX_BRIGHT
            else:
                # Linear ramp toward MAX_BRIGHT, softened while still far
                # from the alarm (percDiff = % of the ramp window remaining).
                bright = (START_BRIGHT - minuteDiff) * BRIGHT_MULTI
                percDiff = (minuteDiff / START_BRIGHT) * 100
                if percDiff > 5:
                    bright = bright * 0.2
                elif percDiff > 1:
                    bright = bright * 0.5
        else:
            bright = 0

        bright = round(bright)
        if bright > MAX_BRIGHT:
            bright = MAX_BRIGHT

        print("MINUTE: " + str(minuteDiff))
        print("DIFF: " + str(percDiff))
        print("BRIGHT: " + str(bright))
        time.sleep(SLEEP_TIME)
# Main loop: checkTime (background thread) updates ``bright``; here we poll
# for changes and apply them to the LED, fading up smoothly on increases.
start_new_thread(checkTime, ())

while abort == False:
    if bright != oldbright:
        oldbright = bright
        if bright > 0:
            fadeLights(BRIGHT_PIN, bright)
        else:
            # Target is zero: switch off immediately rather than fading.
            setLights(BRIGHT_PIN, bright)
    time.sleep(0.1)

# Shut the lights off before releasing the pigpio connection.
setLights(BRIGHT_PIN, 0)
time.sleep(0.5)
pi.stop()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
# The Pin. Use Broadcom numbers.
BRIGHT_PIN = 17
# User config
START_BRIGHT = 15 # Minutes before alarm to start lighting up
END_BRIGHT = -45 # Minutes after alarm to turn off
MAX_BRIGHT = 255 # Max brightness 1 - 255
# Other constants
BRIGHT_MULTI = 255 / START_BRIGHT
SLEEP_TIME = 10
import os
import sys
import pigpio
import time
import datetime
import signal
from thread import start_new_thread
try:
alarmTime = datetime.datetime.strptime(sys.argv[1], '%H:%M')
alarmTime = alarmTime.replace(year=2000, month=1, day=1)
print(alarmTime)
except:
sys.stdout.write('Usage: %s HH:MM' % os.path.basename(sys.argv[0]))
print("")
quit()
bright = 0
oldbright = 0
abort = False
pi = pigpio.pi()
def setLights(pin, brightness):
realBrightness = int(int(brightness) * (float(bright) / 255.0))
pi.set_PWM_dutycycle(pin, realBrightness)
def fadeLights(pin, brightness):
# print("FADE IN")
newBrightness = brightness
currentBrightness = pi.get_PWM_dutycycle(pin)
if newBrightness < currentBrightness:
setLights(BRIGHT_PIN, brightness)
return
while currentBrightness < newBrightness:
currentBrightness = currentBrightness + 1
pi.set_PWM_dutycycle(pin, currentBrightness)
time.sleep(0.1)
# print("FADE OUT")
def sigterm_handler(_signo, _stack_frame):
setLights(BRIGHT_PIN, 0)
abort = True
sys.exit(0)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
def checkTime():
    """Background thread: recompute the target LED brightness every
    SLEEP_TIME seconds from the distance to the alarm time.

    Only `bright` is written here; `oldbright`/`abort` are declared global
    but not assigned in this body.
    """
    global bright
    global oldbright
    global abort
    while True:
        now = datetime.datetime.now()
        # Collapse to the same fixed date used for alarmTime so the
        # subtraction below compares time-of-day only.
        now = now.replace(year=2000, month=1, day=1)  # , hour=6, minute=25, second=0
        d1_ts = time.mktime(now.timetuple())
        d2_ts = time.mktime(alarmTime.timetuple())
        # Minutes until the alarm; negative once the alarm has passed.
        minuteDiff = (d2_ts - d1_ts) / 60
        percDiff = 0
        if minuteDiff < START_BRIGHT and minuteDiff > END_BRIGHT:
            if minuteDiff < 0:
                # Alarm already passed (still inside END_BRIGHT window):
                # hold at full brightness.
                bright = MAX_BRIGHT
            else:
                # Ramp up as the alarm approaches; damp the early part of
                # the ramp so it starts very dim.
                bright = (START_BRIGHT - minuteDiff) * BRIGHT_MULTI
                percDiff = (minuteDiff / START_BRIGHT) * 100
                if percDiff > 50:
                    bright = bright * 0.2
                elif percDiff > 5:
                    bright = bright * 0.5
        else:
            # Outside the alarm window: light off.
            bright = 0
        bright = round(bright)
        if bright > MAX_BRIGHT:
            bright = MAX_BRIGHT
        print("MINUTE: " + str(minuteDiff))
        print("DIFF: " + str(percDiff))
        print("BRIGHT: " + str(bright))
        time.sleep(SLEEP_TIME)
# Poll the alarm window in a background thread; it updates the shared
# `bright` target.
start_new_thread(checkTime, ())

# Main loop: apply brightness changes until the abort flag is set.
while abort == False:
    if bright != oldbright:
        oldbright = bright
        if bright > 0:
            # Non-zero target: fadeLights ramps up smoothly (it drops
            # immediately when the target is lower than the current level).
            fadeLights(BRIGHT_PIN, bright)
        else:
            # Zero target: switch off at once.
            setLights(BRIGHT_PIN, bright)
    time.sleep(0.1)

# Shutdown: make sure the LED is off before releasing the pigpio handle.
setLights(BRIGHT_PIN, 0)
time.sleep(0.5)
pi.stop()
| Python | 0 |
35de4045bc30a1ee0e9aaa17f0b3f370ad95d6c8 | Bump (#16) | swag_client/__about__.py | swag_client/__about__.py | from __future__ import absolute_import, division, print_function
# Package metadata constants for swag-client.
__all__ = [
    "__title__", "__summary__", "__uri__", "__version__", "__author__",
    "__email__", "__license__", "__copyright__",
]

__title__ = "swag-client"
__summary__ = ("Cloud multi-account metadata management tool.")
__uri__ = "https://github.com/Netflix-Skunkworks/swag-client"

__version__ = "0.2.5"

__author__ = "The swag developers"
__email__ = "oss@netflix.com"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2017 {0}".format(__author__)
| from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "swag-client"
__summary__ = ("Cloud multi-account metadata management tool.")
__uri__ = "https://github.com/Netflix-Skunkworks/swag-client"
__version__ = "0.2.3"
__author__ = "The swag developers"
__email__ = "oss@netflix.com"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2017 {0}".format(__author__)
| Python | 0 |
2c2e5bc83bbca3cd3c7ce7649be657293978ddd8 | append _tbl | webapp.py | webapp.py | import cherrypy
import os
import json
import getpass
from collections import OrderedDict
from configparser import ConfigParser
from builtins import input
import mysql.connector
#
# import sys
# import os.path
class DBConfig:
    """Load database connection settings from an INI-style config file.

    ``settings`` maps section name -> {option: value}; it is left empty
    when the config file does not exist.
    """

    # Class-level default kept for backward compatibility with any
    # ``DBConfig.settings`` access; instances get their own dict below.
    settings = {}

    def __init__(self, config_file='..\db.cfg'):
        # Bug fix: the original only had the class-level ``settings = {}``,
        # so every instance mutated one shared dict and settings leaked
        # between objects.  Use a per-instance dict instead.
        self.settings = {}
        if os.path.isfile(config_file):
            config = ConfigParser()
            # ``with`` closes the file handle (``readfp(open(...))`` leaked
            # it, and ``readfp`` is deprecated in favour of ``read_file``).
            with open(config_file) as fp:
                config.read_file(fp)
            for section in config.sections():
                self.settings[section] = {}
                for option in config.options(section):
                    self.settings[section][option] = config.get(section, option)
def get_list(list_name, params):
    """Query every row of the ``<list_name>_tbl`` MySQL table and return a
    (currently placeholder) OrderedDict payload.

    Connects with the credentials held in the module-level ``db_conf``,
    using the PythonAnywhere host/database naming scheme
    (``<user>.mysql.pythonanywhere-services.com`` / ``<user>$<db>``).
    ``params`` is currently only printed, not used in the query.
    """
    print('get_list list:{} params:{}'.format(str(list_name), str(params)))
    print(db_conf.settings['DB']['db_user'])
    print(db_conf.settings['DB']['db_user'] + '$' + db_conf.settings['DB']['db_name'])
    return_vals = OrderedDict()
    cnx = mysql.connector.connect(user=db_conf.settings['DB']['db_user'],
                                  password=db_conf.settings['DB']['db_pass'],
                                  host=db_conf.settings['DB']['db_user']+'.mysql.pythonanywhere-services.com',
                                  database=db_conf.settings['DB']['db_user'] + '$' + db_conf.settings['DB']['db_name'])
    cursor = cnx.cursor()
    # SECURITY: list_name comes straight from the URL path (see
    # HoppersWebService.GET) and is concatenated into the SQL statement -
    # SQL injection risk; the table name should be validated/whitelisted.
    query = ("SELECT * FROM " + list_name + "_tbl")
    cursor.execute(query)
    for (participant_name) in cursor:
        print("{}".format(participant_name))
    cursor.close()
    cnx.close()
    # Placeholder payload until real rows are returned to the caller.
    return_vals['test1'] = 'Hello World'
    return return_vals
class HoppersWebService(object):
    """REST resource mounted at /hoppers and routed per HTTP method by
    CherryPy's MethodDispatcher."""

    # Required so the MethodDispatcher routes requests to this object.
    exposed = True

    def GET(self, *args):
        print('GET:/hoppers/' + str(args))
        if not args:
            args = [None]
        list_name, params = args[0], args[1:]
        return json.dumps(get_list(list_name, params))

    def POST(self, **kwargs):
        return 'POST:/hoppers/{0}'.format(kwargs)

    def PUT(self, **kwargs):
        return 'PUT:/hoppers/{0}'.format(kwargs)

    def DELETE(self, **kwargs):
        return 'DELETE:/hoppers/{0}'.format(kwargs)
class ws():
    """Application bootstrap: loads DB settings (prompting for any missing
    credentials) and starts the CherryPy server on port 80."""

    def __init__(self):
        # Bug fix: publish the configuration as the module-level `db_conf`
        # that get_list() reads; previously this was only a local variable,
        # so get_list() hit a NameError at request time.
        global db_conf
        db_conf = DBConfig()
        if not 'DB' in db_conf.settings.keys():
            db_conf.settings['DB'] = {}
        # Prompt interactively for any credential missing from the file.
        if not 'db_name' in db_conf.settings['DB'].keys():
            db_conf.settings['DB']['db_name'] = input('db_name:')
        if not 'db_user' in db_conf.settings['DB'].keys():
            db_conf.settings['DB']['db_user'] = input('db_user:')
        if not 'db_pass' in db_conf.settings['DB'].keys():
            db_conf.settings['DB']['db_pass'] = getpass.getpass('Password:')
        print("name {}".format(db_conf.settings['DB']['db_name']))
        print("user {}".format(db_conf.settings['DB']['db_user']))
        # Route /hoppers through the REST resource using HTTP-method dispatch.
        cherrypy.tree.mount(
            HoppersWebService(),
            '/hoppers',
            {
                '/': {
                    'request.dispatch': cherrypy.dispatch.MethodDispatcher()
                },
            }, )
        cherrypy.config.update({'server.socket_port': 80})
        cherrypy.engine.start()
        cherrypy.engine.block()  # blocks until the server shuts down
if __name__ == '__main__':
foo = ws() | import cherrypy
import os
import json
import getpass
from collections import OrderedDict
from configparser import ConfigParser
from builtins import input
import mysql.connector
#
# import sys
# import os.path
class DBConfig:
settings = {}
def __init__(self, config_file='..\db.cfg'):
if os.path.isfile(config_file):
config = ConfigParser()
config.readfp(open(config_file))
for section in config.sections():
self.settings[section] = {}
for option in config.options(section):
self.settings[section][option] = config.get(section, option)
def get_list(list_name, params):
print('get_list list:{} params:{}'.format(str(list_name), str(params)))
print(db_conf.settings['DB']['db_user'])
print(db_conf.settings['DB']['db_user'] + '$' + db_conf.settings['DB']['db_name'])
return_vals = OrderedDict()
cnx = mysql.connector.connect(user=db_conf.settings['DB']['db_user'],
password=db_conf.settings['DB']['db_pass'],
host=db_conf.settings['DB']['db_user']+'.mysql.pythonanywhere-services.com',
database=db_conf.settings['DB']['db_user'] + '$' + db_conf.settings['DB']['db_name'])
cursor = cnx.cursor()
query = ("SELECT * FROM " + list_name )
cursor.execute(query)
for (participant_name) in cursor:
print("{}".format(participant_name))
cursor.close()
cnx.close()
return_vals['test1'] = 'Hello World'
return return_vals
class HoppersWebService(object):
exposed = True
def GET(self,*args):
print('GET:/hoppers/'+str(args))
if not args:
args = [None]
return json.dumps(get_list(args[0],args[1:]))
def POST(self, **kwargs):
return 'POST:/hoppers/' + str(kwargs)
def PUT(self, **kwargs):
return 'PUT:/hoppers/' + str(kwargs)
def DELETE(self, **kwargs):
return 'DELETE:/hoppers/' + str(kwargs)
class ws():
def __init__(self):
db_conf = DBConfig()
if not 'DB' in db_conf.settings.keys():
db_conf.settings['DB'] = {}
if not 'db_name' in db_conf.settings['DB'].keys():
db_conf.settings['DB']['db_name'] = input('db_name:')
if not 'db_user' in db_conf.settings['DB'].keys():
db_conf.settings['DB']['db_user'] = input('db_user:')
if not 'db_pass' in db_conf.settings['DB'].keys():
db_conf.settings['DB']['db_pass'] = getpass.getpass('Password:')
print("name {}".format(db_conf.settings['DB']['db_name']))
print("user {}".format(db_conf.settings['DB']['db_user']))
cherrypy.tree.mount(
HoppersWebService(),
'/hoppers',
{
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher()
},
}, )
# cherrypy.tree.mount(
# Root(),
# '/',
# {
# '/': {
# 'tools.sessions.on': True,
# 'tools.staticdir.root': os.path.abspath(os.getcwd())
# }
# }
# )
cherrypy.config.update({'server.socket_port': 80})
cherrypy.engine.start()
cherrypy.engine.block()
if __name__ == '__main__':
foo = ws() | Python | 0.999691 |
67614fb784dca6166b112ddc60254ef5e493541d | Change 9/5 to 1.8 | wfinal.py | wfinal.py | import RPi.GPIO as GPIO
import pywapi
import string
import time

# BCM pin numbers of the indicator LEDs.
channels = [4, 7, 8, 9, 10, 14, 15, 17, 18, 22, 23, 24, 25]

GPIO.setwarnings(True)
GPIO.setmode(GPIO.BCM)
GPIO.setup(channels, GPIO.OUT)
GPIO.output(channels, 0)  # start with everything off

# Fetch current conditions for ZIP 33020 from weather.com.
weather = pywapi.get_weather_from_weather_com('33020')
temperature = int(weather['current_conditions']['temperature'])
temp_f = temperature*(1.8)+32  # Celsius -> Fahrenheit
humidity = int(weather['current_conditions']['humidity'])
cc = (weather['current_conditions']['text'].lower())

# Humidity LEDs.
if humidity >= 80:
    GPIO.output(7, 1)
if humidity <= 79 and humidity >= 60:
    GPIO.output(18, 1)
if humidity <= 59:
    GPIO.output(25, 1)

# Temperature LEDs.
if temp_f >= 90:
    GPIO.output(14, 1)
if temp_f <= 89 and temp_f >= 80:
    GPIO.output(14, 1)
if temp_f <= 79 and temp_f >= 70:
    GPIO.output(18, 1)
if temp_f <= 69 and temp_f >= 40:
    GPIO.output(23, 1)
if temp_f <= 39:
    GPIO.output(24, 1)

# Condition LEDs.
# Bug fix: the original tests were written as
#   if cc == 'drizzle' or 'light drizzle' or ...
# A non-empty string literal is always truthy, so every branch fired
# regardless of the actual conditions.  Use membership tests instead.
if cc in ('drizzle', 'light drizzle', 'heavy drizzle'):
    GPIO.output(4, 1)
if cc in ('rain', 'light rain'):
    GPIO.output(4, 1)
if cc == 'heavy rain':
    GPIO.output(17, 1)
if cc in ('light rain mist', 'rain mist', 'heavy rain mist'):
    GPIO.output(4, 1)
if cc in ('rain shower', 'light rain showers'):
    GPIO.output(4, 1)
if cc == 'heavy rain shower':
    GPIO.output(17, 1)
if cc in ('light thunderstorm', 'heavy thunderstorm', 'thunderstorm'):
    GPIO.output(17, 1)
    GPIO.output(10, 1)
    GPIO.output(9, 1)
if cc in ('light freezing drizzle', 'heavy freezing drizzle', 'freezing drizzle'):
    GPIO.output(4, 1)
if cc in ('light freezing rain', 'heavy freezing rain', 'freezing rain'):
    GPIO.output(17, 1)
if cc in ('patches of fog', 'shallow fog', 'partial fog', 'light fog'):
    GPIO.output(22, 1)
if cc in ('fog', 'heavy fog', 'heavy fog patches', 'light fog patches'):
    GPIO.output(10, 1)
if cc == 'overcast':
    GPIO.output(10, 1)
if cc in ('partly cloudy', 'scattered clouds'):
    GPIO.output(22, 1)
if cc == 'mostly cloudy':
    GPIO.output(10, 1)
| import RPi.GPIO as GPIO
import pywapi
import string
import time
channels = [4, 7, 8, 9, 10, 14, 15, 17, 18, 22, 23, 24, 25]
GPIO.setwarnings(True)
GPIO.setmode(GPIO.BCM)
GPIO.setup(channels, GPIO.OUT)
GPIO.output(channels, 0)
weather = pywapi.get_weather_from_weather_com('33020')
temperature = int(weather['current_conditions']['temperature'])
temp_f = temperature * (9/5)+32
humidity = int(weather['current_conditions']['humidity'])
cc = (weather['current_conditions']['text'].lower())
if humidity >= 80:
GPIO.output(7, 1)
if humidity <= 79 and humidity >= 60:
GPIO.output(18, 1)
if humidity <= 59:
GPIO.output(25, 1)
if temp_f >= 90:
GPIO.output(14, 1)
if temp_f <= 89 and temp_f >= 80:
GPIO.output(14, 1)
if temp_f <= 79 and temp_f >= 70:
GPIO.output(18, 1)
if temp_f <= 69 and temp_f >= 40:
GPIO.output(23, 1)
if temp_f <= 39:
GPIO.output(24, 1)
if cc == 'drizzle' or 'light drizzle' or 'heavy drizzle':
GPIO.output(4, 1)
if cc == 'rain' or 'light rain':
GPIO.output(4, 1)
if cc == 'heavy rain':
GPIO.output(17, 1)
if cc == 'light rain mist' or 'rain mist' or 'heavy rain mist':
GPIO.output(4, 1)
if cc == 'rain shower' or 'light rain showers':
GPIO.output(4, 1)
if cc == 'heavy rain shower':
GPIO.output(17, 1)
if cc == 'light thunderstorm' or 'heavy thunderstorm' or 'thunderstorm':
GPIO.output(17, 1)
GPIO.output(10, 1)
GPIO.output(9, 1)
if cc == 'light freezing drizzle' or 'heavy freezing drizzle' or 'freezing drizzle':
GPIO.output(4, 1)
if cc == 'light freezing rain' or 'heavy freezing rain' or 'freezing rain':
GPIO.output(17, 1)
if cc == 'patches of fog' or 'shallow fog' or 'partial fog' or 'light fog':
GPIO.output(22, 1)
if cc == 'fog' or 'heavy fog' or 'heavy fog patches' or 'light fog patches':
GPIO.output(10, 1)
if cc == 'overcast':
GPIO.output(10, 1)
if cc == 'partly cloudy' or 'scattered clouds':
GPIO.output(22, 1)
if cc == 'mostly cloudy':
GPIO.output(10, 1)
| Python | 0.999999 |
5b02f334519964ffae6812df5413fcdae84db6ba | Undo changes to logger config, ie. remove the access_log_file option: decision is to support this through log_config rather tan adding an option. | synapse/config/logger.py | synapse/config/logger.py | # -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
from synapse.util.logcontext import LoggingContextFilter
from twisted.python.log import PythonLoggingObserver
import logging
import logging.config
import yaml
class LoggingConfig(Config):
    """Logging configuration: verbosity, log file, and an optional Python
    ``logging`` dictConfig YAML file."""

    def __init__(self, args):
        super(LoggingConfig, self).__init__(args)
        # None when -v was not given; otherwise the repeat count.
        self.verbosity = int(args.verbose) if args.verbose else None
        self.log_config = self.abspath(args.log_config)
        self.log_file = self.abspath(args.log_file)

    @classmethod
    def add_arguments(cls, parser):
        """Register the logging command-line options on *parser*."""
        super(LoggingConfig, cls).add_arguments(parser)
        logging_group = parser.add_argument_group("logging")
        logging_group.add_argument(
            '-v', '--verbose', dest="verbose", action='count',
            help="The verbosity level."
        )
        logging_group.add_argument(
            '-f', '--log-file', dest="log_file", default="homeserver.log",
            help="File to log to."
        )
        logging_group.add_argument(
            '--log-config', dest="log_config", default=None,
            help="Python logging config file"
        )

    def setup_logging(self):
        """Configure logging.

        With no --log-config file, a handler is built manually: a rotating
        file handler when --log-file is set, stderr otherwise.  Otherwise
        the YAML file is handed to ``logging.config.dictConfig``.  Finally,
        Twisted's log events are routed into stdlib ``logging``.
        """
        log_format = (
            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
            " - %(message)s"
        )
        if self.log_config is None:
            level = logging.INFO
            level_for_storage = logging.INFO
            if self.verbosity:
                level = logging.DEBUG
                if self.verbosity > 1:
                    # -vv additionally enables storage-layer debug logging.
                    level_for_storage = logging.DEBUG

            # FIXME: we need a logging.WARN for a -q quiet option
            logger = logging.getLogger('')
            logger.setLevel(level)
            logging.getLogger('synapse.storage').setLevel(level_for_storage)
            formatter = logging.Formatter(log_format)
            if self.log_file:
                # TODO: Customisable file size / backup count
                # NOTE(review): relies on `logging.handlers` having been
                # imported as a side effect of `import logging.config` -
                # confirm, or import it explicitly.
                handler = logging.handlers.RotatingFileHandler(
                    self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
                )
            else:
                handler = logging.StreamHandler()
            handler.setFormatter(formatter)
            # Inject the per-request log context into every record.
            handler.addFilter(LoggingContextFilter(request=""))
            logger.addHandler(handler)
        else:
            with open(self.log_config, 'r') as f:
                # NOTE(review): yaml.load without an explicit Loader is
                # unsafe on untrusted input; assumed operator-controlled.
                logging.config.dictConfig(yaml.load(f))

        # Forward twisted.python.log messages into the stdlib logging tree.
        observer = PythonLoggingObserver()
        observer.start()
| # -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
from synapse.util.logcontext import LoggingContextFilter
from twisted.python.log import PythonLoggingObserver
import logging
import logging.config
import yaml
class LoggingConfig(Config):
def __init__(self, args):
super(LoggingConfig, self).__init__(args)
self.verbosity = int(args.verbose) if args.verbose else None
self.log_config = self.abspath(args.log_config)
self.log_file = self.abspath(args.log_file)
self.access_log_file = self.abspath(args.access_log_file)
@classmethod
def add_arguments(cls, parser):
super(LoggingConfig, cls).add_arguments(parser)
logging_group = parser.add_argument_group("logging")
logging_group.add_argument(
'-v', '--verbose', dest="verbose", action='count',
help="The verbosity level."
)
logging_group.add_argument(
'-f', '--log-file', dest="log_file", default="homeserver.log",
help="File to log to."
)
logging_group.add_argument(
'--log-config', dest="log_config", default=None,
help="Python logging config file"
)
logging_group.add_argument(
'--access-log-file', dest="access_log_file", default="access.log",
help="File to log server access to"
)
def setup_logging(self):
log_format = (
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
" - %(message)s"
)
if self.log_config is None:
level = logging.INFO
level_for_storage = logging.INFO
if self.verbosity:
level = logging.DEBUG
if self.verbosity > 1:
level_for_storage = logging.DEBUG
# FIXME: we need a logging.WARN for a -q quiet option
logger = logging.getLogger('')
logger.setLevel(level)
logging.getLogger('synapse.storage').setLevel(level_for_storage)
formatter = logging.Formatter(log_format)
if self.log_file:
# TODO: Customisable file size / backup count
handler = logging.handlers.RotatingFileHandler(
self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
)
else:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.addFilter(LoggingContextFilter(request=""))
logger.addHandler(handler)
if self.access_log_file:
access_logger = logging.getLogger('synapse.access')
# we log to both files by default
access_logger.propagate = 1
access_log_handler = logging.handlers.RotatingFileHandler(
self.access_log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
)
access_log_formatter = logging.Formatter('%(message)s')
access_log_handler.setFormatter(access_log_formatter)
access_logger.addHandler(access_log_handler)
else:
with open(self.log_config, 'r') as f:
logging.config.dictConfig(yaml.load(f))
observer = PythonLoggingObserver()
observer.start()
| Python | 0 |
07f8f44fc5f69c71922bb3b85d621867d0df49fa | Support core logger as a property on the main scraper. | scrapekit/core.py | scrapekit/core.py | from uuid import uuid4
from time import time
from datetime import datetime
from threading import local
from scrapekit.config import Config
from scrapekit.tasks import TaskManager, Task
from scrapekit.http import make_session
from scrapekit.logs import make_logger
class Scraper(object):
    """ Scraper application object which handles resource management
    for a variety of related functions. """

    def __init__(self, name, config=None):
        self.name = name
        self.id = uuid4()                  # unique id for this scraper run
        self.start_time = datetime.utcnow()
        self.config = Config(self, config)
        self._task_manager = None          # created lazily by task_manager
        self.task_ctx = local()            # per-thread task context
        self.log = make_logger(self)
        self.log.info("Starting %s, %d threads.", self.name,
                      self.config.threads)

    @property
    def task_manager(self):
        """ Lazily create and return the shared TaskManager. """
        if self._task_manager is None:
            self._task_manager = \
                TaskManager(threads=self.config.threads)
        return self._task_manager

    def task(self, fn):
        """ Decorate a function as a task in the scraper framework.
        This will enable the function to be queued and executed in
        a separate thread, allowing for the execution of the scraper
        to be asynchronous.
        """
        return Task(self, fn)

    def Session(self):
        """ Create a pre-configured ``requests`` session instance
        that can be used to run HTTP requests. This instance will
        potentially be cached, or a stub, depending on the
        configuration of the scraper. """
        return make_session(self)

    def head(self, url, **kwargs):
        """ HTTP HEAD via ``requests``.

        See: http://docs.python-requests.org/en/latest/api/#requests.head
        """
        # Bug fix: this previously issued a GET; HEAD must use Session.head.
        return self.Session().head(url, **kwargs)

    def get(self, url, **kwargs):
        """ HTTP GET via ``requests``.

        See: http://docs.python-requests.org/en/latest/api/#requests.get
        """
        return self.Session().get(url, **kwargs)

    def post(self, url, **kwargs):
        """ HTTP POST via ``requests``.

        See: http://docs.python-requests.org/en/latest/api/#requests.post
        """
        return self.Session().post(url, **kwargs)

    def put(self, url, **kwargs):
        """ HTTP PUT via ``requests``.

        See: http://docs.python-requests.org/en/latest/api/#requests.put
        """
        return self.Session().put(url, **kwargs)

    def __repr__(self):
        return '<Scraper(%s)>' % self.name
| from scrapekit.config import Config
from scrapekit.tasks import TaskManager, Task
from scrapekit.http import make_session
class Scraper(object):
""" Scraper application object which handles resource management
for a variety of related functions. """
def __init__(self, name, config=None):
self.name = name
self.config = Config(self, config)
self._task_manager = None
@property
def task_manager(self):
if self._task_manager is None:
self._task_manager = \
TaskManager(threads=self.config.threads)
return self._task_manager
def task(self, fn):
""" Decorate a function as a task in the scraper framework.
This will enable the function to be queued and executed in
a separate thread, allowing for the execution of the scraper
to be asynchronous.
"""
return Task(self, fn)
def Session(self):
""" Create a pre-configured ``requests`` session instance
that can be used to run HTTP requests. This instance will
potentially be cached, or a stub, depending on the
configuration of the scraper. """
return make_session(self)
def head(self, url, **kwargs):
""" HTTP HEAD via ``requests``.
See: http://docs.python-requests.org/en/latest/api/#requests.head
"""
return self.Session().get(url, **kwargs)
def get(self, url, **kwargs):
""" HTTP GET via ``requests``.
See: http://docs.python-requests.org/en/latest/api/#requests.get
"""
return self.Session().get(url, **kwargs)
def post(self, url, **kwargs):
""" HTTP POST via ``requests``.
See: http://docs.python-requests.org/en/latest/api/#requests.post
"""
return self.Session().post(url, **kwargs)
def put(self, url, **kwargs):
""" HTTP PUT via ``requests``.
See: http://docs.python-requests.org/en/latest/api/#requests.put
"""
return self.Session().put(url, **kwargs)
def __repr__(self):
return '<Scraper(%s)>' % self.name
| Python | 0 |
6edd4114c4e715a3a0c440af455fff089a099620 | Clarify comment about Pyhton versions | scrapy/squeues.py | scrapy/squeues.py | """
Scheduler queues
"""
import marshal
from six.moves import cPickle as pickle
from queuelib import queue
def _serializable_queue(queue_class, serialize, deserialize):
class SerializableQueue(queue_class):
def push(self, obj):
s = serialize(obj)
super(SerializableQueue, self).push(s)
def pop(self):
s = super(SerializableQueue, self).pop()
if s:
return deserialize(s)
return SerializableQueue
def _pickle_serialize(obj):
try:
return pickle.dumps(obj, protocol=2)
# Python <= 3.4 raises pickle.PicklingError here while
# 3.5 <= Python < 3.6 raises AttributeError and
# Python >= 3.6 raises TypeError
except (pickle.PicklingError, AttributeError, TypeError) as e:
raise ValueError(str(e))
# Disk-backed and in-memory queue variants exposed to the scheduler.
# Pickle handles a wider range of object types; marshal is faster but
# Python-version specific.
PickleFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \
    _pickle_serialize, pickle.loads)
PickleLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \
    _pickle_serialize, pickle.loads)
MarshalFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \
    marshal.dumps, marshal.loads)
MarshalLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \
    marshal.dumps, marshal.loads)
FifoMemoryQueue = queue.FifoMemoryQueue
LifoMemoryQueue = queue.LifoMemoryQueue
| """
Scheduler queues
"""
import marshal
from six.moves import cPickle as pickle
from queuelib import queue
def _serializable_queue(queue_class, serialize, deserialize):
class SerializableQueue(queue_class):
def push(self, obj):
s = serialize(obj)
super(SerializableQueue, self).push(s)
def pop(self):
s = super(SerializableQueue, self).pop()
if s:
return deserialize(s)
return SerializableQueue
def _pickle_serialize(obj):
try:
return pickle.dumps(obj, protocol=2)
# Python<=3.4 raises pickle.PicklingError here while
# Python>=3.5 raises AttributeError and
# Python>=3.6 raises TypeError
except (pickle.PicklingError, AttributeError, TypeError) as e:
raise ValueError(str(e))
PickleFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \
_pickle_serialize, pickle.loads)
PickleLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \
_pickle_serialize, pickle.loads)
MarshalFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \
marshal.dumps, marshal.loads)
MarshalLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \
marshal.dumps, marshal.loads)
FifoMemoryQueue = queue.FifoMemoryQueue
LifoMemoryQueue = queue.LifoMemoryQueue
| Python | 0.000001 |
d6cdf99d87b23cd6bfd8fd7079919d89d6496501 | Complete incomplete sentence | partner_identification/models/res_partner_id_category.py | partner_identification/models/res_partner_id_category.py | # -*- coding: utf-8 -*-
#
# ยฉ 2004-2010 Tiny SPRL http://tiny.be
# ยฉ 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH
# http://www.camptocamp.at
# ยฉ 2015 Antiun Ingenieria, SL (Madrid, Spain)
# http://www.antiun.com
# Antonio Espinosa <antonioea@antiun.com>
# ยฉ 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, models, fields
from openerp.exceptions import ValidationError, UserError
from openerp.tools.safe_eval import safe_eval
from openerp.tools.translate import _
class ResPartnerIdCategory(models.Model):
    """Category of partner identification, carrying an optional Python
    snippet used to validate ID numbers of that category."""
    _name = "res.partner.id_category"
    _order = "name"

    # Short technical code and translatable display name for the category.
    code = fields.Char(string="Code", size=16, required=True)
    name = fields.Char(string="ID name", required=True, translate=True)
    active = fields.Boolean(string="Active", default=True)
    validation_code = fields.Text(
        'Python validation code',
        help="Python code called to validate an id number.",
        default="""
# Python code. Use failed = True to specify that the id number is not valid.
# You can use the following variables :
#  - self: browse_record of the current ID Category browse_record
#  - id_number: browse_record of ID number to validte
""")

    @api.multi
    def _validation_eval_context(self, id_number):
        """Build the variable scope handed to the validation snippet."""
        self.ensure_one()
        return {'self': self,
                'id_number': id_number,
                }

    @api.multi
    def validate_id_number(self, id_number):
        """Validate the given ID number
        The method raises an openerp.exceptions.ValidationError if the eval of
        python validation code fails
        """
        self.ensure_one()
        eval_context = self._validation_eval_context(id_number)
        try:
            # safe_eval sandboxes the user-provided snippet; nocopy lets it
            # write back into eval_context (the `failed` flag read below).
            safe_eval(self.validation_code,
                      eval_context,
                      mode='exec',
                      nocopy=True)
        except Exception as e:
            # Broad on purpose: any error in the snippet is surfaced to the
            # user as a configuration problem.
            raise UserError(
                _('Error when evaluating the id_category validation code:'
                  ':\n %s \n(%s)') % (self.name, e))
        if eval_context.get('failed', False):
            raise ValidationError(
                _("%s is not a valid %s identifier") % (
                    id_number.name, self.name))
| # -*- coding: utf-8 -*-
#
# ยฉ 2004-2010 Tiny SPRL http://tiny.be
# ยฉ 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH
# http://www.camptocamp.at
# ยฉ 2015 Antiun Ingenieria, SL (Madrid, Spain)
# http://www.antiun.com
# Antonio Espinosa <antonioea@antiun.com>
# ยฉ 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, models, fields
from openerp.exceptions import ValidationError, UserError
from openerp.tools.safe_eval import safe_eval
from openerp.tools.translate import _
class ResPartnerIdCategory(models.Model):
_name = "res.partner.id_category"
_order = "name"
code = fields.Char(string="Code", size=16, required=True)
name = fields.Char(string="ID name", required=True, translate=True)
active = fields.Boolean(string="Active", default=True)
validation_code = fields.Text(
'Python validation code',
help="Python code called to validate an id number.",
default="""
# Python code. Use failed = True to .
# You can use the following variables :
# - self: browse_record of the current ID Category browse_record
# - id_number: browse_record of ID number to validte
"""
)
@api.multi
def _validation_eval_context(self, id_number):
self.ensure_one()
return {'self': self,
'id_number': id_number,
}
@api.multi
def validate_id_number(self, id_number):
"""Validate the given ID number
The method raises an openerp.exceptions.ValidationError if the eval of
python validation code fails
"""
self.ensure_one()
eval_context = self._validation_eval_context(id_number)
try:
safe_eval(self.validation_code,
eval_context,
mode='exec',
nocopy=True)
except Exception as e:
raise UserError(
_('Error when evaluating the id_category validation code:'
':\n %s \n(%s)') % (self.name, e))
if eval_context.get('failed', False):
raise ValidationError(
_("%s is not a valid %s identifier") % (
id_number.name, self.name))
| Python | 0.998694 |
573d3a7411a1653f64b901077264ecb98c1f9673 | Use subprocess.check_call replace os.system | script/version.py | script/version.py | import importlib
import os
import sys
import subprocess
here = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_version() -> str:
    """Return the version string of the in-tree ``a2wsgi`` package.

    The repository root is prepended to ``sys.path`` so the local package
    (not any installed copy) is imported.
    """
    sys.path.insert(0, here)
    return importlib.import_module("a2wsgi").__version__


# Release steps, run from the repository root; check_call raises on any
# failing command so a failed step aborts the release.
os.chdir(here)
# NOTE(review): shell=True with an interpolated version string; acceptable
# only because the version comes from our own package metadata.
subprocess.check_call(f"poetry version {get_version()}", shell=True)
subprocess.check_call("git add a2wsgi/__init__.py pyproject.toml", shell=True)
subprocess.check_call(f'git commit -m "v{get_version()}"', shell=True)
subprocess.check_call("git push", shell=True)
subprocess.check_call("git tag v{0}".format(get_version()), shell=True)
subprocess.check_call("git push --tags", shell=True)
| import importlib
import os
import sys
here = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_version() -> str:
"""
Return version.
"""
sys.path.insert(0, here)
return importlib.import_module("a2wsgi").__version__
os.chdir(here)
os.system(f"poetry version {get_version()}")
os.system("git add a2wsgi/* pyproject.toml")
os.system(f'git commit -m "v{get_version()}"')
os.system("git push")
os.system("git tag v{0}".format(get_version()))
os.system("git push --tags")
| Python | 0.000003 |
cbdee53bc2239277d314b93d09368ab2462ab8d6 | Allow variety of input types | geojsonio/geojsonio.py | geojsonio/geojsonio.py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import argparse
import json
import sys
import urllib
import webbrowser
import github3
MAX_URL_LEN = 150e3 # Size threshold above which a gist is created
def display(contents, domain='http://geojson.io/'):
    """Open *contents* on geojson.io in the default web browser and return
    the URL that was opened.  See geojsonio_url() for accepted inputs."""
    url = geojsonio_url(contents, domain)
    webbrowser.open(url)
    return url

# display() used to be called to_geojsonio. Keep it around for now...
to_geojsonio = display
def geojsonio_url(contents, domain='http://geojson.io/'):
    """Return the geojson.io URL that displays *contents*.

    ``contents`` may be:
      * a string (assumed to already be GeoJSON);
      * an object implementing ``__geo_interface__`` (wrapped as the single
        feature of a FeatureCollection);
      * a sequence of such objects (wrapped as the features of a
        FeatureCollection).

    See https://gist.github.com/sgillies/2217756 for __geo_interface__.
    Contents longer than MAX_URL_LEN are uploaded as an anonymous gist and
    referenced by id instead of being inlined in the URL.
    """
    geojson = _parse_contents(contents)
    if len(geojson) > MAX_URL_LEN:
        return _gist_url(domain, _create_gist(geojson).id)
    return _data_url(domain, geojson)
def _parse_contents(contents):
    """
    Return a GeoJSON string from a variety of inputs.

    See the documentation for geojsonio_url for the possible contents
    input.

    Returns
    -------
    GeoJSON string

    """
    # Strings are assumed to already be GeoJSON (`basestring`: Python 2).
    if isinstance(contents, basestring):
        return contents

    if hasattr(contents, '__geo_interface__'):
        # Single geo object -> one-feature FeatureCollection.
        features = [_geo_to_feature(contents)]
    else:
        # Otherwise require an iterable of geo objects
        # (Python 2 `except ..., e` syntax).
        try:
            feature_iter = iter(contents)
        except TypeError, e:
            raise ValueError('Unknown type for input')
        features = []
        for i, f in enumerate(feature_iter):
            if not hasattr(f, '__geo_interface__'):
                raise ValueError('Unknown type at index {}'.format(i))
            features.append(_geo_to_feature(f))

    data = {'type': 'FeatureCollection', 'features': features}
    return json.dumps(data)
def _geo_to_feature(ob):
"""
Return a GeoJSON Feature from an object that implements
__geo_interface__
If the object's type is a geometry, return a Feature with empty
properties and the object's mapping as the feature geometry. If the
object's type is a Feature, then return it.
"""
mapping = ob.__geo_interface__
if mapping['type'] == 'Feature':
return mapping
else:
return {'type': 'Feature',
'geometry': mapping}
def _create_gist(contents, description='', filename='data.geojson'):
    """
    Create and return an anonymous gist with a single file and specified
    contents
    """
    # Unauthenticated GitHub client; this performs a network call.
    ghapi = github3.GitHub()
    files = {filename: {'content': contents}}
    gist = ghapi.create_gist(description, files)
    return gist
def _data_url(domain, contents):
    """Return a geojson.io URL carrying *contents* URL-encoded inline in
    the fragment as a data URI.

    NOTE(review): ``urllib.quote`` is the Python 2 API (moved to
    ``urllib.parse.quote`` in Python 3), consistent with the py2 syntax
    used elsewhere in this module.
    """
    url = (domain + '#data=data:application/json,' +
           urllib.quote(contents))
    return url
def _gist_url(domain, gist_id):
url = (domain + '#id=gist:/{}'.format(gist_id))
return url
def main():
    """Command-line entry point: read GeoJSON and open it on geojson.io."""
    parser = argparse.ArgumentParser(
        description='Quickly visualize GeoJSON data on geojson.io')
    parser.add_argument('-p', '--print',
                        dest='do_print',
                        action='store_true',
                        help='print the URL')
    parser.add_argument('-d', '--domain',
                        dest='domain',
                        default='http://geojson.io',
                        help='Alternate URL instead of http://geojson.io/')
    parser.add_argument('filename',
                        nargs='?',
                        type=argparse.FileType('r'),
                        default=sys.stdin,
                        help="The file to send to geojson.io")
    args = parser.parse_args()

    # Reads the whole file (or stdin) up front, then builds the URL.
    url = geojsonio_url(args.filename.read(), args.domain)
    if args.do_print:
        print(url)
    else:
        webbrowser.open(url)
# Script entry point: delegate to main() when executed directly.
if __name__ == '__main__':
    main()
| #!/usr/bin/env python
#-*- coding: utf-8 -*-
import argparse
import sys
import urllib
import webbrowser
import github3
MAX_URL_LEN = 150e3 # Size threshold above which a gist is created
def display(contents, domain='http://geojson.io/'):
    """Open *contents* on geojson.io in the default browser and return the URL."""
    url = geojsonio_url(contents, domain)
    webbrowser.open(url)
    return url

# display() used to be called to_geojsonio. Keep it around for now...
to_geojsonio = display  # deprecated alias kept for backwards compatibility
def geojsonio_url(contents, domain='http://geojson.io/'):
    """
    Returns the URL to open given the domain and contents

    If the contents are large, then a gist will be created.
    """
    # Small payloads are embedded directly in the URL fragment; anything
    # longer than MAX_URL_LEN goes into an anonymous gist instead.
    if len(contents) <= MAX_URL_LEN:
        url = _data_url(domain, contents)
    else:
        gist = _create_gist(contents)
        url = _gist_url(domain, gist.id)

    return url
def _create_gist(contents, description='', filename='data.geojson'):
    """
    Create and return an anonymous gist with a single file and specified
    contents
    """
    # Unauthenticated GitHub client: the gist is created anonymously.
    ghapi = github3.GitHub()
    files = {filename: {'content': contents}}
    gist = ghapi.create_gist(description, files)

    return gist
def _data_url(domain, contents):
    """Return a URL embedding percent-encoded *contents* in the fragment."""
    url = (domain + '#data=data:application/json,' +
           urllib.quote(contents))
    return url
def _gist_url(domain, gist_id):
url = (domain + '#id=gist:/{}'.format(gist_id))
return url
def main():
    """Command-line entry point: read GeoJSON from a file or stdin and open it."""
    parser = argparse.ArgumentParser(
        description='Quickly visualize GeoJSON data on geojson.io')
    parser.add_argument('-p', '--print',
                        dest='do_print',
                        action='store_true',
                        help='print the URL')
    parser.add_argument('-d', '--domain',
                        dest='domain',
                        default='http://geojson.io',
                        help='Alternate URL instead of http://geojson.io/')
    parser.add_argument('filename',
                        nargs='?',
                        type=argparse.FileType('r'),
                        default=sys.stdin,
                        help="The file to send to geojson.io")
    args = parser.parse_args()

    contents = args.filename.read()
    url = geojsonio_url(contents, args.domain)
    if args.do_print:
        print(url)
    else:
        webbrowser.open(url)
# Script entry point: delegate to main() when executed directly.
if __name__ == '__main__':
    main()
| Python | 0.000016 |
61d07e1ef8b01f80111f24efbbdf9fa02010daff | Handle missing package "imbox" with error message | intelmq/bots/collectors/mail/collector_mail_url.py | intelmq/bots/collectors/mail/collector_mail_url.py | # -*- coding: utf-8 -*-
import re
import sys
try:
import imbox
except ImportError:
imbox = None
import requests
from intelmq.lib.bot import Bot
from intelmq.lib.message import Report
class MailURLCollectorBot(Bot):
    """Collector bot that polls an IMAP mailbox, extracts a report URL from
    each matching unread mail, downloads the report over HTTP and forwards
    it as a Report message."""

    def init(self):
        # imbox is an optional dependency (set to None at import time when
        # missing); fail fast with a clear message instead of crashing with
        # an AttributeError later inside process().
        if imbox is None:
            self.logger.error('Could not import imbox. Please install it.')
            self.stop()

    def process(self):
        # A fresh IMAP session per run; only unread mails in the configured
        # folder are fetched.
        mailbox = imbox.Imbox(self.parameters.mail_host,
                              self.parameters.mail_user,
                              self.parameters.mail_password,
                              self.parameters.mail_ssl)
        emails = mailbox.messages(folder=self.parameters.folder, unread=True)

        if emails:
            for uid, message in emails:

                # Optional subject filter: skip mails whose subject does not
                # match the configured regular expression.
                if (self.parameters.subject_regex and
                        not re.search(self.parameters.subject_regex,
                                      message.subject)):
                    continue

                self.logger.info("Reading email report")
                for body in message.body['plain']:
                    # The first url_regex match in the body is the report URL.
                    match = re.search(self.parameters.url_regex, body)
                    if match:
                        url = match.group()

                        # Build request
                        self.http_header = getattr(self.parameters,
                                                   'http_header', {})
                        self.http_verify_cert = getattr(self.parameters,
                                                        'http_verify_cert', True)

                        # Basic-auth credentials are only used when both the
                        # user and the password are configured.
                        if hasattr(self.parameters, 'http_user') and hasattr(
                                self.parameters, 'http_password'):
                            self.auth = (self.parameters.http_user,
                                         self.parameters.http_password)
                        else:
                            self.auth = None

                        http_proxy = getattr(self.parameters, 'http_proxy', None)
                        https_proxy = getattr(self.parameters,
                                              'http_ssl_proxy', None)
                        # NOTE(review): proxies are only applied when BOTH the
                        # http and https proxy are configured -- confirm.
                        if http_proxy and https_proxy:
                            self.proxy = {'http': http_proxy, 'https': https_proxy}
                        else:
                            self.proxy = None

                        self.http_header['User-agent'] = self.parameters.http_user_agent

                        self.logger.info("Downloading report from %s" % url)
                        resp = requests.get(url=url,
                                            auth=self.auth, proxies=self.proxy,
                                            headers=self.http_header,
                                            verify=self.http_verify_cert)

                        # Any non-2xx response aborts processing of this mail.
                        if resp.status_code // 100 != 2:
                            raise ValueError('HTTP response status code was {}.'
                                             ''.format(resp.status_code))

                        self.logger.info("Report downloaded.")

                        report = Report()
                        report.add("raw", resp.content)
                        report.add("feed.name",
                                   self.parameters.feed)
                        report.add("feed.accuracy", self.parameters.accuracy)
                        self.send_message(report)

                # The mail is marked as read only after every body of it was
                # processed successfully.
                mailbox.mark_seen(uid)
                self.logger.info("Email report read")
if __name__ == "__main__":
    # Instantiate the bot with its id from the command line and run it.
    bot = MailURLCollectorBot(sys.argv[1])
    bot.start()
| # -*- coding: utf-8 -*-
import re
import sys
import imbox
import requests
from intelmq.lib.bot import Bot
from intelmq.lib.message import Report
class MailURLCollectorBot(Bot):
    """Collector bot that polls an IMAP mailbox, extracts a report URL from
    each matching unread mail, downloads the report over HTTP and forwards
    it as a Report message.

    Note: imbox is imported unconditionally at module level here, so a
    missing package fails at import time.
    """

    def process(self):
        # A fresh IMAP session per run; only unread mails in the configured
        # folder are fetched.
        mailbox = imbox.Imbox(self.parameters.mail_host,
                              self.parameters.mail_user,
                              self.parameters.mail_password,
                              self.parameters.mail_ssl)
        emails = mailbox.messages(folder=self.parameters.folder, unread=True)

        if emails:
            for uid, message in emails:

                # Optional subject filter: skip mails whose subject does not
                # match the configured regular expression.
                if (self.parameters.subject_regex and
                        not re.search(self.parameters.subject_regex,
                                      message.subject)):
                    continue

                self.logger.info("Reading email report")
                for body in message.body['plain']:
                    # The first url_regex match in the body is the report URL.
                    match = re.search(self.parameters.url_regex, body)
                    if match:
                        url = match.group()

                        # Build request
                        self.http_header = getattr(self.parameters,
                                                   'http_header', {})
                        self.http_verify_cert = getattr(self.parameters,
                                                        'http_verify_cert', True)

                        if hasattr(self.parameters, 'http_user') and hasattr(
                                self.parameters, 'http_password'):
                            self.auth = (self.parameters.http_user,
                                         self.parameters.http_password)
                        else:
                            self.auth = None

                        http_proxy = getattr(self.parameters, 'http_proxy', None)
                        https_proxy = getattr(self.parameters,
                                              'http_ssl_proxy', None)
                        # NOTE(review): proxies are only applied when BOTH the
                        # http and https proxy are configured -- confirm.
                        if http_proxy and https_proxy:
                            self.proxy = {'http': http_proxy, 'https': https_proxy}
                        else:
                            self.proxy = None

                        self.http_header['User-agent'] = self.parameters.http_user_agent

                        self.logger.info("Downloading report from %s" % url)
                        resp = requests.get(url=url,
                                            auth=self.auth, proxies=self.proxy,
                                            headers=self.http_header,
                                            verify=self.http_verify_cert)

                        # Any non-2xx response aborts processing of this mail.
                        if resp.status_code // 100 != 2:
                            raise ValueError('HTTP response status code was {}.'
                                             ''.format(resp.status_code))

                        self.logger.info("Report downloaded.")

                        report = Report()
                        report.add("raw", resp.content)
                        report.add("feed.name",
                                   self.parameters.feed)
                        report.add("feed.accuracy", self.parameters.accuracy)
                        self.send_message(report)

                # The mail is marked as read only after every body of it was
                # processed successfully.
                mailbox.mark_seen(uid)
                self.logger.info("Email report read")
if __name__ == "__main__":
    # Instantiate the bot with its id from the command line and run it.
    bot = MailURLCollectorBot(sys.argv[1])
    bot.start()
| Python | 0.000003 |
a4feb3abb75e9fd686546e877290c191961601e1 | Update trns_validate_Genbank_Genome.py | plugins/scripts/validate/trns_validate_Genbank_Genome.py | plugins/scripts/validate/trns_validate_Genbank_Genome.py | #!/usr/bin/env python
# standard library imports
import sys
import os
import argparse
import logging
import subprocess
# 3rd party imports
# None
# KBase imports
import biokbase.Transform.script_utils as script_utils
def transform(input_file=None,
              level=logging.INFO, logger=None):
    """
    Validate Genbank file.

    Args:
        input_file: An genbank input file

    Returns:
        Any validation errors or success.

    Authors:
        Shinjae Yoo, Matt Henderson, Marcin Joachimiak
    """
    # Note: the Args entry above was renamed from "input_directory" to
    # "input_file" so script_utils.parse_docs() yields the key the
    # command-line parser looks up ("input_file").

    if logger is None:
        logger = script_utils.stderrlogger(__file__)

    logger.info("Starting Genbank validation")

    # NOTE(review): token is read but never used; presumably kept for parity
    # with sibling transform scripts -- confirm before removing.
    token = os.environ.get("KB_AUTH_TOKEN")

    classpath = "/kb/dev_container/modules/transform/lib/jars/kbase/transform/kbase_transform_deps:$KB_TOP/lib/jars/kbase/genomes/kbase-genomes-20140411.jar:$KB_TOP/lib/jars/kbase/common/kbase-common-0.0.6.jar:$KB_TOP/lib/jars/jackson/jackson-annotations-2.2.3.jar:$KB_TOP/lib/jars/jackson/jackson-core-2.2.3.jar:$KB_TOP/lib/jars/jackson/jackson-databind-2.2.3.jar:$KB_TOP/lib/jars/kbase/transform/kbase_transform_deps.jar:$KB_TOP/lib/jars/kbase/auth/kbase-auth-1398468950-3552bb2.jar:$KB_TOP/lib/jars/kbase/workspace/WorkspaceClient-0.2.0.jar"
    mc = 'us.kbase.genbank.ValidateGBK'

    java_classpath = os.path.join(os.environ.get("KB_TOP"), classpath.replace('$KB_TOP', os.environ.get("KB_TOP")))

    argslist = "{0}".format("--input_file {0}".format(input_file))

    # Bug fix: this validation script previously hard-coded the converter
    # main class ("us.kbase.genbank.ConvertGBK") while mc (ValidateGBK)
    # was assigned but unused. Use the validator class.
    arguments = ["java", "-classpath", java_classpath, mc, argslist]
    # print(x) with a single argument behaves identically on Python 2 and 3.
    print(arguments)

    tool_process = subprocess.Popen(arguments, stderr=subprocess.PIPE)
    stdout, stderr = tool_process.communicate()

    # The Java tool signals failure via stderr output.
    if len(stderr) > 0:
        logger.error("Validation of Genbank.Genome failed on {0}".format(input_file))
        sys.exit(1)
    else:
        logger.info("Validation of Genbank.Genome completed.")
        sys.exit(0)
# Script entry point: parse --input_file and run the validation.
if __name__ == "__main__":
    script_details = script_utils.parse_docs(transform.__doc__)

    parser = argparse.ArgumentParser(prog=__file__,
                                     description=script_details["Description"],
                                     epilog=script_details["Authors"])
    parser.add_argument("--input_file",
                        help=script_details["Args"]["input_file"],
                        action="store",
                        type=str,
                        nargs='?',
                        required=True)
    args, unknown = parser.parse_known_args()

    logger = script_utils.stderrlogger(__file__)

    try:
        transform(input_file=args.input_file,
                  logger=logger)
    except Exception as e:
        # "except Exception, e" is Python-2-only syntax; "as" works on
        # both Python 2.6+ and Python 3.
        logger.exception(e)
        sys.exit(1)

    sys.exit(0)
| #!/usr/bin/env python
# standard library imports
import sys
import os
import argparse
import logging
import subprocess
# 3rd party imports
# None
# KBase imports
import biokbase.Transform.script_utils as script_utils
def transform(input_file=None,
              level=logging.INFO, logger=None):
    """
    Validate Genbank file.

    Args:
        input_directory: An genbank input file

    Returns:
        Any validation errors or success.

    Authors:
        Shinjae Yoo, Matt Henderson, Marcin Joachimiak
    """
    # NOTE(review): the Args key above is "input_directory" while the
    # command-line parser looks up script_details["Args"]["input_file"] --
    # confirm parse_docs tolerates the mismatch.

    if logger is None:
        logger = script_utils.stderrlogger(__file__)

    logger.info("Starting Genbank validation")

    # NOTE(review): token is read but never used.
    token = os.environ.get("KB_AUTH_TOKEN")

    classpath = "/kb/dev_container/modules/transform/lib/jars/kbase/transform/GenBankTransform.jar:$KB_TOP/lib/jars/kbase/genomes/kbase-genomes-20140411.jar:$KB_TOP/lib/jars/kbase/common/kbase-common-0.0.6.jar:$KB_TOP/lib/jars/jackson/jackson-annotations-2.2.3.jar:$KB_TOP/lib/jars/jackson/jackson-core-2.2.3.jar:$KB_TOP/lib/jars/jackson/jackson-databind-2.2.3.jar:$KB_TOP/lib/jars/kbase/transform/GenBankTransform.jar:$KB_TOP/lib/jars/kbase/auth/kbase-auth-1398468950-3552bb2.jar:$KB_TOP/lib/jars/kbase/workspace/WorkspaceClient-0.2.0.jar"
    # NOTE(review): mc (ValidateGBK) is assigned but never used; the
    # arguments below invoke ConvertGBK instead -- looks unintended for a
    # validation script; confirm.
    mc = 'us.kbase.genbank.ValidateGBK'

    java_classpath = os.path.join(os.environ.get("KB_TOP"), classpath.replace('$KB_TOP', os.environ.get("KB_TOP")))

    argslist = "{0}".format("--input_file {0}".format(input_file))

    arguments = ["java", "-classpath", java_classpath, "us.kbase.genbank.ConvertGBK", argslist]
    # Python-2-only print statement.
    print arguments

    tool_process = subprocess.Popen(arguments, stderr=subprocess.PIPE)
    stdout, stderr = tool_process.communicate()

    # The Java tool signals failure via stderr output.
    if len(stderr) > 0:
        logger.error("Validation of Genbank.Genome failed on {0}".format(input_file))
        sys.exit(1)
    else:
        logger.info("Validation of Genbank.Genome completed.")
        sys.exit(0)
# Script entry point: parse --input_file and run the validation.
if __name__ == "__main__":
    script_details = script_utils.parse_docs(transform.__doc__)

    parser = argparse.ArgumentParser(prog=__file__,
                                     description=script_details["Description"],
                                     epilog=script_details["Authors"])
    parser.add_argument("--input_file",
                        help=script_details["Args"]["input_file"],
                        action="store",
                        type=str,
                        nargs='?',
                        required=True)
    args, unknown = parser.parse_known_args()

    logger = script_utils.stderrlogger(__file__)

    try:
        transform(input_file=args.input_file,
                  logger = logger)
    # NOTE(review): "except Exception, e" is Python-2-only syntax.
    except Exception, e:
        logger.exception(e)
        sys.exit(1)

    sys.exit(0)
| Python | 0.000001 |
714815fd943207089c1805e01c4f476ddf7c6917 | Add suport for html reports | jasperserver/core/ExportExecutionRequestBuilder.py | jasperserver/core/ExportExecutionRequestBuilder.py | from time import sleep
from resources_mime_type import ResourceFilesMimeType as rmt
import json
class ExportExecutionRequestBuilder(object):
    """Builder around the JasperReports Server v2 reportExecutions REST API
    for one (requestId, exportId) pair: polls for the exported output,
    fetches attachments, and queries export status."""

    def __init__(self, ReportExecutionRequestBuilder,js_connect, requestId, exportId):
        # Parent report-execution builder, used for status polling.
        self.rerb = ReportExecutionRequestBuilder
        self.requestId = requestId
        self.exportId = exportId
        self._connect = js_connect
        # Accumulates downloaded data; see getResult().
        self.result = {}
        #self.opresult = opresult
        path = "/reportExecutions"
        self.url = self._connect._rest_url + '_v2' + path

    #for each id. Get one file each time
    def outputResource(self):
        """Poll the export output until the server marks it final, store the
        content under self.result["content"] and return self (fluent)."""
        #exports = self.opresult.get('exports', [])
        # Bounded polling: give up after a fixed number of attempts instead
        # of spinning forever.
        limit = 10
        path = "/%s/exports/%s/outputResource" % (self.requestId, self.exportId)
        content = None
        while limit > 0:
            response = self._connect.get(self.url + path).response
            print "output-final {}".format(response.headers['output-final'])
            if response.headers['output-final'] == "true":
                break
            limit = limit - 1
        # NOTE(review): when the limit is exhausted the last (non-final)
        # response content is still stored -- confirm that is intended.
        #result = response.content
        self.result["content"] = response.content
        return self

    def attachment(self, attachmentId):
        """Wait (bounded) until the export is ready, then GET the attachment."""
        if not attachmentId or attachmentId == "/":
            raise Exception("attachmentId mustn't be an empty string!")
        limit = 10
        path = "/%s/exports/%s/attachments/%s" % (self.requestId, self.exportId, attachmentId)
        # Poll the parent execution status once per second until "ready".
        while limit > 0:
            result = self.rerb.status().content
            status = json.loads(result)
            if status.get('value') == "ready":
                break
            sleep(1)
            limit = limit - 1
        print "before pedido attachment {} path {} conn {}".format(attachmentId, self.url + path, self._connect)
        response = self._connect.get(self.url + path)
        #print "path attach 5 {} content {}".format(self.url + path, response.content)
        return response

    def status(self):
        """Return the raw status response of this export."""
        path = "/%s/exports/%s/status" % (self.requestId, self.exportId)
        #setHeader('Content-type',rmt.JSON,self._connect)
        # NOTE(review): setHeader is not defined in this class -- presumably
        # a module-level helper; confirm it is imported here.
        setHeader('accept','application/json',self._connect)
        return self._connect.get(self.url + path)

    def getResult(self, rtype=None):
        # With rtype, look up a single entry (case-insensitive key);
        # without, return the whole result dict.
        return self.result.get(rtype.lower()) if rtype else self.result

    # Leftover pre-refactor implementation kept below as an inert string
    # literal (a no-op expression inside the class body).
    '''def outputResource(self):
        result = []
        exports = self.opresult.get('exports', [])
        for file in exports:
            path = "/%s/exports/%s/outputResource" % (self.requestId, file.get("id"))
            content = None
            while True:
                response = self._connect.get(self.url + path).response
                print "output-final {}".format(response.headers['output-final'])
                if response.headers['output-final'] == "true":
                    content = response.content
                    break
                result.append(content)
        return result'''
| from time import sleep
from resources_mime_type import ResourceFilesMimeType as rmt
import json
class ExportExecutionRequestBuilder(object):
    """Builder around the JasperReports Server v2 reportExecutions REST API
    for one (requestId, exportId) pair: polls for the exported output,
    fetches attachments, and queries export status."""

    def __init__(self, ReportExecutionRequestBuilder,js_connect, requestId, exportId):
        # Parent report-execution builder, used for status polling.
        self.rerb = ReportExecutionRequestBuilder
        self.requestId = requestId
        self.exportId = exportId
        self._connect = js_connect
        # Accumulates downloaded data; see getResult().
        self.result = {}
        #self.opresult = opresult
        path = "/reportExecutions"
        self.url = self._connect._rest_url + '_v2' + path

    #for each id. Get one file each time
    def outputResource(self):
        """Poll the export output until the server marks it final, store the
        content under self.result["content"] and return self (fluent)."""
        #exports = self.opresult.get('exports', [])
        path = "/%s/exports/%s/outputResource" % (self.requestId, self.exportId)
        content = None
        # NOTE(review): unbounded polling -- if the server never reports
        # output-final == "true" this loops forever; consider a retry limit.
        while True:
            response = self._connect.get(self.url + path).response
            print "output-final {}".format(response.headers['output-final'])
            if response.headers['output-final'] == "true":
                break
        #result = response.content
        self.result["content"] = response.content
        return self

    def attachment(self, attachmentId):
        """Wait until the export is ready, then GET the attachment."""
        if not attachmentId or attachmentId == "/":
            raise Exception("attachmentId mustn't be an empty string!")
        path = "/%s/exports/%s/attachments/%s" % (self.requestId, self.exportId, attachmentId)
        # NOTE(review): unbounded polling of the parent execution status.
        while True:
            result = self.rerb.status().content
            status = json.loads(result)
            if status.get('value') == "ready":
                break
            sleep(1)
        print "before pedido attachment {} path {} conn {}".format(attachmentId, self.url + path, self._connect)
        response = self._connect.get(self.url + path)
        #print "path attach 5 {} content {}".format(self.url + path, response.content)
        return response

    def status(self):
        """Return the raw status response of this export."""
        path = "/%s/exports/%s/status" % (self.requestId, self.exportId)
        #setHeader('Content-type',rmt.JSON,self._connect)
        # NOTE(review): setHeader is not defined in this class -- presumably
        # a module-level helper; confirm it is imported here.
        setHeader('accept','application/json',self._connect)
        return self._connect.get(self.url + path)

    def getResult(self, rtype=None):
        # With rtype, look up a single entry (case-insensitive key);
        # without, return the whole result dict.
        return self.result.get(rtype.lower()) if rtype else self.result

    # Leftover pre-refactor implementation kept below as an inert string
    # literal (a no-op expression inside the class body).
    '''def outputResource(self):
        result = []
        exports = self.opresult.get('exports', [])
        for file in exports:
            path = "/%s/exports/%s/outputResource" % (self.requestId, file.get("id"))
            content = None
            while True:
                response = self._connect.get(self.url + path).response
                print "output-final {}".format(response.headers['output-final'])
                if response.headers['output-final'] == "true":
                    content = response.content
                    break
                result.append(content)
        return result'''
| Python | 0 |
487f7a2235e8541670fc0e9949dd3c0fb80eb932 | fix formatting | projects/dendrites/permutedMNIST/experiments/__init__.py | projects/dendrites/permutedMNIST/experiments/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from .base import CONFIGS as BASE
from .batch import CONFIGS as BATCH
from .batch_mnist import CONFIGS as BATCH_MNIST
from .centroid import CONFIGS as CENTROID
from .hyperparameter_search import CONFIGS as HYPERPARAMETERSEARCH
from .no_dendrites import CONFIGS as NO_DENDRITES
from .si_centroid import CONFIGS as SI_CENTROID
from .sp_context import CONFIGS as SP_CONTEXT
from .sp_context_search import CONFIGS as SP_PROTO
"""
Import and collect all experiment configurations into one CONFIG
"""
# NOTE: placed after the imports, the string above is a no-op expression,
# not the module docstring.

__all__ = ["CONFIGS"]

# Collect all configurations
# NOTE(review): SP_CONTEXT and SP_PROTO are imported above but never merged
# into CONFIGS -- confirm whether that is intentional.
CONFIGS = dict()
CONFIGS.update(BASE)
CONFIGS.update(BATCH)
CONFIGS.update(BATCH_MNIST)
CONFIGS.update(CENTROID)
CONFIGS.update(HYPERPARAMETERSEARCH)
CONFIGS.update(NO_DENDRITES)
CONFIGS.update(SI_CENTROID)
| # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from .base import CONFIGS as BASE
from .batch import CONFIGS as BATCH
from .batch_mnist import CONFIGS as BATCH_MNIST
from .centroid import CONFIGS as CENTROID
from .hyperparameter_search import CONFIGS as HYPERPARAMETERSEARCH
from .no_dendrites import CONFIGS as NO_DENDRITES
from .si_centroid import CONFIGS as SI_CENTROID
from .sp_context import CONFIGS as SP_CONTEXT
from .sp_context_search import CONFIGS as SP_PROTO
"""
Import and collect all experiment configurations into one CONFIG
"""

__all__ = ["CONFIGS"]

# Collect all configurations.
# The duplicate CONFIGS.update(CENTROID) that used to appear here was
# redundant (dict.update with the same mapping is idempotent) and has
# been removed; the updates are kept in alphabetical order.
CONFIGS = dict()
CONFIGS.update(BASE)
CONFIGS.update(BATCH)
CONFIGS.update(BATCH_MNIST)
CONFIGS.update(CENTROID)
CONFIGS.update(HYPERPARAMETERSEARCH)
CONFIGS.update(NO_DENDRITES)
CONFIGS.update(SI_CENTROID)
| Python | 0.001459 |
57ac1c43181d3bd1f5a18a1ed3137c1b997e2533 | Fix lint error | keras_cv/layers/regularization/stochastic_depth.py | keras_cv/layers/regularization/stochastic_depth.py | # Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package="keras_cv")
class StochasticDepth(tf.keras.layers.Layer):
    """
    Implements the Stochastic Depth layer. It randomly drops residual branches
    in residual architectures. It is used as a drop-in replacement for addition
    operation. Note that this layer DOES NOT drop a residual block across
    individual samples but across the entire batch.

    Reference:
        - [Deep Networks with Stochastic Depth](https://arxiv.org/abs/1603.09382).
        - Docstring taken from [stochastic_depth.py](https://tinyurl.com/mr3y2af6)

    Args:
        survival_probability: float, the probability of the residual branch
            being kept.

    Usage:
    `StochasticDepth` can be used in a residual network as follows:
    ```python
    # (...)
    input = tf.ones((1, 3, 3, 1), dtype=tf.float32)
    residual = tf.keras.layers.Conv2D(1, 1)(input)
    output = keras_cv.layers.StochasticDepth()([input, residual])
    # (...)
    ```

    At train time, StochasticDepth returns:

    $$
    x[0] + b_l * x[1],
    $$

    where $b_l$ is a random Bernoulli variable with probability $P(b_l = 1) = p_l$

    At test time, StochasticDepth rescales the activations of the residual
    branch based on the survival probability ($p_l$):

    $$
    x[0] + p_l * x[1]
    $$
    """

    def __init__(self, survival_probability=0.5, **kwargs):
        super().__init__(**kwargs)

        # Probability that the residual branch is kept during training.
        self.survival_probability = survival_probability

    def call(self, x, training=None):
        # Expects exactly [shortcut, residual].
        if len(x) != 2:
            raise ValueError(
                f"""Input must be a list of length 2. """
                f"""Got input with length={len(x)}."""
            )

        shortcut, residual = x

        # One scalar Bernoulli draw per call: the whole batch keeps or drops
        # the residual branch together. Note it is drawn unconditionally,
        # i.e. even when not training.
        b_l = tf.keras.backend.random_bernoulli([], p=self.survival_probability)

        if training:
            return shortcut + b_l * residual
        else:
            # At inference, scale the residual by its survival probability
            # instead of sampling.
            return shortcut + self.survival_probability * residual

    def get_config(self):
        # Merge this layer's parameter with the base Layer config so the
        # layer round-trips through Keras serialization.
        config = {"survival_probability": self.survival_probability}
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
| # Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package="keras_cv")
class StochasticDepth(tf.keras.layers.Layer):
    """
    Implements the Stochastic Depth layer. It randomly drops residual branches
    in residual architectures. It is used as a drop-in replacement for addition
    operation. Note that this layer DOES NOT drop a residual block across
    individual samples but across the entire batch.

    Reference:
        - [Deep Networks with Stochastic Depth](https://arxiv.org/abs/1603.09382).
        - Docstring taken from [tensorflow_addons/layers/stochastic_depth.py](tinyurl.com/mr3y2af6).

    Args:
        survival_probability: float, the probability of the residual branch
            being kept.

    Usage:
    `StochasticDepth` can be used in a residual network as follows:
    ```python
    # (...)
    input = tf.ones((1, 3, 3, 1), dtype=tf.float32)
    residual = tf.keras.layers.Conv2D(1, 1)(input)
    output = keras_cv.layers.StochasticDepth()([input, residual])
    # (...)
    ```

    At train time, StochasticDepth returns:

    $$
    x[0] + b_l * x[1],
    $$

    where $b_l$ is a random Bernoulli variable with probability $P(b_l = 1) = p_l$

    At test time, StochasticDepth rescales the activations of the residual
    branch based on the survival probability ($p_l$):

    $$
    x[0] + p_l * x[1]
    $$
    """

    def __init__(self, survival_probability=0.5, **kwargs):
        super().__init__(**kwargs)

        # Probability that the residual branch is kept during training.
        self.survival_probability = survival_probability

    def call(self, x, training=None):
        # Expects exactly [shortcut, residual].
        if len(x) != 2:
            raise ValueError(
                f"""Input must be a list of length 2. """
                f"""Got input with length={len(x)}."""
            )

        shortcut, residual = x

        # One scalar Bernoulli draw per call: the whole batch keeps or drops
        # the residual branch together. Note it is drawn unconditionally,
        # i.e. even when not training.
        b_l = tf.keras.backend.random_bernoulli([], p=self.survival_probability)

        if training:
            return shortcut + b_l * residual
        else:
            # At inference, scale the residual by its survival probability
            # instead of sampling.
            return shortcut + self.survival_probability * residual

    def get_config(self):
        # Merge this layer's parameter with the base Layer config so the
        # layer round-trips through Keras serialization.
        config = {"survival_probability": self.survival_probability}
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
| Python | 0.000035 |
6fbfa11a6f13f8271687a83fc4de68f62d4a4501 | Fix encrpytion with custom salt | crypto/encrypt.py | crypto/encrypt.py | #!/bin/env python3
"""
Encrypt password with salt for unix
Usage:
encrypt.py [options] [--rounds <count>] [--sha512 | --sha256 | --md5 | --crypt] [<salt>]
Options:
--sha512
--sha256
--md5
--crypt
-r, --rounds <count> rounds[default: 1000]
"""
import sys
import crypt
from getpass import getpass
from docopt import docopt
# docopt(doc, argv=None, help=True, version=None, options_first=False))
# Flag used when the caller specifies no hash method.
default_flag = "--sha512"

# Map each CLI flag to its crypt method constant and the id prefix used in
# the "$id$rounds=N$salt$" modular-crypt salt format.
methods = {
    "--sha512" : {
        "method": crypt.METHOD_SHA512,
        "id": "6",
    },
    "--sha256" : {
        "method": crypt.METHOD_SHA256,
        "id": "5",
    },
    "--md5" : {
        "method": crypt.METHOD_MD5,
        "id": "1",
    },
    "--crypt" : {
        "method": crypt.METHOD_CRYPT,
        "id": "",
    },
}

def get_method(opt, default=default_flag):
    """Return the methods entry for the first hash flag set in *opt*.

    Falls back to the entry for *default* when no flag is set.
    """
    for key in methods.keys():
        if opt.get(key, False):
            return methods.get(key)
    # Bug fix: the *default* parameter was previously ignored (the lookup
    # always used the module-level default_flag).
    return methods.get(default)
def main():
    """Read a password (piped stdin or interactive prompt), hash it with the
    selected crypt method/salt, and print the resulting shadow string."""
    opt = docopt(__doc__, sys.argv[1:])
    rounds = opt.get("--rounds")
    # Renamed from "methods" to avoid shadowing the module-level dict.
    method_info = get_method(opt)
    method = method_info.get("method")
    id_prefix = method_info.get("id")

    salt = opt.get("<salt>")
    if not salt:
        salt = crypt.mksalt(method)
    else:
        # NOTE(review): this "$id$rounds=N$salt$" format only suits the
        # SHA-256/SHA-512 methods; METHOD_CRYPT has an empty id and MD5
        # takes no rounds -- confirm the intended behavior for those.
        salt = "${}$rounds={}${}$".format(id_prefix, rounds, salt)

    password = ""
    if not sys.stdin.isatty():
        # Bug fix: strip the trailing newline that readline() keeps, so a
        # piped password hashes the same as one typed at the prompt.
        password = sys.stdin.readline().rstrip("\n")
    else:
        password = getpass()

    # An empty password is rejected.
    if not password:
        sys.exit(1)

    shadow = crypt.crypt(password, salt)
    print(shadow)
def usage():
    # Unused placeholder; docopt generates usage text from the module
    # docstring instead.
    pass

# Script entry point.
if __name__ == "__main__":
    main()
| #!/bin/env python3
"""
Encrypt password with salt for unix
Usage:
encrypt.py [options] [--sha512 | --sha256 | --md5 | --crypt] [<salt>]
Options:
--sha512
--sha256
--md5
--crypt
"""
import sys
import crypt
from getpass import getpass
from docopt import docopt
# docopt(doc, argv=None, help=True, version=None, options_first=False))
# Intended default hash selection; currently unused (see get_method below).
default_flag = {"--sha512":True}

# Map each CLI flag to its crypt method constant.
methods = {
    "--sha512" : crypt.METHOD_SHA512,
    "--sha256" : crypt.METHOD_SHA256,
    "--md5" : crypt.METHOD_MD5,
    "--crypt" : crypt.METHOD_CRYPT,
}

def get_method(opt):
    """Return the crypt method for the first hash flag set in *opt*."""
    # NOTE(review): returns None when no flag is set instead of falling
    # back to default_flag -- looks unintended; confirm.
    for key in methods.keys():
        if opt.get(key, False):
            return methods.get(key)
def main():
    """Read a password (piped stdin or interactive prompt), hash it with the
    selected crypt method/salt, and print the resulting shadow string."""
    opt = docopt(__doc__, sys.argv[1:])
    method = get_method(opt)
    salt = opt.get("<salt>")
    if not salt:
        salt = crypt.mksalt(method)
    password = ""
    if not sys.stdin.isatty():
        # NOTE(review): readline() keeps the trailing newline, so a piped
        # password hashes differently from one typed at the prompt.
        password = sys.stdin.readline()
    else:
        password = getpass()
    # An empty password is rejected.
    if not password:
        sys.exit(1)
    shadow = crypt.crypt(password, salt)
    print(shadow)
def usage():
    # Unused placeholder; docopt generates usage text from the module
    # docstring instead.
    pass

# Script entry point.
if __name__ == "__main__":
    main()
| Python | 0 |
4c2f6372bb5c1db18998626049aa8e53e9889452 | Fix an invalid build dependency. | syzygy/trace/rpc/rpc.gyp | syzygy/trace/rpc/rpc.gyp | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
  'variables': {
    'chromium_code': 1,
    # All MIDL-generated RPC stubs for this module are emitted here.
    'midl_out_dir': '<(SHARED_INTERMEDIATE_DIR)/syzygy/trace/rpc',
  },
  'target_defaults': {
    'all_dependent_settings': {
      # Dependents need to see the generated RPC headers.
      'include_dirs': ['<(SHARED_INTERMEDIATE_DIR)'],
    },
  },
  'targets': [
    {
      'target_name': 'call_trace_rpc_lib',
      'type': 'static_library',
      # Build our IDL file to the shared intermediate directory using the
      # midl_rpc.gypi include (because the default rules for .idl files are
      # specific to COM interfaces). This include expects the prefix and
      # midl_out_dir variables to be defined.
      'variables': {
        'prefix': 'CallTrace',
      },
      'includes': ['../../build/midl_rpc.gypi'],
      'sources': ['call_trace_rpc.idl'],
      'dependencies': [
        '<(src)/syzygy/common/rpc/rpc.gyp:common_rpc_lib',
        '<(src)/syzygy/trace/protocol/protocol.gyp:protocol_lib',
      ],
      # Compile the MIDL-generated header and client/server stubs into this
      # static library so dependents only have to link against it.
      'outputs': [
        '<(SHARED_INTERMEDIATE_DIR)/syzygy/trace/rpc/call_trace_rpc.h',
        '<(SHARED_INTERMEDIATE_DIR)/syzygy/trace/rpc/call_trace_rpc_c.cc',
        '<(SHARED_INTERMEDIATE_DIR)/syzygy/trace/rpc/call_trace_rpc_s.cc',
      ],
      'process_outputs_as_sources': 1,
    },
    {
      'target_name': 'logger_rpc_lib',
      'type': 'static_library',
      # Build our IDL file to the shared intermediate directory using the
      # midl_rpc.gypi include (because the default rules for .idl files are
      # specific to COM interfaces). This include expects the prefix and
      # midl_out_dir variables to be defined.
      'variables': {
        'prefix': 'Logger',
      },
      'includes': ['../../build/midl_rpc.gypi'],
      'sources': ['logger_rpc.idl'],
      'dependencies': [
        '<(src)/syzygy/common/rpc/rpc.gyp:common_rpc_lib',
        '<(src)/syzygy/trace/protocol/protocol.gyp:protocol_lib',
      ],
    },
  ],
}
| # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
  'variables': {
    'chromium_code': 1,
    # All MIDL-generated RPC stubs for this module are emitted here.
    'midl_out_dir': '<(SHARED_INTERMEDIATE_DIR)/syzygy/trace/rpc',
  },
  'target_defaults': {
    'all_dependent_settings': {
      # Dependents need to see the generated RPC headers.
      'include_dirs': ['<(SHARED_INTERMEDIATE_DIR)'],
    },
  },
  'targets': [
    {
      'target_name': 'call_trace_rpc_lib',
      'type': 'static_library',
      # Build our IDL file to the shared intermediate directory using the
      # midl_rpc.gypi include (because the default rules for .idl files are
      # specific to COM interfaces). This include expects the prefix and
      # midl_out_dir variables to be defined.
      # NOTE(review): the generated call_trace_rpc_c.cc/_s.cc stubs are not
      # listed as sources of this target -- confirm they are compiled
      # somewhere, otherwise dependents will hit link errors.
      'variables': {
        'prefix': 'CallTrace',
      },
      'includes': ['../../build/midl_rpc.gypi'],
      'sources': ['call_trace_rpc.idl'],
      'dependencies': [
        '<(src)/syzygy/common/rpc/rpc.gyp:common_rpc_lib',
        '<(src)/syzygy/trace/protocol/protocol.gyp:protocol_lib',
      ],
    },
    {
      'target_name': 'logger_rpc_lib',
      'type': 'static_library',
      # Build our IDL file to the shared intermediate directory using the
      # midl_rpc.gypi include (because the default rules for .idl files are
      # specific to COM interfaces). This include expects the prefix and
      # midl_out_dir variables to be defined.
      'variables': {
        'prefix': 'Logger',
      },
      'includes': ['../../build/midl_rpc.gypi'],
      'sources': ['logger_rpc.idl'],
      'dependencies': [
        '<(src)/syzygy/common/rpc/rpc.gyp:common_rpc_lib',
        '<(src)/syzygy/trace/protocol/protocol.gyp:protocol_lib',
      ],
    },
  ],
}
| Python | 0.999952 |
84df8646ba396088e70ca8469b301d11d13d2da7 | Fix wrong query on running tis (#17631) | airflow/api/common/experimental/delete_dag.py | airflow/api/common/experimental/delete_dag.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Delete DAGs APIs."""
import logging
from sqlalchemy import or_
from airflow import models
from airflow.exceptions import AirflowException, DagNotFound
from airflow.models import DagModel, TaskFail
from airflow.models.serialized_dag import SerializedDagModel
from airflow.utils.session import provide_session
from airflow.utils.state import State
log = logging.getLogger(__name__)
@provide_session
def delete_dag(dag_id: str, keep_records_in_log: bool = True, session=None) -> int:
    """Delete every database record that belongs to the DAG ``dag_id``.

    :param dag_id: the dag_id of the DAG to delete
    :param keep_records_in_log: whether to keep records of the given dag_id
        in the Log table in the backend database (for reasons like auditing).
        The default value is True.
    :param session: SQLAlchemy session (injected by the ``@provide_session``
        decorator when not supplied by the caller)
    :return: count of deleted rows
    :raises AirflowException: if the DAG still has running task instances
    :raises DagNotFound: if no DAG with ``dag_id`` exists
    """
    log.info("Deleting DAG: %s", dag_id)
    # Refuse to delete while this DAG has task instances in the RUNNING
    # state; the query is scoped to this dag_id and only needs one such row.
    running_tis = (
        session.query(models.TaskInstance.state)
        .filter(models.TaskInstance.dag_id == dag_id)
        .filter(models.TaskInstance.state == State.RUNNING)
        .first()
    )
    if running_tis:
        raise AirflowException("TaskInstances still running")
    dag = session.query(DagModel).filter(DagModel.dag_id == dag_id).first()
    if dag is None:
        raise DagNotFound(f"Dag id {dag_id} not found")

    # Scheduler removes DAGs without files from serialized_dag table every dag_dir_list_interval.
    # There may be a lag, so explicitly removes serialized DAG here.
    if SerializedDagModel.has_dag(dag_id=dag_id, session=session):
        SerializedDagModel.remove_dag(dag_id=dag_id, session=session)

    count = 0

    # Purge rows from every mapped model that carries a dag_id column; the
    # LIKE clause also matches this DAG's subdags ("<dag_id>.<child>").
    for model in models.base.Base._decl_class_registry.values():
        if hasattr(model, "dag_id"):
            if keep_records_in_log and model.__name__ == 'Log':
                continue
            cond = or_(model.dag_id == dag_id, model.dag_id.like(dag_id + ".%"))
            count += session.query(model).filter(cond).delete(synchronize_session='fetch')
    if dag.is_subdag:
        # A subdag's TaskFail/TaskInstance rows are stored under the parent's
        # dag_id, with this subdag's name as the task_id.
        parent_dag_id, task_id = dag_id.rsplit(".", 1)
        for model in TaskFail, models.TaskInstance:
            count += (
                session.query(model).filter(model.dag_id == parent_dag_id, model.task_id == task_id).delete()
            )

    # Delete entries in Import Errors table for a deleted DAG
    # This handles the case when the dag_id is changed in the file
    session.query(models.ImportError).filter(models.ImportError.filename == dag.fileloc).delete(
        synchronize_session='fetch'
    )
    return count
| #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Delete DAGs APIs."""
import logging
from sqlalchemy import or_
from airflow import models
from airflow.exceptions import AirflowException, DagNotFound
from airflow.models import DagModel, TaskFail
from airflow.models.serialized_dag import SerializedDagModel
from airflow.utils.session import provide_session
from airflow.utils.state import State
log = logging.getLogger(__name__)
@provide_session
def delete_dag(dag_id: str, keep_records_in_log: bool = True, session=None) -> int:
    """Delete every database record that belongs to the DAG ``dag_id``.

    :param dag_id: the dag_id of the DAG to delete
    :param keep_records_in_log: whether to keep records of the given dag_id
        in the Log table in the backend database (for reasons like auditing).
        The default value is True.
    :param session: SQLAlchemy session (injected by ``@provide_session``)
    :return: count of deleted rows
    :raises AirflowException: if the DAG still has running task instances
    :raises DagNotFound: if no DAG with ``dag_id`` exists
    """
    log.info("Deleting DAG: %s", dag_id)
    # BUG FIX: the guard query must be scoped to this dag_id.  Previously it
    # looked for unfinished task instances of *every* DAG, so deleting any
    # DAG failed whenever any task anywhere in the installation was running.
    running_tis = (
        session.query(models.TaskInstance.state)
        .filter(models.TaskInstance.dag_id == dag_id)
        .filter(models.TaskInstance.state == State.RUNNING)
        .first()
    )
    if running_tis:
        raise AirflowException("TaskInstances still running")
    dag = session.query(DagModel).filter(DagModel.dag_id == dag_id).first()
    if dag is None:
        raise DagNotFound(f"Dag id {dag_id} not found")

    # Scheduler removes DAGs without files from serialized_dag table every dag_dir_list_interval.
    # There may be a lag, so explicitly removes serialized DAG here.
    if SerializedDagModel.has_dag(dag_id=dag_id, session=session):
        SerializedDagModel.remove_dag(dag_id=dag_id, session=session)

    count = 0

    # Purge rows from every mapped model that carries a dag_id column; the
    # LIKE clause also matches this DAG's subdags ("<dag_id>.<child>").
    for model in models.base.Base._decl_class_registry.values():
        if hasattr(model, "dag_id"):
            if keep_records_in_log and model.__name__ == 'Log':
                continue
            cond = or_(model.dag_id == dag_id, model.dag_id.like(dag_id + ".%"))
            count += session.query(model).filter(cond).delete(synchronize_session='fetch')
    if dag.is_subdag:
        # A subdag's TaskFail/TaskInstance rows are stored under the parent's
        # dag_id, with this subdag's name as the task_id.
        parent_dag_id, task_id = dag_id.rsplit(".", 1)
        for model in TaskFail, models.TaskInstance:
            count += (
                session.query(model).filter(model.dag_id == parent_dag_id, model.task_id == task_id).delete()
            )

    # Delete entries in Import Errors table for a deleted DAG
    # This handles the case when the dag_id is changed in the file
    session.query(models.ImportError).filter(models.ImportError.filename == dag.fileloc).delete(
        synchronize_session='fetch'
    )
    return count
| Python | 0.000017 |
bf87d7a60f20d9811fe2ff2c579f52b3e77a1ed3 | Remove unneeded print statement. | ctree/c/dotgen.py | ctree/c/dotgen.py | """
DOT generator for C constructs.
"""
from ctree.dotgen import DotGenLabeller
from ctree.types import codegen_type
class CDotGenLabeller(DotGenLabeller):
    """Produces the DOT-graph label text for each kind of C AST node."""

    def visit_SymbolRef(self, node):
        """Label: optional qualifiers, optional declared type, then the name."""
        pieces = []
        if node._global:
            pieces.append("__global ")
        if node._local:
            pieces.append("__local ")
        if node._const:
            pieces.append("__const ")
        if node.type is not None:
            pieces.append("%s " % codegen_type(node.type))
        pieces.append("%s" % node.name)
        return "".join(pieces)

    def visit_FunctionDecl(self, node):
        """Label: storage/inline/kernel modifiers plus an elided signature."""
        modifiers = ""
        if node.static:
            modifiers += "static "
        if node.inline:
            modifiers += "inline "
        if node.kernel:
            modifiers += "__kernel "
        return "%s%s %s(...)" % (modifiers, codegen_type(node.return_type), node.name)

    def visit_Constant(self, node):
        return str(node.value)

    def visit_String(self, node):
        # Join the literal fragments with escaped-quote separators.
        separator = r'\" \"'
        return separator.join(node.values)

    def visit_CFile(self, node):
        return node.get_filename()

    def visit_NdPointer(self, node):
        # Summarize the numpy pointer: dtype, dimensionality, and flags.
        ptr = node.ptr
        return "dtype: %s\nndim, shape: %s, %s\nflags: %s" % (
            ptr.dtype, ptr.ndim, ptr.shape, ptr.flags)

    def visit_BinaryOp(self, node):
        return node.op.__class__.__name__

    def visit_UnaryOp(self, node):
        return node.op.__class__.__name__
| """
DOT generator for C constructs.
"""
from ctree.dotgen import DotGenLabeller
from ctree.types import codegen_type
class CDotGenLabeller(DotGenLabeller):
    """
    Manages generation of DOT.

    Each visit_* method returns the label string for the matching C AST node.
    """

    def visit_SymbolRef(self, node):
        # Qualifiers first, then the optional declared type, then the name.
        # BUG FIX: removed a stray ``print(node.type)`` debug statement that
        # spammed stdout on every symbol visit.
        s = r""
        if node._global:
            s += r"__global "
        if node._local:
            s += r"__local "
        if node._const:
            s += r"__const "
        if node.type is not None:
            s += r"%s " % codegen_type(node.type)
        s += r"%s" % node.name
        return s

    def visit_FunctionDecl(self, node):
        s = r""
        if node.static:
            s += r"static "
        if node.inline:
            s += r"inline "
        if node.kernel:
            s += r"__kernel "
        s += r"%s %s(...)" % (codegen_type(node.return_type), node.name)
        return s

    def visit_Constant(self, node):
        return str(node.value)

    def visit_String(self, node):
        return r'\" \"'.join(node.values)

    def visit_CFile(self, node):
        return node.get_filename()

    def visit_NdPointer(self, node):
        s = "dtype: %s\n" % node.ptr.dtype
        s += "ndim, shape: %s, %s\n" % (node.ptr.ndim, node.ptr.shape)
        s += "flags: %s" % node.ptr.flags
        return s

    def visit_BinaryOp(self, node):
        return type(node.op).__name__

    def visit_UnaryOp(self, node):
        return type(node.op).__name__
| Python | 0.000005 |
742cdc4419449a8190bddd8439c3559a1bf19180 | fix a bug, adding import | alphatwirl/concurrently/TaskPackageDropbox.py | alphatwirl/concurrently/TaskPackageDropbox.py | # Tai Sakuma <tai.sakuma@cern.ch>
import logging
import time
from operator import itemgetter
from .WorkingArea import WorkingArea
##__________________________________________________________________||
class TaskPackageDropbox(object):
"""A drop box for task packages.
It puts task packages in a working area and dispatches runners
that execute the tasks.
"""
def __init__(self, workingArea, dispatcher):
self.workingArea = workingArea
self.dispatcher = dispatcher
def __repr__(self):
return '{}(workingArea = {!r}, dispatcher = {!r})'.format(
self.__class__.__name__,
self.workingArea,
self.dispatcher
)
def open(self):
self.workingArea.open()
self.runid_package_index_map = { }
def put(self, package):
package_index = self.workingArea.put_package(package)
runid = self.dispatcher.run(self.workingArea, package_index)
self.runid_package_index_map[runid] = package_index
def receive(self):
package_index_result_pairs = [ ] # a list of (package_index, _result)
try:
sleep = 5
while self.runid_package_index_map:
finished_runid = self.dispatcher.poll()
# e.g., [1001, 1003]
package_indices = [self.runid_package_index_map.pop(i) for i in finished_runid]
# e.g., [0, 2]
pairs = [(i, self.workingArea.collect_result(i)) for i in package_indices]
# e.g., [(0, result0), (2, None)] # None indicates the job failed
failed_package_indices = [i for i, r in pairs if r is None]
# e.g., [2]
pairs = [(i, r) for i, r in pairs if i not in failed_package_indices]
# e.g., [(0, result0)] # only successful ones
# rerun failed jobs
for package_index in failed_package_indices:
logger = logging.getLogger(__name__)
logger.warning('resubmitting {}'.format(self.workingArea.package_path(package_index)))
runid = self.dispatcher.run(self.workingArea, package_index)
self.runid_package_index_map[runid] = package_index
package_index_result_pairs.extend(pairs)
time.sleep(sleep)
except KeyboardInterrupt:
logger = logging.getLogger(__name__)
logger.warning('received KeyboardInterrupt')
self.dispatcher.terminate()
# sort in the order of package_index
package_index_result_pairs = sorted(package_index_result_pairs, key = itemgetter(0))
results = [result for i, result in package_index_result_pairs]
return results
def close(self):
self.dispatcher.terminate()
self.workingArea.close()
##__________________________________________________________________||
| # Tai Sakuma <tai.sakuma@cern.ch>
import logging
import time  # BUG FIX: receive() calls time.sleep(); this import was missing
from operator import itemgetter

from .WorkingArea import WorkingArea
##__________________________________________________________________||
class TaskPackageDropbox(object):
    """A drop box for task packages.

    It puts task packages in a working area and dispatches runners
    that execute the tasks.

    """
    def __init__(self, workingArea, dispatcher):
        self.workingArea = workingArea
        self.dispatcher = dispatcher

    def __repr__(self):
        return '{}(workingArea = {!r}, dispatcher = {!r})'.format(
            self.__class__.__name__,
            self.workingArea,
            self.dispatcher
        )

    def open(self):
        # Prepare the working area and reset the mapping from dispatcher
        # run ids to the package indices they are executing.
        self.workingArea.open()
        self.runid_package_index_map = { }

    def put(self, package):
        # Store the package in the working area, start a runner for it, and
        # remember which run id belongs to which package index.
        package_index = self.workingArea.put_package(package)
        runid = self.dispatcher.run(self.workingArea, package_index)
        self.runid_package_index_map[runid] = package_index

    def receive(self):
        # Poll until every dispatched package has produced a result.  Packages
        # whose collected result is None are considered failed and are
        # resubmitted.  Results are returned in put() order, not completion
        # order.
        # NOTE(review): this method calls time.sleep(); the ``time`` module
        # must be imported at module level for it to work.
        package_index_result_pairs = [ ] # a list of (package_index, _result)
        try:
            sleep = 5
            while self.runid_package_index_map:
                finished_runid = self.dispatcher.poll()
                # e.g., [1001, 1003]
                package_indices = [self.runid_package_index_map.pop(i) for i in finished_runid]
                # e.g., [0, 2]
                pairs = [(i, self.workingArea.collect_result(i)) for i in package_indices]
                # e.g., [(0, result0), (2, None)] # None indicates the job failed
                failed_package_indices = [i for i, r in pairs if r is None]
                # e.g., [2]
                pairs = [(i, r) for i, r in pairs if i not in failed_package_indices]
                # e.g., [(0, result0)] # only successful ones
                # rerun failed jobs
                for package_index in failed_package_indices:
                    logger = logging.getLogger(__name__)
                    logger.warning('resubmitting {}'.format(self.workingArea.package_path(package_index)))
                    runid = self.dispatcher.run(self.workingArea, package_index)
                    self.runid_package_index_map[runid] = package_index
                package_index_result_pairs.extend(pairs)
                time.sleep(sleep)
        except KeyboardInterrupt:
            # Ctrl-C: stop all runners; the partial results collected so far
            # are still sorted and returned below.
            logger = logging.getLogger(__name__)
            logger.warning('received KeyboardInterrupt')
            self.dispatcher.terminate()
        # sort in the order of package_index
        package_index_result_pairs = sorted(package_index_result_pairs, key = itemgetter(0))
        results = [result for i, result in package_index_result_pairs]
        return results

    def close(self):
        self.dispatcher.terminate()
        self.workingArea.close()
##__________________________________________________________________||
| Python | 0.000001 |
50861c6d256438afd880aebbb3a19ea360367fac | upgrade IdentityDetailSerializer to DRF3 | api/serializers/identity_detail_serializer.py | api/serializers/identity_detail_serializer.py | from core.models.identity import Identity
from rest_framework import serializers
class IdentityDetailSerializer(serializers.ModelSerializer):
# created_by = serializers.CharField(source='creator_name')
quota = serializers.ReadOnlyField(source='get_quota_dict')
provider_id = serializers.ReadOnlyField(source='provider.uuid')
id = serializers.ReadOnlyField(source="uuid")
class Meta:
model = Identity
fields = ('id', 'provider_id', 'quota') | from core.models.identity import Identity
from rest_framework import serializers
class IdentityDetailSerializer(serializers.ModelSerializer):
created_by = serializers.CharField(source='creator_name')
quota = serializers.Field(source='get_quota_dict')
provider_id = serializers.Field(source='provider.uuid')
id = serializers.Field(source="uuid")
class Meta:
model = Identity
exclude = ('credentials', 'created_by', 'provider', 'uuid') | Python | 0 |
ef18eb5ce3ed8c65a1cf57c139cd5380f76ef707 | Improve `is_node` error message | graphene/relay/node.py | graphene/relay/node.py | from functools import partial
import six
from graphql_relay import from_global_id, to_global_id
from ..types import ID, Field, Interface, ObjectType
from ..types.interface import InterfaceMeta
def is_node(objecttype):
    """Return True when *objecttype* declares Node among its interfaces."""
    assert issubclass(objecttype, ObjectType), (
        'Only ObjectTypes can have a Node interface. Received %s'
    ) % objecttype
    return any(
        issubclass(interface, Node)
        for interface in objecttype._meta.interfaces
    )
def get_default_connection(cls):
    """Build and return a default Connection class for the ObjectType *cls*.

    The Connection import is function-local (presumably to avoid a circular
    import between this module and .connection — TODO confirm).
    """
    from .connection import Connection
    assert issubclass(cls, ObjectType), (
        'Can only get connection type on implemented Nodes.'
    )

    # The generated class is equivalent to writing:
    #     class <cls.__name__>Connection(Connection):
    #         class Meta:
    #             node = cls
    class Meta:
        node = cls

    return type('{}Connection'.format(cls.__name__), (Connection,), {'Meta': Meta})
class GlobalID(Field):
    """An ID field whose value is serialized as a relay global ID."""

    def __init__(self, node, *args, **kwargs):
        super(GlobalID, self).__init__(ID, *args, **kwargs)
        # The Node (sub)class whose to_global_id() encodes the value.
        self.node = node

    @staticmethod
    def id_resolver(parent_resolver, node, root, args, context, info):
        # Resolve the plain id first, then encode it together with the
        # GraphQL type name of the object being resolved.
        id = parent_resolver(root, args, context, info)
        return node.to_global_id(info.parent_type.name, id)  # root._meta.name

    def get_resolver(self, parent_resolver):
        # Bind the surrounding type's resolver and node class into the
        # static resolver above.
        return partial(self.id_resolver, parent_resolver, self.node)
class NodeMeta(InterfaceMeta):
    """Interface metaclass that injects a required global ``id`` field."""

    def __new__(cls, name, bases, attrs):
        cls = InterfaceMeta.__new__(cls, name, bases, attrs)
        # Every type built with this metaclass automatically exposes a
        # required global ID field.
        cls._meta.fields['id'] = GlobalID(cls, required=True, description='The ID of the object.')
        return cls
class NodeField(Field):
    """Root-level field that fetches an object of *node* (or *type*) by ID."""

    def __init__(self, node, type=False, deprecation_reason=None,
                 name=None, **kwargs):
        assert issubclass(node, Node), 'NodeField can only operate in Nodes'
        type = type or node
        # BUG FIX: ``name``, ``deprecation_reason`` and any extra keyword
        # arguments were accepted by this signature but silently discarded;
        # forward them so callers' options actually take effect.
        super(NodeField, self).__init__(
            type,
            description='The ID of the object',
            id=ID(required=True),
            resolver=node.node_resolver,
            name=name,
            deprecation_reason=deprecation_reason,
            **kwargs
        )
class Node(six.with_metaclass(NodeMeta, Interface)):
    '''An object with an ID'''

    @classmethod
    def Field(cls, *args, **kwargs):  # noqa: N802
        """Return a NodeField that resolves instances of this interface."""
        return NodeField(cls, *args, **kwargs)

    @classmethod
    def node_resolver(cls, root, args, context, info):
        return cls.get_node_from_global_id(args.get('id'), context, info)

    @classmethod
    def get_node_from_global_id(cls, global_id, context, info):
        """Decode ``global_id`` and fetch the matching object, or None."""
        try:
            _type, _id = cls.from_global_id(global_id)
            graphene_type = info.schema.get_type(_type).graphene_type
            # We make sure the ObjectType implements the "Node" interface
            assert cls in graphene_type._meta.interfaces
        except Exception:
            # BUG FIX: this was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt.  Only malformed or unknown
            # global ids should silently resolve to None.
            return None
        get_node = getattr(graphene_type, 'get_node', None)
        if get_node:
            return get_node(_id, context, info)

    @classmethod
    def from_global_id(cls, global_id):
        return from_global_id(global_id)

    @classmethod
    def to_global_id(cls, type, id):
        return to_global_id(type, id)

    @classmethod
    def implements(cls, objecttype):
        # Attach a default Connection class unless the implementing type
        # provides its own ``get_connection`` factory.
        get_connection = getattr(objecttype, 'get_connection', None)
        if not get_connection:
            get_connection = partial(get_default_connection, objecttype)

        objecttype.Connection = get_connection()
| from functools import partial
import six
from graphql_relay import from_global_id, to_global_id
from ..types import ID, Field, Interface, ObjectType
from ..types.interface import InterfaceMeta
def is_node(objecttype):
    '''
    Check if the given objecttype has Node as an interface
    '''
    # Include the offending object in the assertion message so failures are
    # actionable (previously the message did not say *what* was received).
    assert issubclass(objecttype, ObjectType), (
        'Only ObjectTypes can have a Node interface. Received %s'
    ) % objecttype

    for i in objecttype._meta.interfaces:
        if issubclass(i, Node):
            return True

    return False
def get_default_connection(cls):
from .connection import Connection
assert issubclass(cls, ObjectType), (
'Can only get connection type on implemented Nodes.'
)
class Meta:
node = cls
return type('{}Connection'.format(cls.__name__), (Connection,), {'Meta': Meta})
class GlobalID(Field):
def __init__(self, node, *args, **kwargs):
super(GlobalID, self).__init__(ID, *args, **kwargs)
self.node = node
@staticmethod
def id_resolver(parent_resolver, node, root, args, context, info):
id = parent_resolver(root, args, context, info)
return node.to_global_id(info.parent_type.name, id) # root._meta.name
def get_resolver(self, parent_resolver):
return partial(self.id_resolver, parent_resolver, self.node)
class NodeMeta(InterfaceMeta):
def __new__(cls, name, bases, attrs):
cls = InterfaceMeta.__new__(cls, name, bases, attrs)
cls._meta.fields['id'] = GlobalID(cls, required=True, description='The ID of the object.')
return cls
class NodeField(Field):
def __init__(self, node, type=False, deprecation_reason=None,
name=None, **kwargs):
assert issubclass(node, Node), 'NodeField can only operate in Nodes'
type = type or node
super(NodeField, self).__init__(
type,
description='The ID of the object',
id=ID(required=True),
resolver=node.node_resolver
)
class Node(six.with_metaclass(NodeMeta, Interface)):
'''An object with an ID'''
@classmethod
def Field(cls, *args, **kwargs): # noqa: N802
return NodeField(cls, *args, **kwargs)
@classmethod
def node_resolver(cls, root, args, context, info):
return cls.get_node_from_global_id(args.get('id'), context, info)
@classmethod
def get_node_from_global_id(cls, global_id, context, info):
try:
_type, _id = cls.from_global_id(global_id)
graphene_type = info.schema.get_type(_type).graphene_type
# We make sure the ObjectType implements the "Node" interface
assert cls in graphene_type._meta.interfaces
except:
return None
get_node = getattr(graphene_type, 'get_node', None)
if get_node:
return get_node(_id, context, info)
@classmethod
def from_global_id(cls, global_id):
return from_global_id(global_id)
@classmethod
def to_global_id(cls, type, id):
return to_global_id(type, id)
@classmethod
def implements(cls, objecttype):
get_connection = getattr(objecttype, 'get_connection', None)
if not get_connection:
get_connection = partial(get_default_connection, objecttype)
objecttype.Connection = get_connection()
| Python | 0.000002 |
f21180292db82abfc69272c5c1b9e50c68645eca | fix gdpr form scrub tests in python 3 | corehq/apps/users/management/commands/gdpr_scrub_user_from_forms.py | corehq/apps/users/management/commands/gdpr_scrub_user_from_forms.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from corehq.apps.users.models import CouchUser
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from io import StringIO
from lxml import etree
import sys
import six
import logging
logger = logging.getLogger(__name__)
NEW_USERNAME = "Redacted User (GDPR)"
class Command(BaseCommand):
help = "Scrubs the username from all forms associated with the given user"
def add_arguments(self, parser):
parser.add_argument('username')
parser.add_argument('domain')
def handle(self, username, domain, **options):
this_form_accessor = FormAccessors(domain=domain)
user = CouchUser.get_by_username(username)
if not user:
logger.info("User {} not found.".format(username))
sys.exit(1)
user_id = user._id
form_ids = this_form_accessor.get_form_ids_for_user(user_id)
input_response = six.moves.input(
"Update {} form(s) for user {} in domain {}? (y/n): ".format(len(form_ids), username, domain))
if input_response == "y":
for form_data in this_form_accessor.iter_forms(form_ids):
form_attachment_xml_new = self.update_form_data(form_data, NEW_USERNAME)
this_form_accessor.modify_attachment_xml_and_metadata(form_data,
form_attachment_xml_new,
NEW_USERNAME)
logging.info("Updated {} form(s) for user {} in domain {}".format(len(form_ids), username, domain))
elif input_response == "n":
logging.info("No forms updated, exiting.")
else:
logging.info("Command not recognized. Exiting.")
@staticmethod
def update_form_data(form_data, new_username):
form_attachment_xml = form_data.get_attachment("form.xml").decode('utf-8')
xml_elem = etree.parse(StringIO(form_attachment_xml))
id_elem = xml_elem.find("{http://openrosa.org/jr/xforms}meta").find(
"{http://openrosa.org/jr/xforms}username")
id_elem.text = new_username
new_form_attachment_xml = etree.tostring(xml_elem)
return new_form_attachment_xml
| from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from corehq.apps.users.models import CouchUser
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from io import StringIO
from lxml import etree
import sys
import six
import logging
logger = logging.getLogger(__name__)
NEW_USERNAME = "Redacted User (GDPR)"
class Command(BaseCommand):
    help = "Scrubs the username from all forms associated with the given user"

    def add_arguments(self, parser):
        parser.add_argument('username')
        parser.add_argument('domain')

    def handle(self, username, domain, **options):
        """Replace ``username`` with the GDPR placeholder on all their forms."""
        this_form_accessor = FormAccessors(domain=domain)
        user = CouchUser.get_by_username(username)
        if not user:
            logger.info("User {} not found.".format(username))
            sys.exit(1)
        user_id = user._id
        form_ids = this_form_accessor.get_form_ids_for_user(user_id)
        # Destructive operation: require interactive confirmation first.
        input_response = six.moves.input(
            "Update {} form(s) for user {} in domain {}? (y/n): ".format(len(form_ids), username, domain))
        if input_response == "y":
            for form_data in this_form_accessor.iter_forms(form_ids):
                form_attachment_xml_new = self.update_form_data(form_data, NEW_USERNAME)
                this_form_accessor.modify_attachment_xml_and_metadata(form_data,
                                                                      form_attachment_xml_new,
                                                                      NEW_USERNAME)
            logging.info("Updated {} form(s) for user {} in domain {}".format(len(form_ids), username, domain))
        elif input_response == "n":
            logging.info("No forms updated, exiting.")
        else:
            logging.info("Command not recognized. Exiting.")

    @staticmethod
    def update_form_data(form_data, new_username):
        """Return the form XML with <meta><username> replaced by *new_username*.

        BUG FIX: the attachment is bytes and must be decoded before being fed
        to StringIO.  ``six.text_type(bytes)`` on Python 3 produces the repr
        ("b'...'"), which corrupts the XML instead of decoding it.
        """
        form_attachment_xml = form_data.get_attachment("form.xml").decode('utf-8')
        xml_elem = etree.parse(StringIO(form_attachment_xml))
        id_elem = xml_elem.find("{http://openrosa.org/jr/xforms}meta").find(
            "{http://openrosa.org/jr/xforms}username")
        id_elem.text = new_username
        new_form_attachment_xml = etree.tostring(xml_elem)
        return new_form_attachment_xml
| Python | 0 |
1e4d80c50aaf253fd2bad9a2139737d8bf8dc927 | fix escape sequence DeprecationWarning (#1595) | gym/spaces/discrete.py | gym/spaces/discrete.py | import numpy as np
from .space import Space
class Discrete(Space):
    r"""A discrete space in :math:`\{ 0, 1, \dots, n-1 \}`.

    Example::

        >>> Discrete(2)

    """
    def __init__(self, n):
        # n is the number of elements; n == 0 passes the assert but such a
        # space cannot be sampled (np_random.randint would raise — note).
        assert n >= 0
        self.n = n
        super(Discrete, self).__init__((), np.int64)

    def sample(self):
        # Uniformly random integer in [0, n).
        return self.np_random.randint(self.n)

    def contains(self, x):
        # Accept plain ints and zero-dimensional integral numpy values;
        # anything else (floats, arrays with shape, strings) is rejected.
        if isinstance(x, int):
            as_int = x
        elif isinstance(x, (np.generic, np.ndarray)) and (x.dtype.kind in np.typecodes['AllInteger'] and x.shape == ()):
            as_int = int(x)
        else:
            return False
        return as_int >= 0 and as_int < self.n

    def __repr__(self):
        return "Discrete(%d)" % self.n

    def __eq__(self, other):
        # Two Discrete spaces are equal iff they have the same size.
        return isinstance(other, Discrete) and self.n == other.n
| import numpy as np
from .space import Space
class Discrete(Space):
    r"""A discrete space in :math:`\{ 0, 1, \dots, n-1 \}`.

    Example::

        >>> Discrete(2)

    """
    # BUG FIX: the docstring is now a raw string.  ``\{`` and ``\d`` are not
    # valid escape sequences, so the non-raw literal emitted a
    # DeprecationWarning on import under newer Pythons.
    def __init__(self, n):
        assert n >= 0
        self.n = n
        super(Discrete, self).__init__((), np.int64)

    def sample(self):
        """Return a uniformly random integer in ``[0, n)``."""
        return self.np_random.randint(self.n)

    def contains(self, x):
        """Return True for plain ints / 0-d integral numpy values in range."""
        if isinstance(x, int):
            as_int = x
        elif isinstance(x, (np.generic, np.ndarray)) and (x.dtype.kind in np.typecodes['AllInteger'] and x.shape == ()):
            as_int = int(x)
        else:
            return False
        return as_int >= 0 and as_int < self.n

    def __repr__(self):
        return "Discrete(%d)" % self.n

    def __eq__(self, other):
        return isinstance(other, Discrete) and self.n == other.n
| Python | 0.000004 |
50248c3989624f935a4ff2a80229b997ca77f5c2 | fix generator issue | hazm/SequenceTagger.py | hazm/SequenceTagger.py | # coding: utf8
from __future__ import unicode_literals
from nltk.tag.api import TaggerI
from wapiti import Model
class SequenceTagger(TaggerI):
""" wrapper for [Wapiti](http://wapiti.limsi.fr) sequence tagger
>>> tagger = SequenceTagger(patterns=['*', 'U:word-%x[0,0]'])
>>> tagger.train([[('ู
ู', 'PRO'), ('ุจู', 'P'), ('ู
ุฏุฑุณู', 'N'), ('ุฑูุชู_ุจูุฏู
', 'V'), ('.', 'PUNC')]])
>>> tagger.tag_sents([['ู
ู', 'ุจู', 'ู
ุฏุฑุณู', 'ุฑูุชู_ุจูุฏู
', '.']])
[[('ู
ู', 'PRO'), ('ุจู', 'P'), ('ู
ุฏุฑุณู', 'N'), ('ุฑูุชู_ุจูุฏู
', 'V'), ('.', 'PUNC')]]
>>> tagger.save_model('test.tagger')
>>> SequenceTagger(model='test.tagger').tag_sents([['ู
ู', 'ุจู', 'ู
ุฏุฑุณู', 'ุฑูุชู_ุจูุฏู
', '.']])
[[('ู
ู', 'PRO'), ('ุจู', 'P'), ('ู
ุฏุฑุณู', 'N'), ('ุฑูุชู_ุจูุฏู
', 'V'), ('.', 'PUNC')]]
"""
def __init__(self, patterns=[], **options):
self.model = Model(patterns='\n'.join(patterns), **options)
def train(self, sentences):
self.model.train(['\n'.join([' '.join(word) for word in sentence]) for sentence in sentences])
def save_model(self, filename):
self.model.save(filename)
def tag_sents(self, sentences):
sentences = list(sentences)
lines = '\n\n'.join(['\n'.join(sentence) for sentence in sentences])
results = self.model.label_sequence(lines).decode('utf8')
tags = iter(results.strip().split('\n'))
return [[(word, next(tags)) for word in sentence] for sentence in sentences]
| # coding: utf8
from __future__ import unicode_literals
from nltk.tag.api import TaggerI
from wapiti import Model
class SequenceTagger(TaggerI):
""" wrapper for [Wapiti](http://wapiti.limsi.fr) sequence tagger
>>> tagger = SequenceTagger(patterns=['*', 'U:word-%x[0,0]'])
>>> tagger.train([[('ู
ู', 'PRO'), ('ุจู', 'P'), ('ู
ุฏุฑุณู', 'N'), ('ุฑูุชู_ุจูุฏู
', 'V'), ('.', 'PUNC')]])
>>> tagger.tag_sents([['ู
ู', 'ุจู', 'ู
ุฏุฑุณู', 'ุฑูุชู_ุจูุฏู
', '.']])
[[('ู
ู', 'PRO'), ('ุจู', 'P'), ('ู
ุฏุฑุณู', 'N'), ('ุฑูุชู_ุจูุฏู
', 'V'), ('.', 'PUNC')]]
>>> tagger.save_model('test.tagger')
>>> SequenceTagger(model='test.tagger').tag_sents([['ู
ู', 'ุจู', 'ู
ุฏุฑุณู', 'ุฑูุชู_ุจูุฏู
', '.']])
[[('ู
ู', 'PRO'), ('ุจู', 'P'), ('ู
ุฏุฑุณู', 'N'), ('ุฑูุชู_ุจูุฏู
', 'V'), ('.', 'PUNC')]]
"""
def __init__(self, patterns=[], **options):
self.model = Model(patterns='\n'.join(patterns), **options)
def train(self, sentences):
self.model.train(['\n'.join([' '.join(word) for word in sentence]) for sentence in sentences])
def save_model(self, filename):
self.model.save(filename)
def tag_sents(self, sentences):
lines = '\n\n'.join(['\n'.join(sentence) for sentence in sentences])
results = self.model.label_sequence(lines).decode('utf8')
tags = iter(results.strip().split('\n'))
return [[(word, next(tags)) for word in sentence] for sentence in sentences]
| Python | 0 |
649a70d825d2182e3d5a4f42a83f377b66043e09 | bump version | yandextank/version.py | yandextank/version.py | VERSION = '1.17.2'
| VERSION = '1.17.1'
| Python | 0.000001 |
60d93c3ade6f465e627c6c47c17d9c86e2b52f2a | Handle None challenge | app/grandchallenge/core/context_processors.py | app/grandchallenge/core/context_processors.py | import logging
from django.conf import settings
from guardian.shortcuts import get_perms
from guardian.utils import get_anonymous_user
logger = logging.getLogger(__name__)
def challenge(request):
    """Template context processor exposing the current challenge.

    Returns an empty context both when the request carries no ``challenge``
    attribute and when the attribute is present but None; otherwise exposes
    the challenge, the user's permissions on it, participation status, and
    its pages.
    """
    try:
        challenge = request.challenge
        if challenge is None:
            # Middleware resolved no challenge for this request/host.
            return {}
    except AttributeError:
        logger.warning(f"Could not get challenge for request: {request}")
        return {}

    try:
        user = request.user
    except AttributeError:
        # Requests without an authenticated user attribute fall back to the
        # guardian anonymous user so permission checks still work.
        user = get_anonymous_user()

    return {
        "challenge": challenge,
        "challenge_perms": get_perms(user, challenge),
        "user_is_participant": challenge.is_participant(user),
        "pages": challenge.page_set.all(),
    }
def google_keys(*_, **__):
    """Expose the Google Analytics / Maps API keys to all templates."""
    return {
        "google_analytics_id": settings.GOOGLE_ANALYTICS_ID,
        "geochart_api_key": settings.GOOGLE_MAPS_API_KEY,
    }
def debug(*_, **__):
    """Expose the DEBUG flag so templates can vary output in development."""
    return {"DEBUG": settings.DEBUG}
| import logging
from django.conf import settings
from guardian.shortcuts import get_perms
from guardian.utils import get_anonymous_user
logger = logging.getLogger(__name__)
def challenge(request):
    """Template context processor exposing the current challenge.

    Returns an empty context when the request carries no usable challenge;
    otherwise exposes the challenge, the user's permissions on it,
    participation status, and its pages.
    """
    try:
        challenge = request.challenge
        # BUG FIX: middleware may set request.challenge to None (no challenge
        # resolved for this host).  Treat that like a missing attribute
        # instead of crashing below in get_perms/is_participant.
        if challenge is None:
            return {}
    except AttributeError:
        logger.warning(f"Could not get challenge for request: {request}")
        return {}

    try:
        user = request.user
    except AttributeError:
        # Fall back to the guardian anonymous user so permission checks work.
        user = get_anonymous_user()

    return {
        "challenge": challenge,
        "challenge_perms": get_perms(user, challenge),
        "user_is_participant": challenge.is_participant(user),
        "pages": challenge.page_set.all(),
    }
def google_keys(*_, **__):
return {
"google_analytics_id": settings.GOOGLE_ANALYTICS_ID,
"geochart_api_key": settings.GOOGLE_MAPS_API_KEY,
}
def debug(*_, **__):
return {"DEBUG": settings.DEBUG}
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.