commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
28d1875008dc1201e14391aac7724a52471866fa | add view funtion to browserlib | percyfal/bokeh,timsnyder/bokeh,abele/bokeh,srinathv/bokeh,canavandl/bokeh,roxyboy/bokeh,Karel-van-de-Plassche/bokeh,schoolie/bokeh,paultcochrane/bokeh,rhiever/bokeh,saifrahmed/bokeh,azjps/bokeh,aavanian/bokeh,tacaswell/bokeh,jplourenco/bokeh,rhiever/bokeh,akloster/bokeh,ericdill/bokeh,bsipocz/bokeh,philippjfr/bokeh,KasperPRasmussen/bokeh,KasperPRasmussen/bokeh,paultcochrane/bokeh,muku42/bokeh,deeplook/bokeh,sahat/bokeh,muku42/bokeh,philippjfr/bokeh,akloster/bokeh,paultcochrane/bokeh,justacec/bokeh,KasperPRasmussen/bokeh,caseyclements/bokeh,mindriot101/bokeh,satishgoda/bokeh,dennisobrien/bokeh,Karel-van-de-Plassche/bokeh,azjps/bokeh,ChristosChristofidis/bokeh,almarklein/bokeh,aiguofer/bokeh,carlvlewis/bokeh,phobson/bokeh,alan-unravel/bokeh,jplourenco/bokeh,caseyclements/bokeh,schoolie/bokeh,schoolie/bokeh,justacec/bokeh,PythonCharmers/bokeh,percyfal/bokeh,rs2/bokeh,matbra/bokeh,khkaminska/bokeh,alan-unravel/bokeh,canavandl/bokeh,timsnyder/bokeh,laurent-george/bokeh,aavanian/bokeh,CrazyGuo/bokeh,roxyboy/bokeh,carlvlewis/bokeh,muku42/bokeh,saifrahmed/bokeh,jakirkham/bokeh,ahmadia/bokeh,gpfreitas/bokeh,clairetang6/bokeh,alan-unravel/bokeh,jakirkham/bokeh,schoolie/bokeh,awanke/bokeh,ericdill/bokeh,timsnyder/bokeh,rs2/bokeh,htygithub/bokeh,bsipocz/bokeh,DuCorey/bokeh,msarahan/bokeh,stonebig/bokeh,srinathv/bokeh,srinathv/bokeh,philippjfr/bokeh,stonebig/bokeh,laurent-george/bokeh,aavanian/bokeh,daodaoliang/bokeh,dennisobrien/bokeh,rothnic/bokeh,caseyclements/bokeh,ericmjl/bokeh,caseyclements/bokeh,ahmadia/bokeh,Karel-van-de-Plassche/bokeh,paultcochrane/bokeh,htygithub/bokeh,bokeh/bokeh,ahmadia/bokeh,matbra/bokeh,clairetang6/bokeh,stuart-knock/bokeh,almarklein/bokeh,ericmjl/bokeh,jakirkham/bokeh,maxalbert/bokeh,quasiben/bokeh,timothydmorton/bokeh,josherick/bokeh,bokeh/bokeh,dennisobrien/bokeh,rhiever/bokeh,phobson/bokeh,stonebig/bokeh,gpfreitas/bokeh,laurent-george/bokeh,bokeh/bokeh,carlvlewis/bokeh,ahmadia/bokeh,ericmjl/bokeh,birdsarah/bokeh,azjps/bokeh,percyfal/bokeh,rhiever/bokeh,rs2/bokeh,CrazyGuo/bokeh,timsnyder/bokeh,ChristosChristofidis/bokeh,laurent-george/bokeh,sahat/bokeh,satishgoda/bokeh,stuart-knock/bokeh,abele/bokeh,evidation-health/bokeh,daodaoliang/bokeh,jakirkham/bokeh,KasperPRasmussen/bokeh,deeplook/bokeh,jplourenco/bokeh,bsipocz/bokeh,jplourenco/bokeh,lukebarnard1/bokeh,ptitjano/bokeh,satishgoda/bokeh,khkaminska/bokeh,mutirri/bokeh,srinathv/bokeh,mindriot101/bokeh,timothydmorton/bokeh,DuCorey/bokeh,schoolie/bokeh,KasperPRasmussen/bokeh,rothnic/bokeh,timsnyder/bokeh,percyfal/bokeh,ericmjl/bokeh,lukebarnard1/bokeh,msarahan/bokeh,ChinaQuants/bokeh,maxalbert/bokeh,akloster/bokeh,bsipocz/bokeh,almarklein/bokeh,stuart-knock/bokeh,Karel-van-de-Plassche/bokeh,roxyboy/bokeh,deeplook/bokeh,philippjfr/bokeh,timothydmorton/bokeh,alan-unravel/bokeh,daodaoliang/bokeh,ericdill/bokeh,azjps/bokeh,draperjames/bokeh,awanke/bokeh,deeplook/bokeh,Karel-van-de-Plassche/bokeh,stonebig/bokeh,ptitjano/bokeh,clairetang6/bokeh,xguse/bokeh,jakirkham/bokeh,awanke/bokeh,htygithub/bokeh,phobson/bokeh,phobson/bokeh,abele/bokeh,mutirri/bokeh,xguse/bokeh,DuCorey/bokeh,msarahan/bokeh,PythonCharmers/bokeh,draperjames/bokeh,rothnic/bokeh,aiguofer/bokeh,draperjames/bokeh,canavandl/bokeh,tacaswell/bokeh,matbra/bokeh,rothnic/bokeh,bokeh/bokeh,birdsarah/bokeh,mutirri/bokeh,msarahan/bokeh,tacaswell/bokeh,rs2/bokeh,mindriot101/bokeh,ptitjano/bokeh,ChinaQuants/bokeh,abele/bokeh,quasiben/bokeh,xguse/bokeh,aiguofer/bokeh,clairetang6/bokeh,azjps/bokeh,eteq/bokeh,rs2/bokeh,mutirri/bokeh,philippjfr/bokeh,gpfreitas/bokeh,roxyboy/bokeh,stuart-knock/bokeh,lukebarnard1/bokeh,gpfreitas/bokeh,canavandl/bokeh,maxalbert/bokeh,ChristosChristofidis/bokeh,ericdill/bokeh,daodaoliang/bokeh,saifrahmed/bokeh,evidation-health/bokeh,eteq/bokeh,maxalbert/bokeh,quasiben/bokeh,aavanian/bokeh,justacec/bokeh,draperjames/bokeh,satishgoda/bokeh,htygithub/bokeh,ptitjano/bokeh,mindriot101/bokeh,eteq/bokeh,CrazyGuo/bokeh,dennisobrien/bokeh,matbra/bokeh,percyfal/bokeh,ericmjl/bokeh,CrazyGuo/bokeh,khkaminska/bokeh,aavanian/bokeh,phobson/bokeh,ChinaQuants/bokeh,timothydmorton/bokeh,ChinaQuants/bokeh,evidation-health/bokeh,draperjames/bokeh,aiguofer/bokeh,eteq/bokeh,awanke/bokeh,ptitjano/bokeh,DuCorey/bokeh,josherick/bokeh,carlvlewis/bokeh,josherick/bokeh,tacaswell/bokeh,dennisobrien/bokeh,birdsarah/bokeh,josherick/bokeh,saifrahmed/bokeh,akloster/bokeh,justacec/bokeh,PythonCharmers/bokeh,ChristosChristofidis/bokeh,aiguofer/bokeh,birdsarah/bokeh,khkaminska/bokeh,sahat/bokeh,bokeh/bokeh,PythonCharmers/bokeh,xguse/bokeh,lukebarnard1/bokeh,evidation-health/bokeh,DuCorey/bokeh,muku42/bokeh | bokeh/browserlib.py | bokeh/browserlib.py |
from os.path import abspath
import webbrowser
from . import settings
def get_browser_controller(browser=None):
browser = settings.browser(browser)
if browser is not None:
if browser == 'none':
class DummyWebBrowser(object):
def open(self, url, new=0, autoraise=True):
pass
controller = DummyWebBrowser()
else:
controller = webbrowser.get(browser)
else:
controller = webbrowser
return controller
def view(filename, browser=None, new=False, autoraise=True):
""" Opens a browser to view the file pointed to by this sessions.
**new** can be None, "tab", or "window" to view the file in the
existing page, a new tab, or a new windows. **autoraise** causes
the browser to be brought to the foreground; this may happen
automatically on some platforms regardless of the setting of this
variable.
"""
new_map = { False: 0, "window": 1, "tab": 2 }
file_url = "file://" + abspath(filename)
try:
controller = get_browser_controller(browser)
controller.open(file_url, new=new_map[new], autoraise=autoraise)
except (SystemExit, KeyboardInterrupt):
raise
except:
pass | import webbrowser
from . import settings
def get_browser_controller(browser=None):
browser = settings.browser(browser)
if browser is not None:
if browser == 'none':
class DummyWebBrowser(object):
def open(self, url, new=0, autoraise=True):
pass
controller = DummyWebBrowser()
else:
controller = webbrowser.get(browser)
else:
controller = webbrowser
return controller
| bsd-3-clause | Python |
c8064d4fab988d42cb400bbe26965f3078522da4 | Fix brightcove id extraction | kidburglar/youtube-dl,dstftw/youtube-dl,ping/youtube-dl,yan12125/youtube-dl,spvkgn/youtube-dl,vinegret/youtube-dl,epitron/youtube-dl,erikdejonge/youtube-dl,Tatsh/youtube-dl,gkoelln/youtube-dl,vinegret/youtube-dl,ping/youtube-dl,rrooij/youtube-dl,rg3/youtube-dl,dstftw/youtube-dl,unreal666/youtube-dl,steebchen/youtube-dl,epitron/youtube-dl,hakatashi/youtube-dl,erikdejonge/youtube-dl,nyuszika7h/youtube-dl,ozburo/youtube-dl,remitamine/youtube-dl,aeph6Ee0/youtube-dl,vijayanandnandam/youtube-dl,Orochimarufan/youtube-dl,remitamine/youtube-dl,stannynuytkens/youtube-dl,Orochimarufan/youtube-dl,aeph6Ee0/youtube-dl,spvkgn/youtube-dl,stannynuytkens/youtube-dl,yan12125/youtube-dl,gkoelln/youtube-dl,rg3/youtube-dl,Tatsh/youtube-dl,nyuszika7h/youtube-dl,rrooij/youtube-dl,hakatashi/youtube-dl,steebchen/youtube-dl,vijayanandnandam/youtube-dl,kidburglar/youtube-dl,unreal666/youtube-dl,ozburo/youtube-dl | youtube_dl/extractor/gameinformer.py | youtube_dl/extractor/gameinformer.py | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class GameInformerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gameinformer\.com/(?:[^/]+/)*(?P<id>.+)\.aspx'
_TEST = {
'url': 'http://www.gameinformer.com/b/features/archive/2015/09/26/replay-animal-crossing.aspx',
'md5': '292f26da1ab4beb4c9099f1304d2b071',
'info_dict': {
'id': '4515472681001',
'ext': 'mp4',
'title': 'Replay - Animal Crossing',
'description': 'md5:2e211891b215c85d061adc7a4dd2d930',
'timestamp': 1443457610,
'upload_date': '20150928',
'uploader_id': '694940074001',
},
}
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/694940074001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
brightcove_id = self._search_regex(
[r'<[^>]+\bid=["\']bc_(\d+)', r"getVideo\('[^']+video_id=(\d+)"],
webpage, 'brightcove id')
return self.url_result(
self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew',
brightcove_id)
| # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class GameInformerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gameinformer\.com/(?:[^/]+/)*(?P<id>.+)\.aspx'
_TEST = {
'url': 'http://www.gameinformer.com/b/features/archive/2015/09/26/replay-animal-crossing.aspx',
'md5': '292f26da1ab4beb4c9099f1304d2b071',
'info_dict': {
'id': '4515472681001',
'ext': 'mp4',
'title': 'Replay - Animal Crossing',
'description': 'md5:2e211891b215c85d061adc7a4dd2d930',
'timestamp': 1443457610,
'upload_date': '20150928',
'uploader_id': '694940074001',
},
}
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/694940074001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
brightcove_id = self._search_regex(r"getVideo\('[^']+video_id=(\d+)", webpage, 'brightcove id')
return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
| unlicense | Python |
9207e5eccb91f43fab1477ac74e7cc7cdc350186 | Fix syntax error | flux3dp/fluxghost,flux3dp/fluxghost,flux3dp/fluxghost,flux3dp/fluxghost | fluxghost/api/camera.py | fluxghost/api/camera.py |
import logging
from fluxclient.robot.camera import FluxCamera
from fluxclient.utils.version import StrictVersion
from .control_base import control_base_mixin
CRITICAL_VERSION = StrictVersion("1.0")
logger = logging.getLogger("API.CAMERA")
"""
Control printer
Javascript Example:
ws = new WebSocket("ws://127.0.0.1:8000/ws/control/RLFPAPI7E8KXG64KG5NOWWY3T");
ws.onmessage = function(v) { console.log(v.data);}
ws.onclose = function(v) { console.log("CONNECTION CLOSED, code=" + v.code +
"; reason=" + v.reason); }
// After recive connected...
ws.send("ls")
"""
def camera_api_mixin(cls):
class CameraAPI(control_base_mixin(cls)):
def get_robot_from_device(self, device):
self.remote_version = device.version
return device.connect_camera(
self.client_key, conn_callback=self._conn_callback)
def get_robot_from_h2h(self, usbprotocol):
return FluxCamera.from_usb(self.client_key, usbprotocol)
def on_connected(self):
self.rlist.append(CameraWrapper(self, self.robot))
def on_command(self, message):
logger.info(message)
if self.remote_version > CRITICAL_VERSION:
if message == 'enable_streaming':
self.robot.enable_streaming()
if message == 'require_frame':
self.robot.require_frame()
def on_image(self, camera, image):
self.send_binary(image)
return CameraAPI
class CameraWrapper(object):
def __init__(self, ws, camera):
self.ws = ws
self.camera = camera
# TODO: `camera.sock.fileno()` to `camera.fileno()`
self._fileno = camera.sock.fileno()
def fileno(self):
return self._fileno
def on_read(self):
try:
self.camera.feed(self.ws.on_image)
except RuntimeError as e:
logger.info("Camera error: %s", e)
self.ws.close()
self.camera = None
self.ws = None
|
import logging
from fluxclient.robot.camera import FluxCamera
from fluxclient.utils.version import StrictVersion
from .control_base import control_base_mixin
CRITICAL_VERSION = StrictVersion("1.0")
logger = logging.getLogger("API.CAMERA")
"""
Control printer
Javascript Example:
ws = new WebSocket("ws://127.0.0.1:8000/ws/control/RLFPAPI7E8KXG64KG5NOWWY3T");
ws.onmessage = function(v) { console.log(v.data);}
ws.onclose = function(v) { console.log("CONNECTION CLOSED, code=" + v.code +
"; reason=" + v.reason); }
// After recive connected...
ws.send("ls")
"""
def camera_api_mixin(cls):
class CameraAPI(control_base_mixin(cls)):
def get_robot_from_device(self, device):
self.remote_version = device.version
return device.connect_camera(
self.client_key, conn_callback=self._conn_callback)
def get_robot_from_h2h(self, usbprotocol):
return FluxCamera.from_usb(self.client_key, usbprotocol)
def on_connected(self):
if self.remote_version > CRITICAL_VERSION:
self.rlist.append(CameraWrapper(self, self.robot))
def on_command(self, message):
logger.info(message)
if message == 'enable_streaming':
self.robot.enable_streaming()
if message == 'require_frame':
self.robot.require_frame()
def on_image(self, camera, image):
self.send_binary(image)
return CameraAPI
class CameraWrapper(object):
def __init__(self, ws, camera):
self.ws = ws
self.camera = camera
# TODO: `camera.sock.fileno()` to `camera.fileno()`
self._fileno = camera.sock.fileno()
def fileno(self):
return self._fileno
def on_read(self):
try:
self.camera.feed(self.ws.on_image)
except RuntimeError as e:
logger.info("Camera error: %s", e)
self.ws.close()
self.camera = None
self.ws = None
| agpl-3.0 | Python |
482f45b86066f6569faa90fab21d32b207f75d4d | Switch token/basic auth logging to INFO, to match login/logout logging. | wwitzel3/awx,wwitzel3/awx,wwitzel3/awx,wwitzel3/awx | awx/api/authentication.py | awx/api/authentication.py | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import logging
# Django
from django.conf import settings
from django.utils.encoding import smart_text
# Django REST Framework
from rest_framework import authentication
# Django OAuth Toolkit
from oauth2_provider.contrib.rest_framework import OAuth2Authentication
logger = logging.getLogger('awx.api.authentication')
class LoggedBasicAuthentication(authentication.BasicAuthentication):
def authenticate(self, request):
if not settings.AUTH_BASIC_ENABLED:
return
ret = super(LoggedBasicAuthentication, self).authenticate(request)
if ret:
username = ret[0].username if ret[0] else '<none>'
logger.info(smart_text(u"User {} performed a {} to {} through the API".format(username, request.method, request.path)))
return ret
def authenticate_header(self, request):
if not settings.AUTH_BASIC_ENABLED:
return
return super(LoggedBasicAuthentication, self).authenticate_header(request)
class SessionAuthentication(authentication.SessionAuthentication):
def authenticate_header(self, request):
return 'Session'
def enforce_csrf(self, request):
return None
class LoggedOAuth2Authentication(OAuth2Authentication):
def authenticate(self, request):
ret = super(LoggedOAuth2Authentication, self).authenticate(request)
if ret:
user, token = ret
username = user.username if user else '<none>'
logger.info(smart_text(
u"User {} performed a {} to {} through the API using OAuth token {}.".format(
username, request.method, request.path, token.pk
)
))
setattr(user, 'oauth_scopes', [x for x in token.scope.split() if x])
return ret
| # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import logging
# Django
from django.conf import settings
from django.utils.encoding import smart_text
# Django REST Framework
from rest_framework import authentication
# Django OAuth Toolkit
from oauth2_provider.contrib.rest_framework import OAuth2Authentication
logger = logging.getLogger('awx.api.authentication')
class LoggedBasicAuthentication(authentication.BasicAuthentication):
def authenticate(self, request):
if not settings.AUTH_BASIC_ENABLED:
return
ret = super(LoggedBasicAuthentication, self).authenticate(request)
if ret:
username = ret[0].username if ret[0] else '<none>'
logger.debug(smart_text(u"User {} performed a {} to {} through the API".format(username, request.method, request.path)))
return ret
def authenticate_header(self, request):
if not settings.AUTH_BASIC_ENABLED:
return
return super(LoggedBasicAuthentication, self).authenticate_header(request)
class SessionAuthentication(authentication.SessionAuthentication):
def authenticate_header(self, request):
return 'Session'
def enforce_csrf(self, request):
return None
class LoggedOAuth2Authentication(OAuth2Authentication):
def authenticate(self, request):
ret = super(LoggedOAuth2Authentication, self).authenticate(request)
if ret:
user, token = ret
username = user.username if user else '<none>'
logger.debug(smart_text(
u"User {} performed a {} to {} through the API using OAuth token {}.".format(
username, request.method, request.path, token.pk
)
))
setattr(user, 'oauth_scopes', [x for x in token.scope.split() if x])
return ret
| apache-2.0 | Python |
4c1ae5913b96bb9c201701fe330c9e8b97f43d41 | revert makeaddin.py to default behavior of placing the result in the local directory instead of the directory above as its less confusing. | genegis/genegis,genegis/genegis,genegis/genegis | makeaddin.py | makeaddin.py | import os
import re
import zipfile
current_path = os.path.dirname(os.path.abspath(__file__))
out_zip_name = os.path.join(current_path,
os.path.basename(current_path) + ".esriaddin")
BACKUP_FILE_PATTERN = re.compile(".*_addin_[0-9]+[.]py$", re.IGNORECASE)
def looks_like_a_backup(filename):
return bool(BACKUP_FILE_PATTERN.match(filename))
zip_file = zipfile.ZipFile(out_zip_name, 'w')
for filename in ('config.xml', 'README.md', 'makeaddin.py'):
zip_file.write(os.path.join(current_path, filename), filename)
dirs_to_add = ['Images', 'Install']
for directory in dirs_to_add:
for (path, dirs, files) in os.walk(os.path.join(current_path, directory)):
archive_path = os.path.relpath(path, current_path)
found_file = False
for file in (f for f in files if not looks_like_a_backup(f)):
archive_file = os.path.join(archive_path, file)
print archive_file
zip_file.write(os.path.join(path, file), archive_file)
found_file = True
if not found_file:
zip_file.writestr(os.path.join(archive_path, 'placeholder.txt'),
"(Empty directory)")
zip_file.close()
| import os
import re
import zipfile
current_path = os.path.dirname(os.path.abspath(__file__))
out_zip_name = os.path.join(current_path, "..",
os.path.basename(current_path) + ".esriaddin")
BACKUP_FILE_PATTERN = re.compile(".*_addin_[0-9]+[.]py$", re.IGNORECASE)
def looks_like_a_backup(filename):
return bool(BACKUP_FILE_PATTERN.match(filename))
zip_file = zipfile.ZipFile(out_zip_name, 'w')
for filename in ('config.xml', 'README.md', 'makeaddin.py'):
zip_file.write(os.path.join(current_path, filename), filename)
dirs_to_add = ['Images', 'Install']
for directory in dirs_to_add:
for (path, dirs, files) in os.walk(os.path.join(current_path, directory)):
archive_path = os.path.relpath(path, current_path)
found_file = False
for file in (f for f in files if not looks_like_a_backup(f)):
archive_file = os.path.join(archive_path, file)
print archive_file
zip_file.write(os.path.join(path, file), archive_file)
found_file = True
if not found_file:
zip_file.writestr(os.path.join(archive_path, 'placeholder.txt'),
"(Empty directory)")
zip_file.close()
| mpl-2.0 | Python |
07c5e0c700af22c22cb12cef62a2b8f3f7f70030 | bump version to next release alpha | dsanders11/django-advanced-filters,PreppyLLC-opensource/django-advanced-filters,modlinltd/django-advanced-filters,modlinltd/django-advanced-filters,modlinltd/django-advanced-filters,PreppyLLC-opensource/django-advanced-filters,dsanders11/django-advanced-filters,dsanders11/django-advanced-filters,PreppyLLC-opensource/django-advanced-filters,dsanders11/django-advanced-filters | advanced_filters/__init__.py | advanced_filters/__init__.py | __version__ = '1.0.2a'
| __version__ = '1.0.1'
| mit | Python |
e33b8a804b5af0421376a236ec4c56d7743c13ff | Bump to version 0.60.4 | nerevu/riko,nerevu/riko | riko/__init__.py | riko/__init__.py | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko
~~~~
Provides functions for analyzing and processing streams of structured data
Examples:
basic usage::
>>> from itertools import chain
>>> from functools import partial
>>> from riko.modules import itembuilder, strreplace
>>> from riko.collections import SyncPipe
>>>
>>> ib_conf = {
... 'attrs': [
... {'key': 'link', 'value': 'www.google.com', },
... {'key': 'title', 'value': 'google', },
... {'key': 'author', 'value': 'Tommy'}]}
>>>
>>> sr_conf = {
... 'rule': [{'find': 'Tom', 'param': 'first', 'replace': 'Tim'}]}
>>>
>>> items = itembuilder.pipe(conf=ib_conf)
>>> pipe = partial(strreplace.pipe, conf=sr_conf, field='author')
>>> replaced = map(pipe, items)
>>> next(chain.from_iterable(replaced)) == {
... 'link': 'www.google.com', 'title': 'google',
... 'strreplace': 'Timmy', 'author': 'Tommy'}
True
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
from os import path as p
from builtins import * # noqa pylint: disable=unused-import
__version__ = '0.60.4'
__title__ = 'riko'
__package_name__ = 'riko'
__author__ = 'Reuben Cummings'
__description__ = 'A stream processing engine modeled after Yahoo! Pipes.'
__email__ = 'reubano@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
PARENT_DIR = p.abspath(p.dirname(__file__))
ENCODING = 'utf-8'
def get_path(name):
return 'file://%s' % p.join(PARENT_DIR, 'data', name)
| # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko
~~~~
Provides functions for analyzing and processing streams of structured data
Examples:
basic usage::
>>> from itertools import chain
>>> from functools import partial
>>> from riko.modules import itembuilder, strreplace
>>> from riko.collections import SyncPipe
>>>
>>> ib_conf = {
... 'attrs': [
... {'key': 'link', 'value': 'www.google.com', },
... {'key': 'title', 'value': 'google', },
... {'key': 'author', 'value': 'Tommy'}]}
>>>
>>> sr_conf = {
... 'rule': [{'find': 'Tom', 'param': 'first', 'replace': 'Tim'}]}
>>>
>>> items = itembuilder.pipe(conf=ib_conf)
>>> pipe = partial(strreplace.pipe, conf=sr_conf, field='author')
>>> replaced = map(pipe, items)
>>> next(chain.from_iterable(replaced)) == {
... 'link': 'www.google.com', 'title': 'google',
... 'strreplace': 'Timmy', 'author': 'Tommy'}
True
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
from os import path as p
from builtins import * # noqa pylint: disable=unused-import
__version__ = '0.60.3'
__title__ = 'riko'
__package_name__ = 'riko'
__author__ = 'Reuben Cummings'
__description__ = 'A stream processing engine modeled after Yahoo! Pipes.'
__email__ = 'reubano@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
PARENT_DIR = p.abspath(p.dirname(__file__))
ENCODING = 'utf-8'
def get_path(name):
return 'file://%s' % p.join(PARENT_DIR, 'data', name)
| mit | Python |
212720ff677985f57a0f26e073df9bad6dc5c9c0 | Add ClassifierBaseScoreType to score_types.__init__ (#281) | paris-saclay-cds/ramp-workflow,paris-saclay-cds/ramp-workflow | rampwf/score_types/__init__.py | rampwf/score_types/__init__.py | from .accuracy import Accuracy
from .balanced_accuracy import BalancedAccuracy
from .base import BaseScoreType
from .brier_score import (
BrierScore, BrierSkillScore, BrierScoreReliability, BrierScoreResolution)
from .clustering_efficiency import ClusteringEfficiency
from .classification_error import ClassificationError
from .classifier_base import ClassifierBaseScoreType
from .combined import Combined
from .detection import (
OSPA, SCP, DetectionPrecision, DetectionRecall, MADCenter, MADRadius,
AverageDetectionPrecision, DetectionAveragePrecision)
from .f1_above import F1Above
from .macro_averaged_recall import MacroAveragedRecall
from .make_combined import MakeCombined
from .mare import MARE
from .negative_log_likelihood import NegativeLogLikelihood
from .normalized_gini import NormalizedGini
from .normalized_rmse import NormalizedRMSE
from .relative_rmse import RelativeRMSE
from .rmse import RMSE
from .roc_auc import ROCAUC
from .soft_accuracy import SoftAccuracy
__all__ = [
'Accuracy',
'BalancedAccuracy',
'BaseScoreType',
'BrierScore',
'BrierScoreReliability',
'BrierScoreResolution',
'BrierSkillScore',
'ClassificationError',
'ClassifierBaseScoreType',
'ClusteringEfficiency',
'Combined',
'DetectionPrecision',
'DetectionRecall',
'DetectionAveragePrecision',
'F1Above',
'MacroAveragedRecall',
'MakeCombined',
'MADCenter',
'MADRadius',
'MARE',
'NegativeLogLikelihood',
'NormalizedGini',
'NormalizedRMSE',
'OSPA',
'RelativeRMSE',
'RMSE',
'ROCAUC',
'SCP',
'SoftAccuracy',
]
| from .accuracy import Accuracy
from .balanced_accuracy import BalancedAccuracy
from .base import BaseScoreType
from .brier_score import (
BrierScore, BrierSkillScore, BrierScoreReliability, BrierScoreResolution)
from .clustering_efficiency import ClusteringEfficiency
from .classification_error import ClassificationError
from .combined import Combined
from .detection import (
OSPA, SCP, DetectionPrecision, DetectionRecall, MADCenter, MADRadius,
AverageDetectionPrecision, DetectionAveragePrecision)
from .f1_above import F1Above
from .macro_averaged_recall import MacroAveragedRecall
from .make_combined import MakeCombined
from .mare import MARE
from .negative_log_likelihood import NegativeLogLikelihood
from .normalized_gini import NormalizedGini
from .normalized_rmse import NormalizedRMSE
from .relative_rmse import RelativeRMSE
from .rmse import RMSE
from .roc_auc import ROCAUC
from .soft_accuracy import SoftAccuracy
__all__ = [
'Accuracy',
'BalancedAccuracy',
'BaseScoreType',
'BrierScore',
'BrierScoreReliability',
'BrierScoreResolution',
'BrierSkillScore',
'ClassificationError',
'ClusteringEfficiency',
'Combined',
'DetectionPrecision',
'DetectionRecall',
'DetectionAveragePrecision',
'F1Above',
'MacroAveragedRecall',
'MakeCombined',
'MADCenter',
'MADRadius',
'MARE',
'NegativeLogLikelihood',
'NormalizedGini',
'NormalizedRMSE',
'OSPA',
'RelativeRMSE',
'RMSE',
'ROCAUC',
'SCP',
'SoftAccuracy',
]
| bsd-3-clause | Python |
231fe04d13bd7f86445ab5163301ca04f1c54752 | enable empty run list and empty attributes | Fewbytes/cosmo-plugin-chef-appmodule-installer | chef_appmodule_installer/tasks.py | chef_appmodule_installer/tasks.py | #/*******************************************************************************
# * Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *******************************************************************************/
"""
Celery tasks for running recipes through chef-client.
This file implements the AppModule.installer interface, where for each of tasks, we check that chef is configured
and run the relevant runlist using the chef_client module.
"""
from cosmo.events import send_event, get_cosmo_properties
from cosmo.celery import celery
from chef_client_common.chef_client import set_up_chef_client, run_chef
@celery.task
@set_up_chef_client
def deploy(chef_deploy_runlist=None, chef_attributes=None, **kwargs):
run_chef(chef_deploy_runlist, chef_attributes)
@celery.task
@set_up_chef_client
def undeploy(chef_undeploy_runlist=None, chef_attributes=None, **kwargs):
run_chef(chef_undeploy_runlist, chef_attributes)
@celery.task
@set_up_chef_client
def start(__cloudify_id, policy_service, chef_start_runlist=None, chef_attributes=None, **kwargs):
run_chef(chef_start_runlist, chef_attributes)
host = get_cosmo_properties()['ip']
send_event(__cloudify_id, host, policy_service, "state", "running")
@celery.task
@set_up_chef_client
def stop(chef_stop_runlist=None, chef_attributes=None, **kwargs):
run_chef(chef_stop_runlist, chef_attributes)
| #/*******************************************************************************
# * Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *******************************************************************************/
"""
Celery tasks for running recipes through chef-client.
This file implements the AppModule.installer interface, where for each of tasks, we check that chef is configured
and run the relevant runlist using the chef_client module.
"""
from cosmo.events import send_event, get_cosmo_properties
from cosmo.celery import celery
from chef_client_common.chef_client import set_up_chef_client, run_chef
@celery.task
@set_up_chef_client
def deploy(chef_deploy_runlist, chef_attributes, **kwargs):
run_chef(chef_deploy_runlist, chef_attributes)
@celery.task
@set_up_chef_client
def undeploy(chef_undeploy_runlist, chef_attributes, **kwargs):
run_chef(chef_undeploy_runlist, chef_attributes)
@celery.task
@set_up_chef_client
def start(__cloudify_id, chef_start_runlist, chef_attributes, policy_service, **kwargs):
run_chef(chef_start_runlist, chef_attributes)
host = get_cosmo_properties()['ip']
send_event(__cloudify_id, host, policy_service, "state", "running")
@celery.task
@set_up_chef_client
def stop(chef_stop_runlist, chef_attributes, **kwargs):
run_chef(chef_stop_runlist, chef_attributes)
| apache-2.0 | Python |
b69cbd31b2a75e4bdd1592977a13bf5c80795a17 | Fix objects.node.atoms.cmd | Tendrl/node-agent,Tendrl/node_agent,Tendrl/node-agent,Tendrl/node-agent,r0h4n/node-agent,Tendrl/node_agent,r0h4n/node-agent,r0h4n/node-agent | tendrl/node_agent/objects/node/atoms/cmd.py | tendrl/node_agent/objects/node/atoms/cmd.py | import logging
import subprocess
LOG = logging.getLogger(__name__)
class Cmd(object):
def run(self, parameters):
cmd = parameters.get("Node.cmd_str")
cmd = ["nohup"] + cmd.split(" ")
subprocess.Popen(cmd)
return True | import logging
from tendrl.node_agent.ansible_runner.ansible_module_runner \
import AnsibleExecutableGenerationFailed
from tendrl.node_agent.ansible_runner.ansible_module_runner \
import AnsibleRunner
LOG = logging.getLogger(__name__)
ANSIBLE_MODULE_PATH = "core/commands/command.py"
class Cmd(object):
def run(self, parameters):
cmd = parameters.get("Node.cmd_str")
try:
runner = AnsibleRunner(
ANSIBLE_MODULE_PATH,
_raw_params=cmd
)
result, err = runner.run()
except AnsibleExecutableGenerationFailed as ex:
LOG.error(ex)
return False
LOG.info(result)
return True
| lgpl-2.1 | Python |
e8f5a85ffd039c3c63045c0e06a41b98d98008d3 | update package | mfherbst/spack,lgarren/spack,matthiasdiener/spack,tmerrick1/spack,skosukhin/spack,EmreAtes/spack,tmerrick1/spack,LLNL/spack,iulian787/spack,TheTimmy/spack,LLNL/spack,LLNL/spack,mfherbst/spack,tmerrick1/spack,EmreAtes/spack,lgarren/spack,lgarren/spack,matthiasdiener/spack,lgarren/spack,TheTimmy/spack,krafczyk/spack,EmreAtes/spack,EmreAtes/spack,matthiasdiener/spack,TheTimmy/spack,skosukhin/spack,matthiasdiener/spack,TheTimmy/spack,krafczyk/spack,tmerrick1/spack,mfherbst/spack,LLNL/spack,matthiasdiener/spack,lgarren/spack,krafczyk/spack,skosukhin/spack,krafczyk/spack,iulian787/spack,iulian787/spack,skosukhin/spack,mfherbst/spack,LLNL/spack,mfherbst/spack,skosukhin/spack,tmerrick1/spack,iulian787/spack,EmreAtes/spack,TheTimmy/spack,krafczyk/spack,iulian787/spack | var/spack/packages/ncurses/package.py | var/spack/packages/ncurses/package.py | from spack import *
class Ncurses(Package):
"""The ncurses (new curses) library is a free software emulation of curses
in System V Release 4.0, and more. It uses terminfo format, supports pads and
color and multiple highlights and forms characters and function-key mapping,
and has all the other SYSV-curses enhancements over BSD curses.
"""
homepage = "http://invisible-island.net/ncurses/ncurses.html"
version('5.9', '8cb9c412e5f2d96bc6f459aa8c6282a1',
url='http://ftp.gnu.org/pub/gnu/ncurses/ncurses-5.9.tar.gz')
version('6.0', 'ee13d052e1ead260d7c28071f46eefb1',
url='http://ftp.gnu.org/pub/gnu/ncurses/ncurses-6.0.tar.gz')
def install(self, spec, prefix):
configure("--prefix=%s" % prefix,
"--with-shared",
"--enable-widec",
"--disable-pc-files",
"--without-ada")
make()
make("install")
configure("--prefix=%s" % prefix,
"--with-shared",
"--disable-widec",
"--disable-pc-files",
"--without-ada")
make()
make("install")
| from spack import *
class Ncurses(Package):
"""The ncurses (new curses) library is a free software emulation of curses
in System V Release 4.0, and more. It uses terminfo format, supports pads and
color and multiple highlights and forms characters and function-key mapping,
and has all the other SYSV-curses enhancements over BSD curses.
"""
homepage = "http://invisible-island.net/ncurses/ncurses.html"
version('5.9', '8cb9c412e5f2d96bc6f459aa8c6282a1',
url='http://ftp.gnu.org/pub/gnu/ncurses/ncurses-5.9.tar.gz')
def install(self, spec, prefix):
configure("--prefix=%s" % prefix,
"--with-shared",
"--enable-widec",
"--disable-pc-files",
"--without-ada")
make()
make("install")
configure("--prefix=%s" % prefix,
"--with-shared",
"--disable-widec",
"--disable-pc-files",
"--without-ada")
make()
make("install")
| lgpl-2.1 | Python |
382a3f673c4d74f6ba2b8c4ccff9332445b76093 | Fix resource category help text. | cdriehuys/chmvh-website,cdriehuys/chmvh-website,cdriehuys/chmvh-website | chmvh_website/resources/models.py | chmvh_website/resources/models.py | from django.db import models
class Category(models.Model):
"""A category of resources."""
important = models.BooleanField(
default=False,
help_text=('Categories marked important will be shown at the top of '
'the resource list'),
verbose_name='important')
title = models.CharField(
max_length=100,
unique=True,
verbose_name='title')
class Meta:
ordering = ('title',)
verbose_name_plural = 'categories'
def __str__(self):
"""Return the category's title"""
return self.title
class Resource(models.Model):
"""A resource containing various information."""
address = models.TextField(
blank=True,
verbose_name='address')
category = models.ForeignKey(
to='Category',
verbose_name='resource category')
description = models.TextField(
blank=True,
verbose_name='description')
email = models.EmailField(
blank=True,
verbose_name='email address')
phone = models.CharField(
blank=True,
max_length=50,
verbose_name='phone number')
title = models.CharField(
max_length=100,
unique=True,
verbose_name='title')
url = models.URLField(
blank=True,
verbose_name='website URL')
class Meta:
ordering = ('title',)
def __str__(self):
"""Return the resource's title"""
return self.title
| from django.db import models
class Category(models.Model):
"""A category of resources."""
important = models.BooleanField(
default=False,
help_text=('categories marked important will be shown at the top of ',
'the resource list'),
verbose_name='important')
title = models.CharField(
max_length=100,
unique=True,
verbose_name='title')
class Meta:
ordering = ('title',)
verbose_name_plural = 'categories'
def __str__(self):
"""Return the category's title"""
return self.title
class Resource(models.Model):
"""A resource containing various information."""
address = models.TextField(
blank=True,
verbose_name='address')
category = models.ForeignKey(
to='Category',
verbose_name='resource category')
description = models.TextField(
blank=True,
verbose_name='description')
email = models.EmailField(
blank=True,
verbose_name='email address')
phone = models.CharField(
blank=True,
max_length=50,
verbose_name='phone number')
title = models.CharField(
max_length=100,
unique=True,
verbose_name='title')
url = models.URLField(
blank=True,
verbose_name='website URL')
class Meta:
ordering = ('title',)
def __str__(self):
"""Return the resource's title"""
return self.title
| mit | Python |
e757c0bc72bd508a2a8fddda1900e71b15383cdf | Fix for failing query. | ICT4H/dcs-mangrove | georegistry/api.py | georegistry/api.py | # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import urllib
import json
from mangrove.utils.types import is_string, is_number
GEOREGISTRY_API_BASE_URL = 'http://ni-api.georegistry.org/api/1.0'
GEOREGISTRY_API_DEFAULT_LIMIT = 50
GEOREGISTRY_NUM_HTTP_ATTEMPS = 5
def get_locations_tree(country_code, limit=GEOREGISTRY_API_DEFAULT_LIMIT):
assert is_string(country_code)
assert is_number(int(limit))
return _query('/features/locations', country_code=country_code, limit=limit)
def get_feature_by_id(id):
assert is_string(id)
query = _query('/feature/%s.json' % id)
if query:
return query['features'][0]
else:
return None
def _query(url, **params):
params = urllib.urlencode(params)
ret_val = False
for t in range(0, GEOREGISTRY_NUM_HTTP_ATTEMPS):
try:
query = GEOREGISTRY_API_BASE_URL + url + '?%s' % params
data = urllib.urlopen(query)
if data.getcode() == 200:
ret_val = json.loads(data.read())
break
except IOError as e:
print e.message
return ret_val
| # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import urllib
import json
from mangrove.utils.types import is_string, is_number
GEOREGISTRY_API_BASE_URL = 'http://ni-api.georegistry.org/api/1.0'
GEOREGISTRY_API_DEFAULT_LIMIT = 50
GEOREGISTRY_NUM_HTTP_ATTEMPS = 5
def get_locations_tree(country_code, limit=GEOREGISTRY_API_DEFAULT_LIMIT):
assert is_string(country_code)
assert is_number(int(limit))
return _query('/features/locations', country_code=country_code, limit=limit)
def get_feature_by_id(id):
assert is_string(id)
return _query('/feature/%s.json' % id)['features'][0]
def _query(url, **params):
params = urllib.urlencode(params)
ret_val = False
for t in range(0, GEOREGISTRY_NUM_HTTP_ATTEMPS):
try:
query = GEOREGISTRY_API_BASE_URL + url + '?%s' % params
data = urllib.urlopen(query)
print '[%s] ...' % (t + 1)
if data.getcode() == 200:
ret_val = json.loads(data.read())
break
except IOError as e:
print e.message
print 'Query was: %s' % query
return ret_val
| bsd-3-clause | Python |
91b6025b2706d9ca993627d20a9d4f769a4f3276 | fix scale | str4d/i2p-tools,str4d/i2p-tools,str4d/i2p-tools,majestrate/i2p-tools,majestrate/i2p-tools,majestrate/i2p-tools,majestrate/i2p-tools,str4d/i2p-tools,majestrate/i2p-tools | munin/i2pmon.py | munin/i2pmon.py | #!/usr/bin/env python
#
# i2pcontrol munin plugin
#
import sys
if len(sys.argv) > 1 and sys.argv[1] == 'config':
print ('graph_title I2P Bandwidth')
print ('graph_order down up')
print ('graph_category network')
print ('graph_vlabel bits in (-) / out (+) per ${graph_period}')
print ('down.label bps')
print ('down.type COUNTER')
print ('down.cdef down,8,*')
print ('up.label bps')
print ('up.type COUNTER')
print ('up.negative down')
print ('up.cdef up,8,*')
else:
import i2pcontrol
ctl = i2pcontrol.I2PController(use_ssl=False)
info = ctl.getRouterInfo()
print ('down.value {}'.format(int(info['i2p.router.net.bw.inbound.1s'])))
print ('up.value {}'.format(int(info['i2p.router.net.bw.outbound.1s'])))
| #!/usr/bin/env python
#
# i2pcontrol munin plugin
#
import sys
if len(sys.argv) > 1 and sys.argv[1] == 'config':
print ('graph_title I2P Bandwidth')
print ('graph_order down up')
print ('graph_category network')
print ('graph_args --base 1000')
print ('graph_vlabel bits in (-) / out (+) per ${graph_period}')
print ('down.label bps')
print ('down.type COUNTER')
print ('down.cdef down,8,*')
print ('up.label bps')
print ('up.type COUNTER')
print ('up.negative down')
print ('up.cdef up,8,*')
else:
import i2pcontrol
ctl = i2pcontrol.I2PController(use_ssl=False)
info = ctl.getRouterInfo()
print ('down.value {}'.format(int(info['i2p.router.net.bw.inbound.1s'])))
print ('up.value {}'.format(int(info['i2p.router.net.bw.outbound.1s'])))
| mit | Python |
68eeae8d008a2ae6db3923a1f6d19a5f9cb54e04 | Update hiptnt.py | dbeyer/benchexec,sosy-lab/benchexec,dbeyer/benchexec,IljaZakharov/benchexec,martin-neuhaeusser/benchexec,IljaZakharov/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,martin-neuhaeusser/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,martin-neuhaeusser/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,IljaZakharov/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,martin-neuhaeusser/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,IljaZakharov/benchexec | benchexec/tools/hiptnt.py | benchexec/tools/hiptnt.py | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
def executable(self):
return util.find_executable('hiptnt.sh')
def name(self):
return 'HipTNT+'
def determine_result(self, returncode, returnsignal, output, isTimeout):
output = '\n'.join(output)
if "YES" in output:
return result.RESULT_TRUE_PROP
elif "TRUE" in output:
return result.RESULT_TRUE_PROP
elif "FALSE" in output:
return result.RESULT_FALSE_TERMINATION
elif "NO" in output:
return result.RESULT_FALSE_TERMINATION
else:
return result.RESULT_UNKNOWN
return status
| """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
def executable(self):
return util.find_executable('HipTNT.sh')
def name(self):
return 'HipTNT+'
def determine_result(self, returncode, returnsignal, output, isTimeout):
output = '\n'.join(output)
if "YES" in output:
return result.RESULT_TRUE_PROP
elif "TRUE" in output:
return result.RESULT_TRUE_PROP
elif "FALSE" in output:
return result.RESULT_FALSE_TERMINATION
elif "NO" in output:
return result.RESULT_FALSE_TERMINATION
else:
return result.RESULT_UNKNOWN
return status
| apache-2.0 | Python |
94f0b65504424786bab736c87fbff57e61bb39d4 | Replace the last occurrence of .s with .h | sippet/webrtc,sippet/webrtc,sippet/webrtc,sippet/webrtc,sippet/webrtc,sippet/webrtc | build/generate_asm_header.py | build/generate_asm_header.py | #!/usr/bin/env python
#
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""This script is a tool to generate special header files from input
C source files.
It first assembles the input source files to generate intermediate assembly
files (*.s). Then it parses the .s files and finds declarations of variables
whose names start with the string specified as the third argument in the
command-line, translates the variable names and values into constant defines
and writes them into header files.
"""
import os
import sys
import subprocess
from optparse import OptionParser
def main(argv):
parser = OptionParser()
usage = 'Usage: %prog [options] input_filename'
parser.set_usage(usage)
parser.add_option('--compiler', default = 'gcc', help = 'compiler name')
parser.add_option('--options', default = '-S', help = 'compiler options')
parser.add_option('--pattern', default = 'offset_', help = 'A match pattern'
' used for searching the relevant constants.')
parser.add_option('--dir', default = '.', help = 'output directory')
(options, args) = parser.parse_args()
# Generate complete intermediate and header file names.
input_filename = args[0]
output_root = (options.dir + '/' +
os.path.splitext(os.path.basename(input_filename))[0])
interim_filename = output_root + '.s'
out_filename = output_root + '.h'
# Set the shell command with the compiler and options inputs.
compiler_command = (options.compiler + " " + options.options + " " +
input_filename + " -o " + interim_filename)
# Run the shell command and generate the intermediate file.
subprocess.check_call(compiler_command, shell=True)
interim_file = open(interim_filename) # The intermediate file.
out_file = open(out_filename, 'w') # The output header file.
# Generate the output header file.
for line in interim_file: # Iterate though all the lines in the input file.
if line.startswith(options.pattern):
out_file.write('#define ')
out_file.write(line.split(':')[0]) # Write the constant name.
out_file.write(' ')
if line.find('.word') >= 0:
out_file.write(line.split('.word')[1]) # Write the constant value.
interim_file.close()
out_file.close()
if __name__ == "__main__":
main(sys.argv[1:])
| #!/usr/bin/env python
#
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""This script is a tool to generate special header files from input
C source files.
It first assembles the input source files to generate intermediate assembly
files (*.s). Then it parses the .s files and finds declarations of variables
whose names start with the string specified as the third argument in the
command-line, translates the variable names and values into constant defines
and writes them into header files.
"""
import os
import sys
import subprocess
from optparse import OptionParser
def main(argv):
parser = OptionParser()
usage = 'Usage: %prog [options] input_file'
parser.set_usage(usage)
parser.add_option('--compiler', default = 'gcc', help = 'compiler name')
parser.add_option('--options', default = '-S', help = 'compiler options')
parser.add_option('--pattern', default = 'offset_', help = 'A match pattern'
' used for searching the relevant constants.')
parser.add_option('--dir', default = '.', help = 'output directory')
(options, args) = parser.parse_args()
# Generate complete intermediate and header file names.
input_file_name = os.path.basename(args[0])
file_base_name = os.path.splitext(input_file_name)[0]
interim_file = options.dir + "/" + file_base_name + '.s'
out_file = interim_file.replace('.s', '.h')
# Set the shell command with the compiler and options inputs.
compiler_command = (options.compiler + " " + options.options + " " + args[0]
+ " -o " + interim_file)
# Run the shell command and generate the intermediate file.
subprocess.check_call(compiler_command, shell=True)
infile = open(interim_file) # The intermediate file.
outfile = open(out_file, 'w') # The output header file.
# Generate the output header file.
for line in infile: # Iterate though all the lines in the input file.
if line.startswith(options.pattern):
outfile.write('#define ')
outfile.write(line.split(':')[0]) # Write the constant name.
outfile.write(' ')
if line.find('.word') >= 0:
outfile.write(line.split('.word')[1]) # Write the constant value.
infile.close()
outfile.close()
if __name__ == "__main__":
main(sys.argv[1:])
| bsd-3-clause | Python |
d5d37914fc613c17804418fbdeedde562bad1908 | use empty ip to specify all interfaces | rueberger/janelia_jupyterhub | janelia_jupyterhub/jupyterhub_config.py | janelia_jupyterhub/jupyterhub_config.py | # Configuration file for jupyterhub.
c.JupyterHub.authenticator_class = 'ldapauthenticator.LDAPAuthenticator'
# Connect containers to this Docker network
network_name = 'jhub'
c.DockerSpawner.use_internal_ip = True
c.DockerSpawner.network_name = network_name
# Pass the network name as argument to spawned containers
c.DockerSpawner.extra_host_config = { 'network_mode': network_name }
c.DockerSpawner.extra_start_kwargs = { 'network_mode': network_name }
# User containers will access hub by container name on the Docker network
c.JupyterHub.hub_ip = ''
c.JupyterHub.hub_port = 8080
c.DockerSpawner.container_ip = '0.0.0.0'
c.DockerSpawner.hub_ip_connect = 'jhub'
## The public facing ip of the whole application (the proxy)
c.JupyterHub.ip = ''
## The class to use for spawning single-user servers.
#
# Should be a subclass of Spawner.
c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
#------------------------------------------------------------------------------
# LDAP config
#------------------------------------------------------------------------------
c.LDAPAuthenticator.server_address = 'ldap-vip1.int.janelia.org'
c.LDAPAuthenticator.bind_dn_template = 'cn={username},ou=People,dc=hhmi,dc=org'
c.LDAPAuthenticator.use_ssl = False
| # Configuration file for jupyterhub.
c.JupyterHub.authenticator_class = 'ldapauthenticator.LDAPAuthenticator'
# Connect containers to this Docker network
network_name = 'jhub'
c.DockerSpawner.use_internal_ip = True
c.DockerSpawner.network_name = network_name
# Pass the network name as argument to spawned containers
c.DockerSpawner.extra_host_config = { 'network_mode': network_name }
c.DockerSpawner.extra_start_kwargs = { 'network_mode': network_name }
# User containers will access hub by container name on the Docker network
c.JupyterHub.hub_ip = '*'
c.JupyterHub.hub_port = 8080
c.DockerSpawner.container_ip = '0.0.0.0'
c.DockerSpawner.hub_ip_connect = 'jhub'
## The public facing ip of the whole application (the proxy)
c.JupyterHub.ip = '*'
## The class to use for spawning single-user servers.
#
# Should be a subclass of Spawner.
c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
#------------------------------------------------------------------------------
# LDAP config
#------------------------------------------------------------------------------
c.LDAPAuthenticator.server_address = 'ldap-vip1.int.janelia.org'
c.LDAPAuthenticator.bind_dn_template = 'cn={username},ou=People,dc=hhmi,dc=org'
c.LDAPAuthenticator.use_ssl = False
| mit | Python |
26e9800e3c31892a1ddc584903bf5ef646b1a633 | Add seen command | mgracik/robinette | robinette/irc.py | robinette/irc.py | from dateutil import tz
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
import bson
from nltk.chat.eliza import eliza_chatbot as chatbot
import pymongo
from xmlrpc.server import BaseHandler
from xmlrpc.util import signature
class IRC(BaseHandler):
def __init__(self, chatbot):
self._mongo_conn = pymongo.MongoClient()
self._chatbot = chatbot
def log(self, data):
log.debug('Logged %s', data)
event = {
'_id': bson.ObjectId(),
'user': data['user'],
'channel': data['channel'],
'msg': data['msg']
}
db = self._mongo_conn.event_db
db.events.insert(event, w=1)
@signature(args=['string'], returns='string')
def respond(self, msg):
"""
Respond to the message.
"""
return self._chatbot.respond(msg)
@signature(args=['string'], returns='string')
def seen(self, nick):
"""
Return the last time a user was seen.
"""
db = self._mongo_conn.event_db
messages = db.events.find(
{'user': {'$regex': '^%s' % nick, '$options': 'i'}}
)
# Get latest.
messages = list(messages.sort([('_id', -1)]).limit(1))
if messages:
msg = messages[0]
return '%s was last seen on %s, saying: %s' % (
nick,
msg['_id'].generation_time.astimezone(tz.tzlocal()).strftime('%a %b %d %X'),
msg['msg']
)
else:
return 'I have not seen %s' % nick
@signature(args=['int', 'int'], returns='int')
def add(self, a, b):
"""
Add two integers.
"""
try:
a, b = int(a), int(b)
except ValueError:
return None
return a + b
@signature(args=['int', 'int'], returns='int')
def sub(self, a, b):
"""
Subtract two integers.
"""
try:
a, b = int(a), int(b)
except ValueError:
return None
return a - b
irc = IRC(chatbot)
| import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
import bson
from nltk.chat.eliza import eliza_chatbot as chatbot
import pymongo
from xmlrpc.server import BaseHandler
from xmlrpc.util import signature
class IRC(BaseHandler):
def __init__(self, chatbot):
self._mongo_conn = pymongo.MongoClient()
self._chatbot = chatbot
def log(self, data):
log.debug('Logged %s', data)
event = {
'_id': bson.ObjectId(),
'user': data['user'],
'channel': data['channel'],
'msg': data['msg']
}
db = self._mongo_conn.event_db
db.events.insert(event, w=1)
@signature(args=['string'], returns='string')
def respond(self, msg):
"""
Respond to the message.
"""
return self._chatbot.respond(msg)
@signature(args=['int', 'int'], returns='int')
def add(self, a, b):
"""
Add two integers.
"""
try:
a, b = int(a), int(b)
except ValueError:
return None
return a + b
@signature(args=['int', 'int'], returns='int')
def sub(self, a, b):
"""
Subtract two integers.
"""
try:
a, b = int(a), int(b)
except ValueError:
return None
return a - b
irc = IRC(chatbot)
| mit | Python |
636f29374561148f167ff76f4a6dc23ee2e57d95 | remove ipdb | romaintha/twitter | twitter/streaming_api.py | twitter/streaming_api.py | import logging
import time
import tweepy
from twitter import settings
class Streaming:
def __init__(self,
pipeline,
batch_size=1000,
consumer_key=settings.CONSUMER_TOKEN,
consumer_secret=settings.CONSUMER_SECRET,
acces_token=settings.ACCESS_TOKEN,
access_secret=settings.ACCESS_SECRET,):
self.auth = tweepy.OAuthHandler(consumer_key=consumer_key, consumer_secret=consumer_secret)
self.auth.set_access_token(acces_token, access_secret)
self.stream = tweepy.Stream(auth=self.auth, listener=Listener(pipeline, batch_size=batch_size))
logging.basicConfig(filename='log_twitter.txt', level=logging.DEBUG)
def start_streaming(self,to_track=settings.TO_TRACK):
while True:
try:
self.stream.filter(track=to_track)
except Exception as e:
logging.exception('stream filter')
time.sleep(3)
class Listener(tweepy.StreamListener):
def __init__(self, pipeline, batch_size):
super(Listener, self).__init__()
self.pipeline = pipeline
self.tweets = list()
self.batch_size = batch_size
def on_status(self, status):
if not self.tweet_filter(status):
return
self.tweets.append(status)
if len(self.tweets) >= self.batch_size:
data = self.pipeline(self.tweets)
self.tweets = list()
def on_error(self, status_code):
logging.debug('error: %s', status_code)
if status_code == 420:
time.time(61*15)
def on_timeout(self):
logging.debug('timeout')
def on_limit(self, track):
logging.debug('limit: %s', track)
@staticmethod
def tweet_filter(status):
if hasattr(status, 'lang') and status.lang != 'fr':
return False
return True
| import logging
import time
import tweepy
from twitter import settings
class Streaming:
def __init__(self,
pipeline,
batch_size=1000,
consumer_key=settings.CONSUMER_TOKEN,
consumer_secret=settings.CONSUMER_SECRET,
acces_token=settings.ACCESS_TOKEN,
access_secret=settings.ACCESS_SECRET,):
self.auth = tweepy.OAuthHandler(consumer_key=consumer_key, consumer_secret=consumer_secret)
self.auth.set_access_token(acces_token, access_secret)
self.stream = tweepy.Stream(auth=self.auth, listener=Listener(pipeline, batch_size=batch_size))
logging.basicConfig(filename='log_twitter.txt', level=logging.DEBUG)
def start_streaming(self,to_track=settings.TO_TRACK):
while True:
try:
self.stream.filter(track=to_track)
except Exception as e:
import ipdb; ipdb.set_trace()
logging.exception('stream filter')
time.sleep(3)
class Listener(tweepy.StreamListener):
def __init__(self, pipeline, batch_size):
super(Listener, self).__init__()
self.pipeline = pipeline
self.tweets = list()
self.batch_size = batch_size
def on_status(self, status):
if not self.tweet_filter(status):
return
self.tweets.append(status)
if len(self.tweets) >= self.batch_size:
data = self.pipeline(self.tweets)
self.tweets = list()
def on_error(self, status_code):
logging.debug('error: %s', status_code)
if status_code == 420:
time.time(61*15)
def on_timeout(self):
logging.debug('timeout')
def on_limit(self, track):
logging.debug('limit: %s', track)
@staticmethod
def tweet_filter(status):
if hasattr(status, 'lang') and status.lang != 'fr':
return False
return True
| mit | Python |
84b22370caeb6fd493d9238922cc39c6c12f24d2 | Fix redirect admin | rtfd/readthedocs.org,rtfd/readthedocs.org,pombredanne/readthedocs.org,safwanrahman/readthedocs.org,espdev/readthedocs.org,davidfischer/readthedocs.org,rtfd/readthedocs.org,tddv/readthedocs.org,rtfd/readthedocs.org,safwanrahman/readthedocs.org,espdev/readthedocs.org,safwanrahman/readthedocs.org,davidfischer/readthedocs.org,espdev/readthedocs.org,tddv/readthedocs.org,davidfischer/readthedocs.org,espdev/readthedocs.org,pombredanne/readthedocs.org,espdev/readthedocs.org,safwanrahman/readthedocs.org,pombredanne/readthedocs.org,tddv/readthedocs.org,davidfischer/readthedocs.org | readthedocs/redirects/admin.py | readthedocs/redirects/admin.py | from __future__ import absolute_import
from django.contrib import admin
from .models import Redirect
class RedirectAdmin(admin.ModelAdmin):
list_display = ['project', 'redirect_type', 'from_url', 'to_url']
raw_id_fields = ('project',)
admin.site.register(Redirect, RedirectAdmin)
| from __future__ import absolute_import
from django.contrib import admin
from .models import Redirect
class RedirectAdmin(admin.ModelAdmin):
list_display = ['project', 'redirect_type', 'from_url', 'to_url']
admin.site.register(Redirect, RedirectAdmin)
| mit | Python |
f4822a15ba2360d90da29f27e5ca0802a8feaf4b | Bump version to 12.1.5 | hhursev/recipe-scraper | recipe_scrapers/__version__.py | recipe_scrapers/__version__.py | __version__ = "12.1.5"
| __version__ = "12.0.5"
| mit | Python |
697f55cf757edd6587e00bbc98533133c00b6d10 | Update __version__ as it seems like I have forgotten how publishing works | hhursev/recipe-scraper | recipe_scrapers/__version__.py | recipe_scrapers/__version__.py | __version__ = "13.6.0"
| __version__ = "13.5.0"
| mit | Python |
564940c0a588bb34455af499754b1be46d690c7b | Bump version to 12.0.5 | hhursev/recipe-scraper | recipe_scrapers/__version__.py | recipe_scrapers/__version__.py | __version__ = "12.0.5"
| __version__ = "12.0.4"
| mit | Python |
b9c54a3a7169bdaa04539f61f97489e2d7fc70ea | Remove test error trigger | ministryofjustice/cla_frontend,ministryofjustice/cla_frontend,ministryofjustice/cla_frontend,ministryofjustice/cla_frontend | cla_frontend/apps/status/views.py | cla_frontend/apps/status/views.py | import datetime
from django.http import JsonResponse
from django.shortcuts import render
from django.views.generic import View
from cla_common.smoketest import smoketest
from .smoketests import smoketests
def status(request):
results = list(smoketests.execute())
passed = reduce(lambda acc, curr: acc and curr["status"], results, True)
return render(
request,
"status/status_page.html",
{"passed": passed, "last_updated": datetime.datetime.now(), "smoketests": results},
)
def smoketests_json(request):
"""
Run smoke tests and return results as JSON datastructure
"""
from cla_frontend.apps.status.tests.smoketests import SmokeTests
return JsonResponse(smoketest(SmokeTests))
class PingJsonView(View):
"""
Stub IRaT PingJsonView for compatibility with current and imminent move to Kubernetes, obviating this view
"""
def get(self, request):
response_data = {"build_tag": None, "build_date": None, "version_number": None, "commit_id": None}
return JsonResponse(response_data)
| import datetime
from django.http import JsonResponse
from django.shortcuts import render
from django.views.generic import View
from cla_common.smoketest import smoketest
from .smoketests import smoketests
def status(request):
if "error" in request.GET.keys():
raise Exception("ISSUE: %s" % datetime.datetime.now())
results = list(smoketests.execute())
passed = reduce(lambda acc, curr: acc and curr["status"], results, True)
return render(
request,
"status/status_page.html",
{"passed": passed, "last_updated": datetime.datetime.now(), "smoketests": results},
)
def smoketests_json(request):
"""
Run smoke tests and return results as JSON datastructure
"""
from cla_frontend.apps.status.tests.smoketests import SmokeTests
return JsonResponse(smoketest(SmokeTests))
class PingJsonView(View):
"""
Stub IRaT PingJsonView for compatibility with current and imminent move to Kubernetes, obviating this view
"""
def get(self, request):
if "error" in request.GET.keys():
raise Exception("ISSUE: %s" % datetime.datetime.now())
response_data = {"build_tag": None, "build_date": None, "version_number": None, "commit_id": None}
return JsonResponse(response_data)
| mit | Python |
969b2d322174392a85f6fa6fc92160cb18144594 | Allow for empty tags and authors on `ContentSerializer` | theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs,pombredanne/django-bulbs,pombredanne/django-bulbs,theonion/django-bulbs | bulbs/content/serializers.py | bulbs/content/serializers.py | from django import forms
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Content, Tag
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
exclude = ('password',)
class SimpleAuthorSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'first_name', 'last_name')
class ContentSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='content-detail',
lookup_field='pk'
)
tags = serializers.PrimaryKeyRelatedField(many=True, required=False)
authors = serializers.PrimaryKeyRelatedField(many=True, required=False)
class Meta:
model = Content
exclude = ('polymorphic_ctype',)
class ContentSerializerReadOnly(ContentSerializer):
tags = TagSerializer(many=True, required=False)
authors = SimpleAuthorSerializer(many=True, required=False)
class PolymorphicContentSerializerMixin(object):
def to_native(self, value):
if hasattr(value, 'get_serializer_class'):
ThisSerializer = value.get_serializer_class()
else:
class ThisSerializer(serializers.ModelSerializer):
class Meta:
model = value.__class__
serializer = ThisSerializer(context=self.context)
return serializer.to_native(value)
class PolymorphicContentSerializer(ContentSerializer, PolymorphicContentSerializerMixin):
pass
class PolymorphicContentSerializerReadOnly(ContentSerializerReadOnly, PolymorphicContentSerializerMixin):
pass
| from django import forms
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Content, Tag
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
exclude = ('password',)
class SimpleAuthorSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'first_name', 'last_name')
class ContentSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='content-detail',
lookup_field='pk'
)
class Meta:
model = Content
exclude = ('polymorphic_ctype',)
class ContentSerializerReadOnly(ContentSerializer):
tags = TagSerializer(many=True, required=False)
authors = SimpleAuthorSerializer(many=True, required=False)
class PolymorphicContentSerializerMixin(object):
def to_native(self, value):
if hasattr(value, 'get_serializer_class'):
ThisSerializer = value.get_serializer_class()
else:
class ThisSerializer(serializers.ModelSerializer):
class Meta:
model = value.__class__
serializer = ThisSerializer(context=self.context)
return serializer.to_native(value)
class PolymorphicContentSerializer(ContentSerializer, PolymorphicContentSerializerMixin):
pass
class PolymorphicContentSerializerReadOnly(ContentSerializerReadOnly, PolymorphicContentSerializerMixin):
pass
| mit | Python |
fabbc25e32203ec0fa854c85b77834534ec099f7 | Fix merge conflict | cobalt-uoft/uoft-scrapers,kshvmdn/uoft-scrapers,arkon/uoft-scrapers,g3wanghc/uoft-scrapers | uoftscrapers/__init__.py | uoftscrapers/__init__.py | import logging
import os
import sys
from .scrapers.buildings import Buildings
from .scrapers.calendar.utsg import UTSGCalendar
from .scrapers.coursefinder import CourseFinder
from .scrapers.exams.utm import UTMExams
from .scrapers.exams.utsc import UTSCExams
from .scrapers.exams.utsg import UTSGExams
from .scrapers.food import Food
from .scrapers.textbooks import Textbooks
from .scrapers.timetable.utm import UTMTimetable
from .scrapers.timetable.utsc import UTSCTimetable
from .scrapers.timetable.utsg import UTSGTimetable
from .scrapers.utmshuttle import UTMShuttle
from .scrapers.parking.utsg import UTSGParking
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger("uoftscrapers").addHandler(NullHandler())
| import logging
import os
import sys
from .scrapers.buildings import Buildings
from .scrapers.calendar.utsg import UTSGCalendar
from .scrapers.coursefinder import CourseFinder
from .scrapers.exams.utm import UTMExams
from .scrapers.exams.utsc import UTSCExams
from .scrapers.exams.utsg import UTSGExams
from .scrapers.food import Food
from .scrapers.textbooks import Textbooks
from .scrapers.timetable.utm import UTMTimetable
from .scrapers.timetable.utsc import UTSCTimetable
from .scrapers.timetable.utsg import UTSGTimetable
from .scrapers.utmshuttle import UTMShuttle
from .scrapers.parking.utsg import UTSGParking
from .scrapers.utmathletics import UTMAthletics
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger("uoftscrapers").addHandler(NullHandler())
| mit | Python |
0ed3ed8b0ef9742df09945483c5886722a75499b | Add a test for user deadlines | wtsi-hgi/CoGS-Webapp,wtsi-hgi/CoGS-Webapp,wtsi-hgi/CoGS-Webapp | test/scheduler/test_scheduler.py | test/scheduler/test_scheduler.py | """
Copyright (c) 2017 Genome Research Ltd.
Authors:
* Christopher Harrison <ch12@sanger.ac.uk>
* Simon Beal <sb48@sanger.ac.uk>
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import unittest
from unittest.mock import MagicMock, patch
from datetime import date
from cogs.scheduler import Scheduler
from cogs.scheduler.constants import GROUP_DEADLINES, USER_DEADLINES
_MOCK_SCHEDULER_ARGS = (MagicMock(),) * 3
@patch("cogs.scheduler.scheduler.AsyncIOScheduler", spec=True)
class TestScheduler(unittest.TestCase):
def test_constructor(self, mock_scheduler):
s = Scheduler(*_MOCK_SCHEDULER_ARGS)
mock_scheduler.assert_called_once()
s._scheduler.start.assert_called_once()
def test_schedule_deadline(self, mock_scheduler):
s = Scheduler(*_MOCK_SCHEDULER_ARGS)
self.assertRaises(AssertionError, s.schedule_deadline, date.today(), "foobar", None)
for deadline_id, deadline in GROUP_DEADLINES.items():
s._scheduler.add_job.reset_mock()
s.schedule_deadline(date.today(), deadline_id, MagicMock())
s._scheduler.add_job.assert_called_once()
s._scheduler.add_job.reset_mock()
s.schedule_deadline(date.today(), deadline_id, MagicMock(), to="foo@bar")
calls = s._scheduler.add_job.call_count
self.assertEqual(calls, 1 + len(deadline.pester_times))
def test_schedule_user_deadline(self, mock_scheduler):
s = Scheduler(*_MOCK_SCHEDULER_ARGS)
self.assertRaises(AssertionError, s.schedule_user_deadline, date.today(), "foobar", None)
for deadline_id, deadline in USER_DEADLINES.items():
s._scheduler.add_job.reset_mock()
s.schedule_user_deadline(date.today(), deadline_id, MagicMock())
s._scheduler.add_job.assert_called_once()
if __name__ == "__main__":
unittest.main()
| """
Copyright (c) 2017 Genome Research Ltd.
Authors:
* Christopher Harrison <ch12@sanger.ac.uk>
* Simon Beal <sb48@sanger.ac.uk>
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import unittest
from unittest.mock import MagicMock, patch
from datetime import date
from cogs.scheduler import Scheduler
from cogs.scheduler.constants import GROUP_DEADLINES
_MOCK_SCHEDULER_ARGS = (MagicMock(),) * 3
@patch("cogs.scheduler.scheduler.AsyncIOScheduler", spec=True)
class TestScheduler(unittest.TestCase):
def test_constructor(self, mock_scheduler):
s = Scheduler(*_MOCK_SCHEDULER_ARGS)
mock_scheduler.assert_called_once()
s._scheduler.start.assert_called_once()
def test_schedule_deadline(self, mock_scheduler):
s = Scheduler(*_MOCK_SCHEDULER_ARGS)
self.assertRaises(AssertionError, s.schedule_deadline, date.today(), "foobar", None)
for deadline_id, deadline in GROUP_DEADLINES.items():
s._scheduler.add_job.reset_mock()
s.schedule_deadline(date.today(), deadline_id, MagicMock())
s._scheduler.add_job.assert_called_once()
s._scheduler.add_job.reset_mock()
s.schedule_deadline(date.today(), deadline_id, MagicMock(), to="foo@bar")
calls = s._scheduler.add_job.call_count
self.assertEqual(calls, 1 + len(deadline.pester_times))
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | Python |
98c079e60c5f40190986cf3711755f3e76642393 | test for determinism | fartashf/cleverhans,carlini/cleverhans,cleverhans-lab/cleverhans,cleverhans-lab/cleverhans,cihangxie/cleverhans,carlini/cleverhans,cleverhans-lab/cleverhans,openai/cleverhans | tests_tf/test_mnist_tutorial_tf.py | tests_tf/test_mnist_tutorial_tf.py | import unittest
import numpy as np
from cleverhans.devtools.checks import CleverHansTest
class TestMNISTTutorialTF(CleverHansTest):
def test_mnist_tutorial_tf(self):
np.random.seed(42)
import tensorflow as tf
tf.set_random_seed(42)
from cleverhans_tutorials import mnist_tutorial_tf
# Run the MNIST tutorial on a dataset of reduced size
test_dataset_indices = {'train_start': 0,
'train_end': 5000,
'test_start': 0,
'test_end': 333,
'nb_epochs': 2,
'testing': True}
report = mnist_tutorial_tf.mnist_tutorial(**test_dataset_indices)
# Check accuracy values contained in the AccuracyReport object
self.assertGreater(report.train_clean_train_clean_eval, 0.97)
self.assertLess(report.train_clean_train_adv_eval, 0.02)
self.assertGreater(report.train_adv_train_clean_eval, 0.95)
self.assertGreater(report.train_adv_train_adv_eval, 0.4)
report_2 = mnist_tutorial_tf.mnist_tutorial(**test_dataset_indices)
self.assertClose(report.train_clean_train_clean_eval,
report_2.train_clean_train_clean_eval)
self.assertClose(report.train_clean_train_adv_eval,
report_2.train_clean_train_adv_eval)
self.assertClose(report.train_adv_train_clean_eval,
report_2.train_adv_train_clean_eval)
self.assertClose(report.train_adv_train_adv_eval,
report_2.train_adv_train_adv_eval)
if __name__ == '__main__':
unittest.main()
| import unittest
import numpy as np
from cleverhans.devtools.checks import CleverHansTest
class TestMNISTTutorialTF(CleverHansTest):
def test_mnist_tutorial_tf(self):
np.random.seed(42)
import tensorflow as tf
tf.set_random_seed(42)
from cleverhans_tutorials import mnist_tutorial_tf
# Run the MNIST tutorial on a dataset of reduced size
test_dataset_indices = {'train_start': 0,
'train_end': 5000,
'test_start': 0,
'test_end': 333,
'nb_epochs': 2,
'testing': True}
report = mnist_tutorial_tf.mnist_tutorial(**test_dataset_indices)
# Check accuracy values contained in the AccuracyReport object
self.assertGreater(report.train_clean_train_clean_eval, 0.97)
self.assertLess(report.train_clean_train_adv_eval, 0.02)
self.assertGreater(report.train_adv_train_clean_eval, 0.95)
self.assertGreater(report.train_adv_train_adv_eval, 0.4)
if __name__ == '__main__':
unittest.main()
| mit | Python |
8dbb26e16549915dcb9b6773ead6c30d567b81e5 | Bump version | bebraw/pynu,bebraw/pynu | pynu/__init__.py | pynu/__init__.py | # -*- coding: utf-8 -*-
"""
Node utilities for Python.
"""
"""
Pynu - Python Node Utilities
Copyright (C) 2013 Juho Vepsäläinen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses/
"""
from graph import GraphNode
from tree import TreeNode
__author__ = 'Juho Vepsäläinen'
__version__ = '0.1.2'
| # -*- coding: utf-8 -*-
"""
Node utilities for Python.
"""
"""
Pynu - Python Node Utilities
Copyright (C) 2010 Juho Vepsäläinen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses/
"""
from graph import GraphNode
from tree import TreeNode
__author__ = 'Juho Vepsäläinen'
__version__ = '0.1.1'
| mit | Python |
03dc74261338ac6905526bac77b2c9374f317f7b | Support pep257 0.3.0 | PyCQA/flake8-docstrings | flake8_docstrings.py | flake8_docstrings.py | # -*- coding: utf-8 -*-
"""pep257 docstrings convention needs error code and class parser for be
included as module into flakes8
"""
import pep257
__version__ = '0.1.5'
class pep257Checker(object):
"""flake8 needs a class to check python file."""
name = 'pep257'
version = __version__
def __init__(self, tree, filename='(none)', builtins=None):
self.tree = tree
self.filename = filename
def run(self):
"""Use directly check_source api from pep257."""
errors = list()
with open(self.filename, 'r') as handle:
for error in pep257.PEP257Checker().check_source(
handle.read(), self.filename):
errors.append(error)
for error in errors:
yield (error.line, 0, error.message, type(self))
| # -*- coding: utf-8 -*-
"""pep257 docstrings convention needs error code and class parser for be
included as module into flakes8
"""
import pep257
__version__ = '0.1.4'
class pep257Checker(object):
"""flake8 needs a class to check python file."""
name = 'pep257'
version = __version__
def __init__(self, tree, filename='(none)', builtins=None):
self.tree = tree
self.filename = filename
def run(self):
"""Use directly check_source api from pep257."""
self.messages = list()
with open(self.filename, 'r') as fd:
for elem in pep257.check_source(fd.read(), self.filename):
self.messages.append(str(elem))
for m in self.messages:
log_mess = m.split(':')
yield (int(log_mess[1]), int(log_mess[2]), 'DOC0' + log_mess[3],
type(self))
| mit | Python |
d06e472cb0a33d6aa927972a3d5e89e7ee392b2f | resolve deprecation warning | kmike/django-qsstats-magic | qsstats/utils.py | qsstats/utils.py | import datetime
import re
from dateutil.relativedelta import relativedelta, MO
from qsstats.exceptions import InvalidInterval
from qsstats import compat
def _remove_time(dt):
tzinfo = getattr(dt, 'tzinfo', compat.now().tzinfo)
return datetime.datetime(dt.year, dt.month, dt.day, tzinfo=tzinfo)
def _to_datetime(dt):
if isinstance(dt, datetime.datetime):
return dt
return _remove_time(dt)
def _parse_interval(interval):
num = 1
match = re.match(r'(\d+)([A-Za-z]+)', interval)
if match:
num = int(match.group(1))
interval = match.group(2)
return num, interval
def get_bounds(dt, interval):
''' Returns interval bounds the datetime is in. '''
day = _to_datetime(_remove_time(dt))
dt = _to_datetime(dt)
if interval == 'minute':
begin = datetime.datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, tzinfo=dt.tzinfo)
end = begin + relativedelta(minutes=1)
elif interval == 'hour':
begin = datetime.datetime(dt.year, dt.month, dt.day, dt.hour, tzinfo=dt.tzinfo)
end = begin + relativedelta(hours=1)
elif interval == 'day':
begin = day
end = day + relativedelta(days=1)
elif interval == 'week':
begin = day - relativedelta(weekday=MO(-1))
end = begin + datetime.timedelta(days=7)
elif interval == 'month':
begin = datetime.datetime(dt.year, dt.month, 1, tzinfo=dt.tzinfo)
end = begin + relativedelta(months=1)
elif interval == 'year':
begin = datetime.datetime(dt.year, 1, 1, tzinfo=dt.tzinfo)
end = datetime.datetime(dt.year+1, 1, 1, tzinfo=dt.tzinfo)
else:
raise InvalidInterval('Inverval not supported.')
end = end - relativedelta(microseconds=1)
return begin, end
| import datetime
import re
from dateutil.relativedelta import relativedelta, MO
from qsstats.exceptions import InvalidInterval
from qsstats import compat
def _remove_time(dt):
tzinfo = getattr(dt, 'tzinfo', compat.now().tzinfo)
return datetime.datetime(dt.year, dt.month, dt.day, tzinfo=tzinfo)
def _to_datetime(dt):
if isinstance(dt, datetime.datetime):
return dt
return _remove_time(dt)
def _parse_interval(interval):
num = 1
match = re.match('(\d+)([A-Za-z]+)', interval)
if match:
num = int(match.group(1))
interval = match.group(2)
return num, interval
def get_bounds(dt, interval):
''' Returns interval bounds the datetime is in. '''
day = _to_datetime(_remove_time(dt))
dt = _to_datetime(dt)
if interval == 'minute':
begin = datetime.datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, tzinfo=dt.tzinfo)
end = begin + relativedelta(minutes=1)
elif interval == 'hour':
begin = datetime.datetime(dt.year, dt.month, dt.day, dt.hour, tzinfo=dt.tzinfo)
end = begin + relativedelta(hours=1)
elif interval == 'day':
begin = day
end = day + relativedelta(days=1)
elif interval == 'week':
begin = day - relativedelta(weekday=MO(-1))
end = begin + datetime.timedelta(days=7)
elif interval == 'month':
begin = datetime.datetime(dt.year, dt.month, 1, tzinfo=dt.tzinfo)
end = begin + relativedelta(months=1)
elif interval == 'year':
begin = datetime.datetime(dt.year, 1, 1, tzinfo=dt.tzinfo)
end = datetime.datetime(dt.year+1, 1, 1, tzinfo=dt.tzinfo)
else:
raise InvalidInterval('Inverval not supported.')
end = end - relativedelta(microseconds=1)
return begin, end
| bsd-3-clause | Python |
226abc572d32c134783e6b149961c6fb2e7921c6 | Create .pc dir and dir for patches | vadmium/python-quilt,bjoernricks/python-quilt | quilt/cli/new.py | quilt/cli/new.py | # vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import sys
import os.path
from optparse import OptionParser
from quilt.db import Series, Db
from quilt.patch import Patch
from quilt.utils import File, Directory
def parse(args):
usage = "%prog new patchname"
parser = OptionParser(usage=usage)
(options, pargs) = parser.parse_args(args)
if len(args) != 1:
parser.print_usage()
sys.exit(1)
newpatch = args[0]
patches = os.environ.get("QUILT_PATCHES")
if not patches:
patches = "patches"
series = Series(patches)
if series.is_patch(newpatch):
print >> sys.stderr, "Patch %s already exists" % newpatch
sys.exit(2)
patch_dir = Directory(patches)
patch_dir.create()
patchfile = patch_dir + File(newpatch)
patchfile.touch()
db = Db(".pc")
if not db.exists():
db.create()
pc_dir = Directory(os.path.join(".pc", newpatch))
# be sure that the directory is clear
pc_dir.delete()
pc_dir.create()
top = db.top_patch()
series.add_patches([Patch(newpatch)], top)
series.save()
| # vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import sys
import os.path
from optparse import OptionParser
from quilt.db import Series, Db
from quilt.patch import Patch
from quilt.utils import File, Directory
def parse(args):
usage = "%prog new patchname"
parser = OptionParser(usage=usage)
(options, pargs) = parser.parse_args(args)
if len(args) != 1:
parser.print_usage()
sys.exit(1)
newpatch = args[0]
patches = os.environ.get("QUILT_PATCHES")
if not patches:
patches = "patches"
series = Series(patches)
if series.is_patch(newpatch):
print >> sys.stderr, "Patch %s already exists" % newpatch
sys.exit(2)
patch_dir = Directory(patches)
patch_dir.create()
patchfile = patch_dir + File(newpatch)
patchfile.touch()
db = Db(".pc")
top = db.top_patch()
series.add_patches([Patch(newpatch)], top)
series.save()
| mit | Python |
d95f448cbf8072205a104a7ef5e2e4db40d68756 | Add popagate_network_log option to grab.tools.logs.default_logging | giserh/grab,shaunstanislaus/grab,alihalabyah/grab,subeax/grab,SpaceAppsXploration/grab,huiyi1990/grab,alihalabyah/grab,kevinlondon/grab,shaunstanislaus/grab,giserh/grab,codevlabs/grab,liorvh/grab,maurobaraldi/grab,raybuhr/grab,kevinlondon/grab,liorvh/grab,lorien/grab,raybuhr/grab,istinspring/grab,huiyi1990/grab,lorien/grab,codevlabs/grab,DDShadoww/grab,istinspring/grab,subeax/grab,pombredanne/grab-1,DDShadoww/grab,maurobaraldi/grab,subeax/grab,pombredanne/grab-1,SpaceAppsXploration/grab | grab/tools/logs.py | grab/tools/logs.py | import logging
def default_logging(grab_log='/tmp/grab.log', level=logging.DEBUG, mode='a',
propagate_network_logger=False,
network_log='/tmp/grab.network.log'):
"""
Customize logging output to display all log messages
except grab network logs.
Redirect grab network logs into file.
"""
logging.basicConfig(level=level)
network_logger = logging.getLogger('grab.network')
network_logger.propagate = propagate_network_logger
if network_log:
hdl = logging.FileHandler(network_log, mode)
network_logger.addHandler(hdl)
network_logger.setLevel(level)
grab_logger = logging.getLogger('grab')
if grab_log:
hdl = logging.FileHandler(grab_log, mode)
grab_logger.addHandler(hdl)
grab_logger.setLevel(level)
| import logging
def default_logging(grab_log='/tmp/grab.log', level=logging.DEBUG, mode='a',
network_log='/tmp/grab.network.log'):
"""
Customize logging output to display all log messages
except grab network logs.
Redirect grab network logs into file.
"""
logging.basicConfig(level=level)
network_logger = logging.getLogger('grab.network')
network_logger.propagate = False
if network_log:
hdl = logging.FileHandler(network_log, mode)
network_logger.addHandler(hdl)
network_logger.setLevel(level)
grab_logger = logging.getLogger('grab')
if grab_log:
hdl = logging.FileHandler(grab_log, mode)
grab_logger.addHandler(hdl)
grab_logger.setLevel(level)
| mit | Python |
8d6d6d5d23649a10cab17f04b8fa102809ce3f11 | Fix typo in docstrings (#8764) | adityahase/frappe,vjFaLk/frappe,vjFaLk/frappe,saurabh6790/frappe,yashodhank/frappe,frappe/frappe,saurabh6790/frappe,StrellaGroup/frappe,frappe/frappe,almeidapaulopt/frappe,frappe/frappe,saurabh6790/frappe,mhbu50/frappe,almeidapaulopt/frappe,adityahase/frappe,yashodhank/frappe,saurabh6790/frappe,mhbu50/frappe,StrellaGroup/frappe,adityahase/frappe,adityahase/frappe,StrellaGroup/frappe,almeidapaulopt/frappe,vjFaLk/frappe,mhbu50/frappe,almeidapaulopt/frappe,yashodhank/frappe,yashodhank/frappe,vjFaLk/frappe,mhbu50/frappe | frappe/core/utils.py | frappe/core/utils.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
def get_parent_doc(doc):
"""Returns document of `reference_doctype`, `reference_doctype`"""
if not hasattr(doc, "parent_doc"):
if doc.reference_doctype and doc.reference_name:
doc.parent_doc = frappe.get_doc(doc.reference_doctype, doc.reference_name)
else:
doc.parent_doc = None
return doc.parent_doc
def set_timeline_doc(doc):
"""Set timeline_doctype and timeline_name"""
parent_doc = get_parent_doc(doc)
if (doc.timeline_doctype and doc.timeline_name) or not parent_doc:
return
timeline_field = parent_doc.meta.timeline_field
if not timeline_field:
return
doctype = parent_doc.meta.get_link_doctype(timeline_field)
name = parent_doc.get(timeline_field)
if doctype and name:
doc.timeline_doctype = doctype
doc.timeline_name = name
else:
return
def find(list_of_dict, match_function):
'''Returns a dict in a list of dicts on matching the conditions
provided in match function
Usage:
list_of_dict = [{'name': 'Suraj'}, {'name': 'Aditya'}]
required_dict = find(list_of_dict, lambda d: d['name'] == 'Aditya')
'''
for entry in list_of_dict:
if match_function(entry):
return entry
return None
def find_all(list_of_dict, match_function):
'''Returns all matching dicts in a list of dicts.
Uses matching function to filter out the dicts
Usage:
colored_shapes = [
{'color': 'red', 'shape': 'square'},
{'color': 'red', 'shape': 'circle'},
{'color': 'blue', 'shape': 'triangle'}
]
red_shapes = find_all(colored_shapes, lambda d: d['color'] == 'red')
'''
found = []
for entry in list_of_dict:
if match_function(entry):
found.append(entry)
return found
| # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
def get_parent_doc(doc):
"""Returns document of `reference_doctype`, `reference_doctype`"""
if not hasattr(doc, "parent_doc"):
if doc.reference_doctype and doc.reference_name:
doc.parent_doc = frappe.get_doc(doc.reference_doctype, doc.reference_name)
else:
doc.parent_doc = None
return doc.parent_doc
def set_timeline_doc(doc):
"""Set timeline_doctype and timeline_name"""
parent_doc = get_parent_doc(doc)
if (doc.timeline_doctype and doc.timeline_name) or not parent_doc:
return
timeline_field = parent_doc.meta.timeline_field
if not timeline_field:
return
doctype = parent_doc.meta.get_link_doctype(timeline_field)
name = parent_doc.get(timeline_field)
if doctype and name:
doc.timeline_doctype = doctype
doc.timeline_name = name
else:
return
def find(list_of_dict, match_function):
'''Returns a dict in a list of dicts on matching the conditions
provided in match function
Usage:
list_of_dict = [{'name': 'Suraj'}, {'name': 'Aditya'}]
required_dict = find(list_of_dict, lamda d: d['name'] == 'Aditya')
'''
for entry in list_of_dict:
if match_function(entry):
return entry
return None
def find_all(list_of_dict, match_function):
'''Returns all matching dicts in a list of dicts.
Uses matching function to filter out the dicts
Usage:
colored_shapes = [
{'color': 'red', 'shape': 'square'},
{'color': 'red', 'shape': 'circle'},
{'color': 'blue', 'shape': 'triangle'}
]
red_shapes = find_all(colored_shapes, lamda d: d['color'] == 'red')
'''
found = []
for entry in list_of_dict:
if match_function(entry):
found.append(entry)
return found | mit | Python |
8eaa3e2c19f5d907eacf1e308ad410769ccc4101 | Update externsExtractor.py | LiveTex/Livetex-Tools,LiveTex/Livetex-Tools | tools/externs-extractor/externsExtractor.py | tools/externs-extractor/externsExtractor.py | #!/usr/bin/python
import os
from optparse import OptionParser
from extractors.elementsExtractor import extractElements
def getPaths(path):
paths = list()
pathsListFile = open(path, 'r')
pathsList = pathsListFile.read().splitlines()
for path in pathsList:
if path:
paths.append('./lib/' + path)
return paths
def main():
usage = "usage: jstuff [--p path_to_files_set]"
parser = OptionParser(usage)
parser.add_option("-i", "--input",
action="store",
default='./etc/build/index.d',
dest="input",
help="Input path to file with project files.")
parser.add_option("-o", "--out",
action="store",
default='./externs/index.js',
dest="output",
help="Input path to externs file.")
(options, args) = parser.parse_args()
indexdpaths = options.input
if not os.path.exists(indexdpaths):
indexdpaths = './etc/index.d'
paths = getPaths(indexdpaths)
out = options.output
externs = ''
if os.path.exists(out):
os.remove(out)
file = open(out, 'w')
for path in paths:
elements = extractElements(path)
for element in elements:
if not element.isPrivate():
externs += element.getExterns()
externs += '\n\n'
externs += '\n'
file.write(externs)
file.close()
if __name__ == "__main__":
main()
| #!/usr/bin/python2
import os
from optparse import OptionParser
from extractors.elementsExtractor import extractElements
def getPaths(path):
paths = list()
pathsListFile = open(path, 'r')
pathsList = pathsListFile.read().splitlines()
for path in pathsList:
if path:
paths.append('./lib/' + path)
return paths
def main():
usage = "usage: jstuff [--p path_to_files_set]"
parser = OptionParser(usage)
parser.add_option("-i", "--input",
action="store",
default='./etc/build/index.d',
dest="input",
help="Input path to file with project files.")
parser.add_option("-o", "--out",
action="store",
default='./externs/index.js',
dest="output",
help="Input path to externs file.")
(options, args) = parser.parse_args()
indexdpaths = options.input
if not os.path.exists(indexdpaths):
indexdpaths = './etc/index.d'
paths = getPaths(indexdpaths)
out = options.output
externs = ''
if os.path.exists(out):
os.remove(out)
file = open(out, 'w')
for path in paths:
elements = extractElements(path)
for element in elements:
if not element.isPrivate():
externs += element.getExterns()
externs += '\n\n'
externs += '\n'
file.write(externs)
file.close()
if __name__ == "__main__":
main()
| bsd-3-clause | Python |
2fb66af07b38b4744fab53f86c2ac6048e47e9bc | Make geocode CLI PEP-8 compliant | minimedj/geocoder,miraculixx/geocoder,epyatopal/geocoder-1,akittas/geocoder,ahlusar1989/geocoder,DenisCarriere/geocoder | geocode.py | geocode.py | import sys
import click
import unicodecsv
import geocoder
@click.command()
@click.argument('location', required=False)
@click.option('--provider', default='bing')
@click.option('--input', type=click.File('r'))
@click.option('--output', type=click.File('w'), default='-')
@click.option('--fieldnames')
def cli(location, provider, input, output, fieldnames):
"""
This is the help function part of the script\n
geocode --string
"""
container = []
# Reading Input files
if input:
reader = unicodecsv.DictReader(input)
first_column = reader.fieldnames[0]
# Loop inside the reader
for item in reader:
location = item[first_column]
g = geocoder.geocode(location, provider=provider)
# Format Row
row = dict(g.json.items() + item.items())
# Print Results & Add
container.append(row)
if output.name == '<stdout>':
click.echo(row)
# Reading Single Input
else:
g = geocoder.geocode(location, provider=provider)
container.append(g.json)
if output.name == '<stdout>':
click.echo(g.json)
# Saving Results
if container:
first = container[0]
fieldnames = first.keys()
if not output.name == '<stdout>':
writer = unicodecsv.DictWriter(output, fieldnames=fieldnames)
writer.writeheader()
for row in container:
writer.writerow(row)
| import click
import geocoder
import unicodecsv
import sys
@click.command()
@click.argument('location', required=False)
@click.option('--provider', default='bing')
@click.option('--input', type=click.File('r'))
@click.option('--output', type=click.File('w'), default='-')
@click.option('--fieldnames')
def cli(location, provider, input, output, fieldnames):
"""
This is the help function part of the script\n
geocode --string
"""
container = []
# Reading Input files
if input:
reader = unicodecsv.DictReader(input)
first_column = reader.fieldnames[0]
# Loop inside the reader
for item in reader:
location = item[first_column]
g = geocoder.geocode(location, provider=provider)
# Format Row
row = dict(g.json.items() + item.items())
# Print Results & Add
container.append(row)
if output.name == '<stdout>':
click.echo(row)
# Reading Single Input
else:
g = geocoder.geocode(location, provider=provider)
container.append(g.json)
if output.name == '<stdout>':
click.echo(g.json)
# Saving Results
if container:
first = container[0]
fieldnames = first.keys()
if not output.name == '<stdout>':
writer = unicodecsv.DictWriter(output, fieldnames=fieldnames)
writer.writeheader()
for row in container:
writer.writerow(row)
| mit | Python |
556dec981b7a7f204b01a7e83d1af80b18228da8 | Fix Bugs | jiasir/get-dns | get-dns.py | get-dns.py | __author__ = 'Taio'
import os.path
import dns.query
import dns.zone
import logging
from utils.noflib import Noflib
run = Noflib()
logger = logging.getLogger('get-dns')
logging.basicConfig(filename='get-dns.log', level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
dcAdd = '172.20.10.75'
domainName = 'spidc1.com'
z = dns.zone.from_xfr(dns.query.xfr(dcAdd, domainName))
names = z.nodes.keys()
names.sort()
def print_records_stdout():
'''Print records only to stdout'''
for i in names:
if i.find('IN A'):
print z[i].to_text(n)
def gen_records_spidc1():
'''Write to /etc/hosts file'''
try:
with open('/etc/hosts', 'a') as f:
for i in names:
f.write(z[i].to_text(n))
except IOError:
print IOError.__doc__
def main():
if not os.path.exists('/var/log/get-dns'):
run.execute_get_output('sudo', 'mkdir', '/var/log/get-dns')
print_records_stdout()
#gen_records_spidc1()
if __name__ == '__main__':
if os.getuid() == 0:
main()
else:
print 'You do not have permission, please run as root.'
exit() | __author__ = 'Taio'
import os.path
import dns.querry
import dns.zone
import logging
from utils.noflib import Noflib
run = Noflib()
logger = logging.getLogger('get-dns')
logging.basicConfig(filename='get-dns.log', level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
dcAdd = '172.20.10.75'
domainName = 'spidc1.com'
z = dns.zone.from_xfr(dns.querry.xfr(dcAdd), domainName)
names = z.nodes.keys()
names.sort()
def print_records_stdout():
'''Print records only to stdout'''
for i in names:
print z[i].to_text(n)
def gen_records_spidc1():
'''Write to /etc/hosts file'''
try:
with open('/etc/hosts', 'a') as f:
for i in names:
f.write(z[i].to_text(n))
except IOError:
print IOError.__doc__
def main():
if not os.path.exists('/var/log/get-dns'):
run.execute_get_output('sudo', 'mkdir', '/var/log/get-dns')
gen_records_spidc1()
if __name__ == 'main':
if os.getuid() == 0:
main()
else:
print 'You do not have permission, please run as root.'
exit() | mit | Python |
70434e9a752b88aae2a741bafb1427e69da8654c | Update metadata api | daniel1409/dataviva-api,DataViva/dataviva-api | app/apis/metadata_api.py | app/apis/metadata_api.py | from flask import Blueprint, jsonify, request
from inflection import singularize
from app import redis
from os import path
import pickle
blueprint = Blueprint('metadata_api', __name__, url_prefix='/metadata')
@blueprint.route('/<string:data>/<string:id>')
@blueprint.route('/<string:data>')
def api(data, id=None):
exceptions = [
'cnes_ind_sus',
'cnes_tipodeurgencia',
'cnes_atendhos',
'cnes_coletres',
'cnes',
]
if data not in exceptions:
data = singularize(data)
if id:
data = data + '/' + id
return jsonify(pickle.loads(redis.get(data)))
| from flask import Blueprint, jsonify, request
from inflection import singularize
from app import redis
from os import path
import pickle
blueprint = Blueprint('metadata_api', __name__, url_prefix='/metadata')
@blueprint.route('/<string:data>/<string:id>')
@blueprint.route('/<string:data>')
def api(data, id=None):
exceptions = [
'cnes_ind_sus',
'cnes_tipodeurgencia',
'cnes_atendhos',
'cnes_coletres'
]
if data not in exceptions:
data = singularize(data)
if id:
data = data + '/' + id
return jsonify(pickle.loads(redis.get(data)))
| mit | Python |
dda91ae23ece88abebc3ff9dbbaf163d05ceaf39 | Simplify namespace | williamjacksn/python-rainwave-client | gutter/__init__.py | gutter/__init__.py | from .client import RainwaveClient
from .channel import RainwaveChannel
from .album import RainwaveAlbum
from .artist import RainwaveArtist
from .cooldown import RainwaveCooldown
from .song import RainwaveSong
| from .client import RainwaveClient
| mit | Python |
a56c744af8c1a96631d990c2a398aedcd24a78de | Set the response's mimetype. Motion cannot process HTTP responses without an image mimetype. | flebel/rtsp2snapshot | rtsp2snapshot.py | rtsp2snapshot.py | #!/usr/bin/env python
import io
import platform
import shlex
import subprocess
from flask import Flask, request, send_file, send_from_directory
app = Flask(__name__)
tmp_dir = '/tmp/'
tmp_filename = 'snapshot.jpg'
@app.route('/<path:url>')
def snapshot(url):
freebsd_platform = platform.system() == 'FreeBSD'
if request.query_string:
url += '?' + request.query_string
# TODO: Sanitize interpolated string
cmd = 'ffmpeg -rtsp_transport tcp -i "rtsp://%s" -hide_banner -loglevel quiet -ss 00:00:01.500 -f image2 -vframes 1 -y ' % (url,)
if freebsd_platform:
cmd += tmp_dir + tmp_filename
else:
cmd += '-'
p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
p.wait()
image = p.stdout.read()
if freebsd_platform:
return send_from_directory(tmp_dir,
tmp_filename,
mimetype='image/jpeg')
return send_file(io.BytesIO(image),
attachment_filename='snapshot.jpg',
mimetype='image/jpeg')
if __name__ == '__main__':
app.run(debug=True, port=5000)
| #!/usr/bin/env python
import io
import platform
import shlex
import subprocess
from flask import Flask, request, send_file, send_from_directory
app = Flask(__name__)
tmp_dir = '/tmp/'
tmp_filename = 'snapshot.jpg'
@app.route('/<path:url>')
def snapshot(url):
freebsd_platform = platform.system() == 'FreeBSD'
if request.query_string:
url += '?' + request.query_string
# TODO: Sanitize interpolated string
cmd = 'ffmpeg -rtsp_transport tcp -i "rtsp://%s" -hide_banner -loglevel quiet -ss 00:00:01.500 -f image2 -vframes 1 -y ' % (url,)
if freebsd_platform:
cmd += tmp_dir + tmp_filename
else:
cmd += '-'
p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
p.wait()
image = p.stdout.read()
if freebsd_platform:
return send_from_directory(tmp_dir, tmp_filename)
return send_file(io.BytesIO(image),
attachment_filename='snapshot.jpg',
mimetype='image/jpeg')
if __name__ == '__main__':
app.run(debug=True, port=5000)
| bsd-3-clause | Python |
37aa3ae42dafe58ca29152f75a36ea77b792393e | rename 'project.home.user' to 'project.dash' | pkimber/search,pkimber/search,pkimber/search | example/urls.py | example/urls.py | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
from .views import HomeView
admin.autodiscover()
urlpatterns = patterns(
'',
url(regex=r'^$',
view=HomeView.as_view(),
name='project.home'
),
url(regex=r'^',
view=include('login.urls')
),
url(regex=r'^',
view=include('search.urls')
),
url(regex=r'^admin/',
view=include(admin.site.urls)
),
url(r'^home/user/$',
view=RedirectView.as_view(url=reverse_lazy('project.home')),
name='project.dash'
),
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# ^ helper function to return a URL pattern for serving files in debug mode.
# https://docs.djangoproject.com/en/1.5/howto/static-files/#serving-files-uploaded-by-a-user
urlpatterns += staticfiles_urlpatterns()
| # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
from .views import HomeView
admin.autodiscover()
urlpatterns = patterns(
'',
url(regex=r'^$',
view=HomeView.as_view(),
name='project.home'
),
url(regex=r'^',
view=include('login.urls')
),
url(regex=r'^',
view=include('search.urls')
),
url(regex=r'^admin/',
view=include(admin.site.urls)
),
url(r'^home/user/$',
view=RedirectView.as_view(url=reverse_lazy('project.home')),
name='project.home.user'
),
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# ^ helper function to return a URL pattern for serving files in debug mode.
# https://docs.djangoproject.com/en/1.5/howto/static-files/#serving-files-uploaded-by-a-user
urlpatterns += staticfiles_urlpatterns()
| apache-2.0 | Python |
813802b70368c7df5747d797628221286988285b | Simplify the XSS test slightly | alphagov/digitalmarketplace-admin-frontend,alphagov/digitalmarketplace-admin-frontend,alphagov/digitalmarketplace-admin-frontend,alphagov/digitalmarketplace-admin-frontend | tests/app/main/views/test_xss.py | tests/app/main/views/test_xss.py | # coding=utf-8
from __future__ import unicode_literals
import mock
from ...helpers import LoggedInApplicationTest
from dmapiclient import HTTPError
import urllib
class TestXSS(LoggedInApplicationTest):
@mock.patch('app.main.views.services.data_api_client')
def test_service_not_found_flash_message_injection(self, data_api_client):
"""
Asserts that raw HTML in a bad service ID cannot be injected into a flash message.
"""
# impl copied from test_services.TestServiceView#test_redirect_with_flash_for_api_client_404
api_response = mock.Mock()
api_response.status_code = 404
data_api_client.get_service.side_effect = HTTPError(api_response)
evil_service_id = "1<img src=a onerror=alert(1)>"
response1 = self.client.get('/admin/services/' + urllib.quote(evil_service_id))
response2 = self.client.get(response1.location)
self.assertNotIn(evil_service_id, response2.get_data(as_text=True))
@mock.patch('app.main.views.services.data_api_client')
def test_brief_not_found_flash_message_injection(self, data_api_client):
"""
Asserts that raw HTML in a bad brief ID cannot be injected into a flash message.
"""
# impl copied from test_buyers.TestBuyersView#test_should_display_a_useful_message_if_no_brief_found
data_api_client.get_brief.return_value = None
evil_brief_id = "1<img src=a onerror=alert(1)>"
response = self.client.get('admin/buyers?brief_id=' + urllib.quote(evil_brief_id))
self.assertNotIn(evil_brief_id, response.get_data(as_text=True))
| # coding=utf-8
from __future__ import unicode_literals
import mock
from ...helpers import LoggedInApplicationTest
from dmapiclient import HTTPError
import urllib
class TestXSS(LoggedInApplicationTest):
@mock.patch('app.main.views.services.data_api_client')
def test_service_not_found_flash_message_injection(self, data_api_client):
"""
Asserts that raw HTML in a bad service ID cannot be injected into a flash message.
"""
# impl copied from test_services.TestServiceView#test_redirect_with_flash_for_api_client_404
api_response = mock.Mock()
api_response.status_code = 404
data_api_client.get_service.side_effect = HTTPError(api_response)
evil_service_id = "1<img src=a onerror=alert(1)>"
response1 = self.client.get('/admin/services/' + urllib.quote(evil_service_id))
response2 = self.client.get(response1.location)
self.assertNotIn(
b'Error trying to retrieve service with ID: ' + evil_service_id.encode('utf8'),
response2.data)
@mock.patch('app.main.views.services.data_api_client')
def test_brief_not_found_flash_message_injection(self, data_api_client):
"""
Asserts that raw HTML in a bad brief ID cannot be injected into a flash message.
"""
# impl copied from test_buyers.TestBuyersView#test_should_display_a_useful_message_if_no_brief_found
data_api_client.get_brief.return_value = None
evil_brief_id = "1<img src=a onerror=alert(1)>"
response = self.client.get('admin/buyers?brief_id=' + urllib.quote(evil_brief_id))
self.assertNotIn(
b'There are no opportunities with ID ' + evil_brief_id.encode('utf8'),
response.data)
| mit | Python |
6c87c5c160228503f848e3039d1e33362d69f411 | Test helper fill_form: support for skipping fields and relation fields | dpausp/arguments,dpausp/arguments,dpausp/arguments,dpausp/arguments | tests/helpers/webtest_helpers.py | tests/helpers/webtest_helpers.py | from enum import Enum
def python_to_deform_value(py_value):
if py_value is True:
return 'true'
elif py_value is False:
return None
elif py_value is None:
return ''
elif isinstance(py_value, Enum):
return py_value.name
else:
return str(py_value)
def assert_deform(response, expected_form_data={}):
assert 'deform' in response.forms
form = response.forms['deform']
missing_fields = []
for field, value in expected_form_data.items():
if field == 'id':
continue
try:
form[field]
except AssertionError:
missing_fields.append(field)
continue
form_value = form[field].value
deform_value = python_to_deform_value(value)
assert form_value == deform_value, f'form field {field}: form value {form_value} != expected {deform_value}'
if missing_fields:
fields_str = ", ".join(missing_fields)
raise AssertionError(f"missing expected form fields: '{fields_str}'")
return form
def get_session(app, client):
serializer = app.browser_session_interface.get_signing_serializer(app)
assert 'session' in client.cookies
return serializer.loads(client.cookies['session'])
def _set_form_field_value(form, data, field_name, enum_field_names, relation_field_names):
if field_name in enum_field_names:
value = data[field_name].name
form.set(field_name, value)
elif field_name in relation_field_names:
value = data[field_name].id
print(field_name, value)
form.set(field_name + "_id", value)
else:
value = data[field_name]
form.set(field_name, value)
def fill_form(form, data, field_names=None, skip_field_names=[], enum_field_names=[], relation_field_names=[]):
if field_names is None:
for field_name in data:
if field_name not in skip_field_names:
_set_form_field_value(form, data, field_name, enum_field_names, relation_field_names)
else:
for field_name in field_names:
_set_form_field_value(form, data, field_name, enum_field_names, relation_field_names)
return form
| from enum import Enum
def python_to_deform_value(py_value):
if py_value is True:
return 'true'
elif py_value is False:
return None
elif py_value is None:
return ''
elif isinstance(py_value, Enum):
return py_value.name
else:
return str(py_value)
def assert_deform(response, expected_form_data={}):
assert 'deform' in response.forms
form = response.forms['deform']
missing_fields = []
for field, value in expected_form_data.items():
if field == 'id':
continue
try:
form[field]
except AssertionError:
missing_fields.append(field)
continue
form_value = form[field].value
deform_value = python_to_deform_value(value)
assert form_value == deform_value, f'form field {field}: form value {form_value} != expected {deform_value}'
if missing_fields:
fields_str = ", ".join(missing_fields)
raise AssertionError(f"missing expected form fields: '{fields_str}'")
return form
def get_session(app, client):
serializer = app.browser_session_interface.get_signing_serializer(app)
assert 'session' in client.cookies
return serializer.loads(client.cookies['session'])
def _set_form_field_value(form, data, field_name, enum_field_names):
if field_name in enum_field_names:
value = data[field_name].name
else:
value = data[field_name]
form.set(field_name, value)
def fill_form(form, data, field_names=None, enum_field_names=[]):
if field_names is None:
for field_name in data:
_set_form_field_value(form, data, field_name, enum_field_names)
else:
for field_name in field_names:
_set_form_field_value(form, data, field_name, enum_field_names)
return form
| agpl-3.0 | Python |
badbf0ee8634101d6dcdca288670ca4fcb2b871e | Update __init__.py | ianunruh/hvac,ianunruh/hvac | hvac/api/secrets_engines/__init__.py | hvac/api/secrets_engines/__init__.py | """Vault secrets engines endpoints"""
from hvac.api.secrets_engines.aws import Aws
from hvac.api.secrets_engines.azure import Azure
from hvac.api.secrets_engines.gcp import Gcp
from hvac.api.secrets_engines.ad import ActiveDirectory
from hvac.api.secrets_engines.identity import Identity
from hvac.api.secrets_engines.kv import Kv
from hvac.api.secrets_engines.pki import Pki
from hvac.api.secrets_engines.kv_v1 import KvV1
from hvac.api.secrets_engines.kv_v2 import KvV2
from hvac.api.secrets_engines.transit import Transit
from hvac.api.secrets_engines.database import Database
from hvac.api.secrets_engines.consul import Consul
from hvac.api.vault_api_category import VaultApiCategory
__all__ = (
'Aws',
'Azure',
'Gcp',
'ActiveDirectory',
'Identity',
'Kv',
'KvV1',
'KvV2',
'Pki',
'Transit',
'SecretsEngines',
'Database'
)
class SecretsEngines(VaultApiCategory):
"""Secrets Engines."""
implemented_classes = [
Aws,
Azure,
Gcp,
ActiveDirectory,
Identity,
Kv,
Pki,
Transit,
Database,
Consul,
]
unimplemented_classes = [
'AliCloud',
'Azure',
'GcpKms',
'Nomad',
'RabbitMq',
'Ssh',
'TOTP',
'Cassandra',
'MongoDb',
'Mssql',
'MySql',
'PostgreSql',
]
| """Vault secrets engines endpoints"""
from hvac.api.secrets_engines.aws import Aws
from hvac.api.secrets_engines.azure import Azure
from hvac.api.secrets_engines.gcp import Gcp
from hvac.api.secrets_engines.identity import Identity
from hvac.api.secrets_engines.kv import Kv
from hvac.api.secrets_engines.pki import Pki
from hvac.api.secrets_engines.kv_v1 import KvV1
from hvac.api.secrets_engines.kv_v2 import KvV2
from hvac.api.secrets_engines.transit import Transit
from hvac.api.secrets_engines.database import Database
from hvac.api.secrets_engines.consul import Consul
from hvac.api.vault_api_category import VaultApiCategory
__all__ = (
'Aws',
'Azure',
'Gcp',
'Identity',
'Kv',
'KvV1',
'KvV2',
'Pki',
'Transit',
'SecretsEngines',
'Database'
)
class SecretsEngines(VaultApiCategory):
"""Secrets Engines."""
implemented_classes = [
Aws,
Azure,
Gcp,
Identity,
Kv,
Pki,
Transit,
Database,
Consul,
]
unimplemented_classes = [
'Ad',
'AliCloud',
'Azure',
'GcpKms',
'Nomad',
'RabbitMq',
'Ssh',
'TOTP',
'Cassandra',
'MongoDb',
'Mssql',
'MySql',
'PostgreSql',
]
| apache-2.0 | Python |
48cb6d920aa1ab030465882e978c8b9006e38b72 | Bump app version to 2020.6.0 | kernelci/kernelci-backend,kernelci/kernelci-backend | app/handlers/__init__.py | app/handlers/__init__.py | __version__ = "2020.6.0"
__versionfull__ = __version__
| __version__ = "2020.5.0"
__versionfull__ = __version__
| lgpl-2.1 | Python |
62c7b723638bf76463209834e1c50d0728951929 | add repr for shell outputs | sibson/dynoup,sibson/dynoup | scaler/models.py | scaler/models.py | from uuid import uuid4
from dynoup import db
from sqlalchemy.dialects.postgresql import UUID, JSON
class User(db.Model):
id = db.Column(UUID(as_uuid=True), primary_key=True)
email = db.Column(db.String(120), nullable=False)
htoken = db.Column(db.String(512), nullable=False)
def __repr__(self):
return '<User {} ({})>'.format(self.email, self.id)
# track many-to-many relationship between apps and users
appusers = db.Table(
'appusers',
db.Column('user_id', UUID(as_uuid=True), db.ForeignKey('user.id')),
db.Column('app_id', UUID(as_uuid=True), db.ForeignKey('app.id'))
)
class App(db.Model):
id = db.Column(UUID(as_uuid=True), primary_key=True)
name = db.Column(db.String(128), unique=True)
users = db.relationship('User', secondary=appusers, backref=db.backref('app'), lazy='dynamic')
checks = db.relationship('Check', backref=db.backref('app'), lazy='dynamic')
def __repr__(self):
return '<App {} ({})>'.format(self.name, self.id)
class Check(db.Model):
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
app_id = db.Column(UUID(as_uuid=True), db.ForeignKey('app.id'))
url = db.Column(db.String(256))
dynotype = db.Column(db.String(64))
params = db.Column(JSON(), default={})
def __repr__(self):
return '<Check {}/{} {}>'.format(self.app.name, self.dynotype, self.url)
| from uuid import uuid4
from dynoup import db
from sqlalchemy.dialects.postgresql import UUID, JSON
class User(db.Model):
id = db.Column(UUID(as_uuid=True), primary_key=True)
email = db.Column(db.String(120), nullable=False)
htoken = db.Column(db.String(512), nullable=False)
# track many-to-many relationship between apps and users
appusers = db.Table(
'appusers',
db.Column('user_id', UUID(as_uuid=True), db.ForeignKey('user.id')),
db.Column('app_id', UUID(as_uuid=True), db.ForeignKey('app.id'))
)
class App(db.Model):
id = db.Column(UUID(as_uuid=True), primary_key=True)
name = db.Column(db.String(128), unique=True)
users = db.relationship('User', secondary=appusers, backref=db.backref('app'), lazy='dynamic')
checks = db.relationship('Check', backref=db.backref('app'), lazy='dynamic')
class Check(db.Model):
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
app_id = db.Column(UUID(as_uuid=True), db.ForeignKey('app.id'))
url = db.Column(db.String(256))
dynotype = db.Column(db.String(64))
params = db.Column(JSON(), default={})
| mit | Python |
b67baab2bd60d597275ce39c6e91bdab4a55d464 | Fix doubles teardown (#135) | uber/doubles | doubles/pytest_plugin.py | doubles/pytest_plugin.py | import pytest
from doubles.lifecycle import teardown, verify
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
outcome = yield
try:
outcome.get_result()
verify()
finally:
teardown()
| import pytest
from doubles.lifecycle import teardown, verify
@pytest.hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
outcome = yield
try:
outcome.get_result()
verify()
finally:
teardown()
| mit | Python |
e0ddd80ea2d23f9b5fc32dd8a5ea13f9cb30da49 | Add api for package search based on name and keywords | NikhilKalige/atom-website,NikhilKalige/atom-website,NikhilKalige/atom-website | app/packages/__init__.py | app/packages/__init__.py | from flask import Blueprint
packages = Blueprint('packages', __name__)
from . import views, models
from utils import github_data
def post_get_single(result=None, **kw):
result.update(result.pop("get_json"))
result.update(github_data(result['name'], result['author'], result['url']))
# runs for search request
def post_get_many(result=None, search_params=None, **kw):
for item in result["objects"]:
item.update(item.pop("get_json"))
def search_filter(search_params=None, **kw):
if (search_params is None) or search_params.get("name") is None:
return
def filter_string(name):
filter = []
filter.append(dict(name='name',
val='%' + name + '%',
op='like'
)
)
filter.append(dict(name="keywords__name",
val=name,
op="any"
))
return filter
search_params['filters'] = []
args = search_params['name'].split()
for item in args:
search_params['filters'].extend(filter_string(item))
search_params['disjunction'] = True
def api_creator(apimanager):
apimanager.create_api(models.Package, primary_key='name', methods=['GET'],
include_methods=['get_json'],
include_columns=[],
postprocessors={
'GET_SINGLE': [post_get_single],
'GET_MANY': [post_get_many]
})
apimanager.create_api(models.Package, primary_key='name',
collection_name='search',
methods=['GET'],
include_methods=['get_json'],
include_columns=[],
preprocessors={
'GET_MANY': [search_filter]
})
| from flask import Blueprint
packages = Blueprint('packages', __name__)
from . import views, models
from utils import github_data
def post_get_single(result=None, **kw):
result.update(result.pop("get_json"))
result.update(github_data(result['name'], result['author'], result['url']))
# runs for search request
def post_get_many(result=None, search_params=None, **kw):
for item in result["objects"]:
item.update(item.pop("get_json"))
def api_creator(apimanager):
apimanager.create_api(models.Package, primary_key='name', methods=['GET'],
include_methods=['get_json'],
include_columns=[],
postprocessors={
'GET_SINGLE': [post_get_single],
'GET_MANY': [post_get_many]
})
| bsd-2-clause | Python |
b6562fad5f8cfb3dec578f7c3613d75282ad4f45 | Bump version | shosca/django-rest-witchcraft | rest_witchcraft/__version__.py | rest_witchcraft/__version__.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
__author__ = 'Serkan Hosca'
__author_email__ = 'serkan@hosca.com'
__version__ = '0.0.5'
__description__ = 'SQLAlchemy specific things for django-rest-framework'
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
__author__ = 'Serkan Hosca'
__author_email__ = 'serkan@hosca.com'
__version__ = '0.0.4'
__description__ = 'SQLAlchemy specific things for django-rest-framework'
| mit | Python |
b26b3c3a2d6c0d0ca6167cd787d61c3349199ac9 | Fix bug | vismantic-ohtuprojekti/image-filtering-suite,vismantic-ohtuprojekti/qualipy | imgfilter/analyzers/reduce_colors.py | imgfilter/analyzers/reduce_colors.py | """
Analyzer for reducing the number of colors in an image to a
certain amount. Uses the k-nearest neighbors method.
"""
from analyzer import Analyzer
import cv2
import numpy
def reduce_colors(image, colors):
"""Reduces the number of colors in a given image to certain
amount. The algorithm uses the k-nearest neighbors method to
do this. The given image must have colors, meaning three color
channels. The algorithm is taken from
"http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_ml/py_kmeans
/py_kmeans_opencv/py_kmeans_opencv.html"
:param image: the image to process (must have three channels)
:type image: numpy.ndarray
:param colors: how many colors the final image should have
:type colors: int
:returns: numpy.ndarray
"""
Z = image.reshape((-1, 3)).astype(numpy.float32)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret, label, center = cv2.kmeans(data=Z, K=colors, criteria=criteria,
attempts=10, flags=cv2.KMEANS_PP_CENTERS,
bestLabels=None)
center = numpy.uint8(center)
res = center[label.flatten()]
return res.reshape(image.shape)
class ReduceColors(Analyzer):
"""Analyzer for reducing the number of colors in an image"""
def __init__(self):
"""Initializes an analyzer for reducing colors"""
self.name = 'reduce_colors'
self.data = None
def run(self, image, image_path):
"""Runs the analyzer for reducing colors
:param image: the image matrix
:type image: numpy.ndarray
:param image_path: path to the image file
:type image_path: str
"""
color_image = cv2.imread(image_path)
self.data = reduce_colors(color_image, 2)
| """
Analyzer for reducing the number of colors in an image to a
certain amount. Uses the k-nearest neighbors method.
"""
from analyzer import Analyzer
import cv2
import numpy
def reduce_colors(image, colors):
"""Reduces the number of colors in a given image to certain
amount. The algorithm uses the k-nearest neighbors method to
do this. The given image must have colors, meaning three color
channels. The algorithm is taken from
"http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_ml/py_kmeans
/py_kmeans_opencv/py_kmeans_opencv.html"
:param image: the image to process (must have three channels)
:type image: numpy.ndarray
:param colors: how many colors the final image should have
:type colors: int
:returns: numpy.ndarray
"""
Z = image.reshape((-1, 3)).astype(numpy.float32)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret, label, center = cv2.kmeans(Z, colors, criteria, 10,
cv2.KMEANS_RANDOM_CENTERS)
center = numpy.uint8(center)
res = center[label.flatten()]
return res.reshape(image.shape)
class ReduceColors(Analyzer):
"""Analyzer for reducing the number of colors in an image"""
def __init__(self):
"""Initializes an analyzer for reducing colors"""
self.name = 'reduce_colors'
self.data = None
def run(self, image, image_path):
"""Runs the analyzer for reducing colors
:param image: the image matrix
:type image: numpy.ndarray
:param image_path: path to the image file
:type image_path: str
"""
color_image = cv2.imread(image_path)
self.data = reduce_colors(color_image, 2)
| mit | Python |
f25843bfe8bbe581c575630c41f8c4e686f808a6 | Use print_function | treemo/circuits,treemo/circuits,eriol/circuits,treemo/circuits,nizox/circuits,eriol/circuits,eriol/circuits | fabfile/help.py | fabfile/help.py | # Module: help
# Date: 28th November 2013
# Author: James Mills, j dot mills at griffith dot edu dot au
"""Help Tasks"""
from __future__ import print_function
from fabric import state
from fabric.api import task
from fabric.tasks import Task
from fabric.task_utils import crawl
@task(default=True)
def help(name=None):
"""Display help for a given task
Options:
name - The task to display help on.
To display a list of available tasks type:
$ fab -l
To display help on a specific task type:
$ fab help:<name>
"""
if name is None:
name = "help"
task = crawl(name, state.commands)
if isinstance(task, Task):
doc = getattr(task, "__doc__", None)
if doc is not None:
print("Help on {0:s}:".format(name))
print()
print(doc)
else:
print("No help available for {0;s}".format(name))
else:
print("No such task {0:s}".format(name))
print("For a list of tasks type: fab -l")
| # Module: help
# Date: 28th November 2013
# Author: James Mills, j dot mills at griffith dot edu dot au
"""Help Tasks"""
from fabric import state
from fabric.api import task
from fabric.tasks import Task
from fabric.task_utils import crawl
@task(default=True)
def help(name=None):
"""Display help for a given task
Options:
name - The task to display help on.
To display a list of available tasks type:
$ fab -l
To display help on a specific task type:
$ fab help:<name>
"""
if name is None:
name = "help"
task = crawl(name, state.commands)
if isinstance(task, Task):
doc = getattr(task, "__doc__", None)
if doc is not None:
print("Help on {0:s}:".format(name))
print()
print(doc)
else:
print("No help available for {0;s}".format(name))
else:
print("No such task {0:s}".format(name))
print("For a list of tasks type: fab -l")
| mit | Python |
0b91f045260396308a2cda207765cef7e0a62f7c | work on the grapher | bh107/benchpress,bh107/benchpress,bh107/benchpress,bh107/benchpress | grapher/npbackend.py | grapher/npbackend.py | from graph import *
import numpy as np
class Npbackend(Graph):
def render(self, data, order=None, baseline=None, highest=None):
#Lets generate the vcache=10 graphs and with matmul
for s in set([script for script, bridge, vem, ve, r in data]):
res = {}
for script, bridge, vem, ve, r in data:
if script == s:
res[bridge + ve] = (np.mean(r['elapsed']), np.std(r['elapsed'])*2)
means = []
stderr = []
for r in [res['NumPy OriginalN/A'],
res['npbacked-numpy (vcache=10)N/A'],
res['npbacked-numexpr (vcache=10)N/A'],
res['BohriumCPU'],
res['npbacked-pygpu (vcache=10)N/A'],
res['BohriumGPU']]:
means.append(r[0])
stderr.append(r[1])
names = ['Native', 'NumPy', 'Numexpr', 'Bohrium-CPU', 'libgpuarray', 'Bohrium-GPU']
self.graph_title = ""
self.prep() # Prep it / clear the drawing board
idx = np.arange(len(names))
bar(idx, means, align='center', alpha=0.5, ecolor='black', yerr=stderr)
xticks(idx, names)
setp(xticks()[1], rotation=25)
xlabel("")
fig = gcf()
fig.tight_layout()
subplots_adjust()
self.to_file(s) # Spit them out to file
| from graph import *
import numpy as np
class Npbackend(Graph):
def render(self, data, order=None, baseline=None, highest=None):
#Lets generate the vcache=10 graphs and with matmul
for s in set([script for script, bridge, vem, ve, r in data]):
res = {}
for script, bridge, vem, ve, r in data:
if script == s:
res[bridge + ve] = (np.mean(r['elapsed']), np.std(r['elapsed'])*2)
means = []
stderr = []
for r in [res['NumPy OriginalN/A'],
res['npbacked-numpy (vcache=10)N/A'],
res['npbacked-numexpr (vcache=10)N/A'],
res['npbacked-pygpu (vcache=10)N/A'],
res['BohriumGPU'],
res['BohriumCPU']]:
means.append(r[0])
stderr.append(r[1])
names = ['Native', 'NumPy', 'Numexpr', 'libgpuarray', 'Bohrium-GPU', 'Bohrium-CPU']
self.graph_title = ""
self.prep() # Prep it / clear the drawing board
idx = np.arange(len(names))
bar(idx, means, align='center', alpha=0.5, ecolor='black', yerr=stderr)
xticks(idx, names)
setp(xticks()[1], rotation=25)
xlabel("")
fig = gcf()
fig.tight_layout()
subplots_adjust()
self.to_file(s) # Spit them out to file
| apache-2.0 | Python |
8b143188b79c9a8b89ad44d2921d4f4eb2ad8cf4 | Simplify definition and add typing | pyGrowler/Growler,akubera/Growler | growler/responder.py | growler/responder.py | #
# growler/responder.py
#
"""
Event loop independent class for managing clients' requests and
server responses.
"""
from typing import Optional
from asyncio import BaseTransport
from socket import socket as Socket
from abc import ABC, abstractmethod
class GrowlerResponder(ABC):
"""
Abstract base class for 'responder' objects that handle the
stream of client data.
Responders are designed to be event-loop independent, so
applications may change backend without lots of effort.
Unfortunately, this means that responders should NOT use
constructs provided by specific libraries (such as asyncio) and
instead try to use as much from standard python as they can.
"""
@abstractmethod
def on_data(self, data):
raise NotImplementedError()
class CoroutineResponder(GrowlerResponder):
"""
Special responder object that will 'send' data to a coroutine
object for processing.
"""
def __init__(self, coro):
self._coro = coro
def on_data(self, data):
self._coro.send(data)
class ResponderHandler:
"""
A common interface for classes that handle GrowlerResponder
objects.
The default implementation is the protocol object found in
growler.aio.protocol.
"""
__slots__ = (
'transport',
)
transport: Optional[BaseTransport]
@property
def socket(self) -> Optional[Socket]:
return (self.transport.get_extra_info('socket')
if self.transport is not None
else None)
@property
def peername(self):
return (self.transport.get_extra_info('peername')
if self.transport is not None
else None)
@property
def cipher(self):
return (self.transport.get_extra_info('cipher')
if self.transport is not None
else None)
@property
def remote_hostname(self):
return (self.peername[0]
if self.transport is not None
else None)
@property
def remote_port(self):
return (self.peername[1]
if self.transport is not None
else None)
# clean namespace
del ABC
del abstractmethod
del BaseTransport
del Optional
del Socket
| #
# growler/responder.py
#
"""
Event loop independent class for managing clients' requests and
server responses.
"""
import abc
class GrowlerResponder(abc.ABC):
"""
Abstract base class for 'responder' objects that handle the
stream of client data.
Responders are designed to be event-loop independent, so
applications may change backend without lots of effort.
Unfortunately, this means that responders should NOT use
constructs provided by specific libraries (such as asyncio) and
instead try to use as much from standard python as they can.
"""
@abc.abstractmethod
def on_data(self, data):
raise NotImplementedError()
class CoroutineResponder(GrowlerResponder):
"""
Special responder object that will 'send' data to a coroutine
object for processing.
"""
def __init__(self, coro):
self._coro = coro
def on_data(self, data):
self._coro.send(data)
class ResponderHandler(abc.ABC):
"""
A common interface for classes that handle GrowlerResponder
objects.
The default implementation is the protocol object found in
growler.aio.protocol.
"""
@abc.abstractproperty
def socket(self):
pass
@abc.abstractproperty
def peername(self):
pass
@abc.abstractproperty
def transport(self):
pass
@property
def remote_hostname(self):
return self.peername[0]
@property
def remote_port(self):
return self.peername[1]
| apache-2.0 | Python |
4bc50fc3552ce537727895c7dc60bf3559249457 | Tag new release: 2.2.11 | Floobits/floobits-sublime,Floobits/floobits-sublime | floo/version.py | floo/version.py | PLUGIN_VERSION = '2.2.11'
# The line above is auto-generated by tag_release.py. Do not change it manually.
try:
from .common import shared as G
assert G
except ImportError:
from common import shared as G
G.__VERSION__ = '0.03'
G.__PLUGIN_VERSION__ = PLUGIN_VERSION
| PLUGIN_VERSION = '2.2.10'
# The line above is auto-generated by tag_release.py. Do not change it manually.
try:
from .common import shared as G
assert G
except ImportError:
from common import shared as G
G.__VERSION__ = '0.03'
G.__PLUGIN_VERSION__ = PLUGIN_VERSION
| apache-2.0 | Python |
127f26069622d78c3255079808211a00dec3af64 | Fix for issue #508 MenuBarPopupPanel should not be modal | gpitel/pyjs,pombredanne/pyjs,Hasimir/pyjs,gpitel/pyjs,minghuascode/pyj,anandology/pyjamas,minghuascode/pyj,Hasimir/pyjs,lancezlin/pyjs,lancezlin/pyjs,pyjs/pyjs,spaceone/pyjs,pombredanne/pyjs,minghuascode/pyj,Hasimir/pyjs,lancezlin/pyjs,spaceone/pyjs,gpitel/pyjs,lancezlin/pyjs,spaceone/pyjs,pombredanne/pyjs,spaceone/pyjs,gpitel/pyjs,pyjs/pyjs,pombredanne/pyjs,pyjs/pyjs,anandology/pyjamas,anandology/pyjamas,pyjs/pyjs,minghuascode/pyj,anandology/pyjamas,Hasimir/pyjs | library/pyjamas/ui/MenuBarPopupPanel.py | library/pyjamas/ui/MenuBarPopupPanel.py | # Copyright 2006 James Tauber and contributors
# Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from PopupPanel import PopupPanel
class MenuBarPopupPanel(PopupPanel):
def __init__(self, item, **kwargs):
self.item = item
kwargs['Widget'] = item.getSubMenu()
PopupPanel.__init__(self, True, False, **kwargs)
item.getSubMenu().onShow()
def onEventPreview(self, event):
type = DOM.eventGetType(event)
if type == "click":
target = DOM.eventGetTarget(event)
parentMenuElement = self.item.getParentMenu().getElement()
if DOM.isOrHasChild(parentMenuElement, target):
return False
return PopupPanel.onEventPreview(self, event)
Factory.registerClass('pyjamas.ui.MenuBarPopupPanel', 'MenuBarPopupPanel', MenuBarPopupPanel)
| # Copyright 2006 James Tauber and contributors
# Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from PopupPanel import PopupPanel
class MenuBarPopupPanel(PopupPanel):
def __init__(self, item, **kwargs):
self.item = item
kwargs['Widget'] = item.getSubMenu()
PopupPanel.__init__(self, True, **kwargs)
item.getSubMenu().onShow()
def onEventPreview(self, event):
type = DOM.eventGetType(event)
if type == "click":
target = DOM.eventGetTarget(event)
parentMenuElement = self.item.getParentMenu().getElement()
if DOM.isOrHasChild(parentMenuElement, target):
return False
return PopupPanel.onEventPreview(self, event)
Factory.registerClass('pyjamas.ui.MenuBarPopupPanel', 'MenuBarPopupPanel', MenuBarPopupPanel)
| apache-2.0 | Python |
b95694b88a8b18a6d7ef932fd365949a2aa97322 | Add test vectors with decryption | jvarho/totp-client | test-totp.py | test-totp.py | #!/usr/bin/env python
# Copyright (c) 2015, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''Unit tests for totp.py'''
import unittest
import totp
import time
class TOTPTests(unittest.TestCase):
"""Tests TOTP"""
def _test_vector(self, secret, at, token, pwd=None, **kwargs):
t = totp.TOTP(secret, **kwargs)
if pwd is not None:
t.dec_key(pwd)
tmp = time.time
try:
time.time = lambda:at
o = t.token()
finally:
time.time = tmp
self.assertEqual(token, o)
def test_vector_n1(self):
self._test_vector('1234', 10, '110366')
def test_vector_n2(self):
self._test_vector('1234', 20, '110366')
def test_vector_n3(self):
self._test_vector('1234', 40, '336582')
def test_vector_n4(self):
self._test_vector('1234', 10, '8110366', h_length=7)
def test_vector_n5(self):
self._test_vector('1234', 10, '18110366', h_length=8)
def test_vector_n6(self):
self._test_vector('1234', 10, '127174', h_hash='sha256')
def test_vector_n7(self):
self._test_vector('1234', 10, '637043', h_hash='sha512')
def test_vector_n8(self):
self._test_vector('1234', 100, '110366', t_zero=90)
def test_vector_n9(self):
self._test_vector('1234', 40, '110366', t_timeout=60)
def test_vector_n10(self):
self._test_vector('1234', 100, '336582', t_timeout=60)
def test_vector_p1(self):
self._test_vector('asdf', 10, '036575', pwd='qwerty', salt='1234')
def test_vector_p2(self):
self._test_vector('asdf', 20, '036575', pwd='qwerty', salt='1234')
def test_vector_p3(self):
self._test_vector('asdf', 40, '865509', pwd='qwerty', salt='1234')
if __name__ == '__main__':
suite = unittest.TestSuite()
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TOTPTests))
unittest.TextTestRunner().run(suite)
| #!/usr/bin/env python
# Copyright (c) 2015, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''Unit tests for totp.py'''
import unittest
import totp
import time
class TOTPTests(unittest.TestCase):
"""Tests TOTP"""
def _test_vector(self, secret, at, token, **kwargs):
t = totp.TOTP(secret, **kwargs)
tmp = time.time
try:
time.time = lambda:at
o = t.token()
finally:
time.time = tmp
self.assertEqual(token, o)
def test_vector_n1(self):
self._test_vector('1234', 10, '110366')
def test_vector_n2(self):
self._test_vector('1234', 20, '110366')
def test_vector_n3(self):
self._test_vector('1234', 40, '336582')
def test_vector_n4(self):
self._test_vector('1234', 10, '8110366', h_length=7)
def test_vector_n5(self):
self._test_vector('1234', 10, '18110366', h_length=8)
def test_vector_n6(self):
self._test_vector('1234', 10, '127174', h_hash='sha256')
def test_vector_n7(self):
self._test_vector('1234', 10, '637043', h_hash='sha512')
def test_vector_n8(self):
self._test_vector('1234', 100, '110366', t_zero=90)
def test_vector_n9(self):
self._test_vector('1234', 40, '110366', t_timeout=60)
def test_vector_n10(self):
self._test_vector('1234', 100, '336582', t_timeout=60)
if __name__ == '__main__':
suite = unittest.TestSuite()
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TOTPTests))
unittest.TextTestRunner().run(suite)
| isc | Python |
9bfe2dbd37fa18ed7915e82dc8dc8515d7fe9a76 | Terminate child processes on SIGTERM signal | alfredhq/alfred-collector | alfred_collector/__main__.py | alfred_collector/__main__.py | import argparse
import signal
import yaml
from functools import partial
from .process import CollectorProcess
def get_config(path):
with open(path) as file:
return yaml.load(file)
def terminate_processes(processes, signum, frame):
for process in processes:
if process is not None and process.is_alive():
process.terminate()
process.join()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('config')
args = parser.parse_args()
config = get_config(args.config)
processes = []
database_uri = config['database_uri']
for socket_address in config['collectors']:
process = CollectorProcess(database_uri, socket_address)
process.start()
processes.append(process)
signal.signal(signal.SIGTERM, partial(terminate_processes, processes))
for process in processes:
process.join()
if __name__ == '__main__':
main()
| import argparse
import yaml
from .process import CollectorProcess
def get_config(path):
with open(path) as file:
return yaml.load(file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('config')
args = parser.parse_args()
config = get_config(args.config)
processes = []
database_uri = config['database_uri']
for socket_address in config['collectors']:
process = CollectorProcess(database_uri, socket_address)
process.start()
processes.append(process)
for process in processes:
process.join()
if __name__ == '__main__':
main()
| isc | Python |
816bec01d51a93567db6838f5afa57913ecc538c | Fix collections.iterable warning (#553) | keon/algorithms | algorithms/arrays/flatten.py | algorithms/arrays/flatten.py | """
Implement Flatten Arrays.
Given an array that may contain nested arrays,
produce a single resultant array.
"""
from collections.abc import Iterable
# return list
def flatten(input_arr, output_arr=None):
if output_arr is None:
output_arr = []
for ele in input_arr:
if isinstance(ele, Iterable):
flatten(ele, output_arr) #tail-recursion
else:
output_arr.append(ele) #produce the result
return output_arr
# returns iterator
def flatten_iter(iterable):
"""
Takes as input multi dimensional iterable and
returns generator which produces one dimensional output.
"""
for element in iterable:
if isinstance(element, Iterable):
yield from flatten_iter(element)
else:
yield element
| """
Implement Flatten Arrays.
Given an array that may contain nested arrays,
produce a single resultant array.
"""
from collections import Iterable
# return list
def flatten(input_arr, output_arr=None):
if output_arr is None:
output_arr = []
for ele in input_arr:
if isinstance(ele, Iterable):
flatten(ele, output_arr) #tail-recursion
else:
output_arr.append(ele) #produce the result
return output_arr
# returns iterator
def flatten_iter(iterable):
"""
Takes as input multi dimensional iterable and
returns generator which produces one dimensional output.
"""
for element in iterable:
if isinstance(element, Iterable):
yield from flatten_iter(element)
else:
yield element
| mit | Python |
917b84f4d985e211168967c2fda6ea0b0b2ffe61 | Add explicit branching in AirVisual diagnostics (#64493) | mezz64/home-assistant,rohitranjan1991/home-assistant,mezz64/home-assistant,rohitranjan1991/home-assistant,rohitranjan1991/home-assistant,GenericStudent/home-assistant,nkgilley/home-assistant,w1ll1am23/home-assistant,GenericStudent/home-assistant,toddeye/home-assistant,toddeye/home-assistant,w1ll1am23/home-assistant,nkgilley/home-assistant | homeassistant/components/airvisual/diagnostics.py | homeassistant/components/airvisual/diagnostics.py | """Diagnostics support for AirVisual."""
from __future__ import annotations
from types import MappingProxyType
from typing import Any
from homeassistant.components.diagnostics import REDACTED
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_STATE
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import CONF_CITY, CONF_COUNTRY, DOMAIN
CONF_COORDINATES = "coordinates"
@callback
def _async_redact_data(data: MappingProxyType | dict) -> dict[str, Any]:
"""Redact sensitive data in a dict."""
redacted = {**data}
for key, value in redacted.items():
if key in (
CONF_API_KEY,
CONF_CITY,
CONF_COORDINATES,
CONF_COUNTRY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_STATE,
):
redacted[key] = REDACTED
elif isinstance(value, dict):
redacted[key] = _async_redact_data(value)
return redacted
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator: DataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
return {
"entry": {
"title": entry.title,
"data": _async_redact_data(entry.data),
"options": _async_redact_data(entry.options),
},
"data": _async_redact_data(coordinator.data["data"]),
}
| """Diagnostics support for AirVisual."""
from __future__ import annotations
from types import MappingProxyType
from typing import Any
from homeassistant.components.diagnostics import REDACTED
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_STATE
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import CONF_CITY, CONF_COUNTRY, DOMAIN
CONF_COORDINATES = "coordinates"
@callback
def _async_redact_data(data: MappingProxyType | dict) -> dict[str, Any]:
"""Redact sensitive data in a dict."""
redacted = {**data}
for key, value in redacted.items():
if key in (
CONF_API_KEY,
CONF_CITY,
CONF_COORDINATES,
CONF_COUNTRY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_STATE,
):
redacted[key] = REDACTED
if isinstance(value, dict):
redacted[key] = _async_redact_data(value)
return redacted
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator: DataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
return {
"entry": {
"title": entry.title,
"data": _async_redact_data(entry.data),
"options": _async_redact_data(entry.options),
},
"data": _async_redact_data(coordinator.data["data"]),
}
| apache-2.0 | Python |
50b0f2e7f271fa4db2e507d0eaa559c31aecc1ee | Fix atom feed | OmeGak/indico,DirkHoffmann/indico,DirkHoffmann/indico,mvidalgarcia/indico,ThiefMaster/indico,OmeGak/indico,pferreir/indico,DirkHoffmann/indico,mvidalgarcia/indico,mic4ael/indico,ThiefMaster/indico,mic4ael/indico,indico/indico,OmeGak/indico,ThiefMaster/indico,indico/indico,DirkHoffmann/indico,mvidalgarcia/indico,indico/indico,OmeGak/indico,mvidalgarcia/indico,mic4ael/indico,pferreir/indico,mic4ael/indico,pferreir/indico,pferreir/indico,indico/indico,ThiefMaster/indico | indico/web/http_api/metadata/atom.py | indico/web/http_api/metadata/atom.py | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
import dateutil.parser
from pyatom import AtomFeed
from pytz import timezone, utc
from indico.util.string import unicodeOrNone
from indico.web.http_api.metadata.serializer import Serializer
def _deserialize_date(date_dict):
if isinstance(date_dict, datetime):
return date_dict
dt = datetime.combine(dateutil.parser.parse(date_dict['date']).date(),
dateutil.parser.parse(date_dict['time']).time())
return timezone(date_dict['tz']).localize(dt).astimezone(utc)
class AtomSerializer(Serializer):
schemaless = False
_mime = 'application/atom+xml'
def _execute(self, fossils):
results = fossils['results']
if type(results) != list:
results = [results]
feed = AtomFeed(
title='Indico Feed',
feed_url=fossils['url']
)
for fossil in results:
feed.add(
title=unicodeOrNone(fossil['title']),
summary=unicodeOrNone(fossil['description']),
url=fossil['url'],
updated=_deserialize_date(fossil['startDate']) # ugh, but that's better than creationDate
)
return feed.to_string()
| # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
# python stdlib imports
from pyatom import AtomFeed
# indico imports
from indico.util.string import unicodeOrNone
# module imports
from indico.web.http_api.metadata.serializer import Serializer
class AtomSerializer(Serializer):
schemaless = False
_mime = 'application/atom+xml'
def _execute(self, fossils):
results = fossils['results']
if type(results) != list:
results = [results]
feed = AtomFeed(
title='Indico Feed',
feed_url=fossils['url']
)
for fossil in results:
feed.add(
title=unicodeOrNone(fossil['title']),
summary=unicodeOrNone(fossil['description']),
url=fossil['url'],
updated=fossil['startDate'] # ugh, but that's better than creationDate
)
return feed.to_string()
| mit | Python |
3aadc8a8c469dcaad7217fea82b3ef92c7e9934d | remove unused Mixin | noisyboiler/wampy | wampy/roles/publisher.py | wampy/roles/publisher.py | import logging
from wampy.errors import WampyError
from wampy.messages.publish import Publish
logger = logging.getLogger('wampy.publishing')
class PublishProxy:
def __init__(self, client):
self.client = client
def __call__(self, *unsupported_args, **kwargs):
if len(unsupported_args) != 0:
raise WampyError(
"wampy only supports publishing keyword arguments "
"to a Topic."
)
topic = kwargs.pop("topic")
if not kwargs:
raise WampyError(
"wampy requires at least one message to publish to a topic"
)
message = Publish(topic=topic, options={}, **kwargs)
logger.info('publishing message: "%s"', message)
self.client.send_message(message)
| import logging
from wampy.errors import WampyError
from wampy.messages.publish import Publish
logger = logging.getLogger('wampy.publishing')
class PublishProxy:
def __init__(self, client):
self.client = client
def __call__(self, *unsupported_args, **kwargs):
if len(unsupported_args) != 0:
raise WampyError(
"wampy only supports publishing keyword arguments "
"to a Topic."
)
topic = kwargs.pop("topic")
if not kwargs:
raise WampyError(
"wampy requires at least one message to publish to a topic"
)
message = Publish(topic=topic, options={}, **kwargs)
logger.info('publishing message: "%s"', message)
self.client.send_message(message)
class PublisherMixin:
@property
def publish(self):
return PublishProxy(client=self)
| mpl-2.0 | Python |
547f5c7de0803bbec9873db7e613d3ff7cad24ed | Add server logging to contrib operations | pferreir/indico,pferreir/indico,mvidalgarcia/indico,indico/indico,indico/indico,mic4ael/indico,OmeGak/indico,pferreir/indico,OmeGak/indico,mvidalgarcia/indico,mvidalgarcia/indico,ThiefMaster/indico,ThiefMaster/indico,DirkHoffmann/indico,ThiefMaster/indico,DirkHoffmann/indico,mvidalgarcia/indico,ThiefMaster/indico,mic4ael/indico,DirkHoffmann/indico,pferreir/indico,mic4ael/indico,indico/indico,DirkHoffmann/indico,indico/indico,mic4ael/indico,OmeGak/indico,OmeGak/indico | indico/modules/events/contributions/operations.py | indico/modules/events/contributions/operations.py | # This file is part of Indico.# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import session
from indico.core.db import db
from indico.modules.events.contributions import logger
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.logs.models.entries import EventLogRealm, EventLogKind
def create_contribution(event, data):
contrib = Contribution(event_new=event)
contrib.populate_from_dict(data)
db.session.flush()
logger.info('Contribution {} created by {}'.format(contrib, session.user))
contrib.event_new.log(EventLogRealm.management, EventLogKind.positive, 'Contributions',
'Contribution "{}" has been created'.format(contrib.title), session.user)
return contrib
def update_contribution(contrib, data):
contrib.populate_from_dict(data)
db.session.flush()
logger.info('Contribution {} updated by {}'.format(contrib, session.user))
contrib.event_new.log(EventLogRealm.management, EventLogKind.change, 'Contributions',
'Contribution "{}" has been updated'.format(contrib.title), session.user)
def delete_contribution(contrib):
contrib.is_deleted = True
db.session.flush()
logger.info('Contribution {} deleted by {}'.format(contrib, session.user))
contrib.event_new.log(EventLogRealm.management, EventLogKind.negative, 'Contributions',
'Contribution "{}" has been deleted'.format(contrib.title), session.user)
| # This file is part of Indico.# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import session
from indico.core.db import db
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.logs.models.entries import EventLogRealm, EventLogKind
def create_contribution(event, data):
contrib = Contribution(event_new=event)
contrib.populate_from_dict(data)
db.session.flush()
event.log(EventLogRealm.management, EventLogKind.positive, 'Contributions',
'Contribution "{}" has been created'.format(contrib.title), session.user)
return contrib
def update_contribution(contrib, data):
contrib.populate_from_dict(data)
db.session.flush()
contrib.event_new.log(EventLogRealm.management, EventLogKind.change, 'Contributions',
'Contribution "{}" has been updated'.format(contrib.title), session.user)
def delete_contribution(contrib):
contrib.is_deleted = True
db.session.flush()
contrib.event_new.log(EventLogRealm.management, EventLogKind.negative, 'Contributions',
'Contribution "{}" has been deleted'.format(contrib.title), session.user)
| mit | Python |
c24b1eb0e269c9cd59b7ae0d9a4388fc8264b188 | Add a failing test for the linebreak presence | dulaccc/igenstrings,dulaccc/igenstrings,dulaccc/igenstrings,dulaccc/igenstrings | tests/test_merger.py | tests/test_merger.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_igenstrings
----------------------------------
Tests for `igenstrings` module.
"""
import os
import unittest
from codecs import open
from py import path
import pytest
from igenstrings.merger import Merger
@pytest.fixture(autouse=True)
def initdir(tmpdir):
fixture_basename = 'tests/objc'
fixture_path = path.local(fixture_basename)
fixture_path.copy(tmpdir / fixture_basename)
tmpdir.chdir() # change to pytest-provided temporary directory
def test_create_localizable_strings():
merger = Merger('tests/objc/new', None)
assert not os.path.exists('tests/objc/new/en.lproj/Localizable.strings')
assert not os.path.exists('tests/objc/new/fr.lproj/Localizable.strings')
merger.merge_localized_strings()
assert os.path.exists('tests/objc/new/en.lproj/Localizable.strings')
assert os.path.exists('tests/objc/new/fr.lproj/Localizable.strings')
content = None
with open('tests/objc/new/en.lproj/Localizable.strings', encoding='utf16', mode='r') as en_locale_file:
content = en_locale_file.read()
assert 'Hi' in content
def test_keep_existing_translated_strings():
merger = Merger('tests/objc/existing', None)
merger.merge_localized_strings()
content = None
with open('tests/objc/existing/fr.lproj/Localizable.strings', encoding='utf16', mode='r') as fr_locale_file:
content = fr_locale_file.read()
assert 'Bonjour' in content
def test_merge_new_translated_strings():
merger = Merger('tests/objc/existing', None)
merger.merge_localized_strings()
content = None
with open('tests/objc/existing/fr.lproj/Localizable.strings', encoding='utf16', mode='r') as fr_locale_file:
content = fr_locale_file.read()
assert 'How are you doing' in content
def test_linebreak_between_strings():
merger = Merger('tests/objc/existing', None)
merger.merge_localized_strings()
content = None
with open('tests/objc/existing/en.lproj/Localizable.strings', encoding='utf16', mode='r') as en_locale_file:
content = en_locale_file.read()
assert content == """/* title for the simple object */
"Hi %@ !" = "Hi %@ !";
/* subtitle for the simple object */
"How are you doing today" = "How are you doing today";
"""
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_igenstrings
----------------------------------
Tests for `igenstrings` module.
"""
import os
import unittest
from codecs import open
from py import path
import pytest
from igenstrings.merger import Merger
@pytest.fixture(autouse=True)
def initdir(tmpdir):
fixture_basename = 'tests/objc'
fixture_path = path.local(fixture_basename)
fixture_path.copy(tmpdir / fixture_basename)
tmpdir.chdir() # change to pytest-provided temporary directory
def test_create_localizable_strings():
merger = Merger('tests/objc/new', None)
assert not os.path.exists('tests/objc/new/en.lproj/Localizable.strings')
assert not os.path.exists('tests/objc/new/fr.lproj/Localizable.strings')
merger.merge_localized_strings()
assert os.path.exists('tests/objc/new/en.lproj/Localizable.strings')
assert os.path.exists('tests/objc/new/fr.lproj/Localizable.strings')
content = None
with open('tests/objc/new/en.lproj/Localizable.strings', encoding='utf16', mode='r') as en_locale_file:
content = en_locale_file.read()
assert 'Hi' in content
def test_keep_existing_translated_strings():
merger = Merger('tests/objc/existing', None)
merger.merge_localized_strings()
content = None
with open('tests/objc/existing/fr.lproj/Localizable.strings', encoding='utf16', mode='r') as fr_locale_file:
content = fr_locale_file.read()
assert 'Bonjour' in content
def test_merge_new_translated_strings():
merger = Merger('tests/objc/existing', None)
merger.merge_localized_strings()
content = None
with open('tests/objc/existing/fr.lproj/Localizable.strings', encoding='utf16', mode='r') as fr_locale_file:
content = fr_locale_file.read()
assert 'How are you doing' in content
| mit | Python |
692fe65ca9d24286d10e542c5028924a22036362 | Add encoding and unicode literals import | Turbasen/turbasen.py | tests/test_models.py | tests/test_models.py | # encoding: utf-8
from __future__ import unicode_literals
import pytest
import turbasen
@pytest.fixture
def configure_dev():
turbasen.configure(ENDPOINT_URL='http://dev.nasjonalturbase.no/')
@pytest.mark.skipif(turbasen.settings.Settings.API_KEY is None, reason="API key not set")
def test_get(configure_dev):
sted = turbasen.Sted.get('52407fb375049e561500004e')
assert sted.navn == u'Tjørnbrotbu'
assert sted.ssr_id == 382116
@pytest.mark.skipif(turbasen.settings.Settings.API_KEY is None, reason="API key not set")
def test_lookup(configure_dev):
results = turbasen.Sted.lookup(pages=2)
result_list = list(results)
assert len(result_list) == turbasen.settings.Settings.LIMIT * 2
assert result_list[0].object_id != ''
| import pytest
import turbasen
@pytest.fixture
def configure_dev():
turbasen.configure(ENDPOINT_URL='http://dev.nasjonalturbase.no/')
@pytest.mark.skipif(turbasen.settings.Settings.API_KEY is None, reason="API key not set")
def test_get(configure_dev):
sted = turbasen.Sted.get('52407fb375049e561500004e')
assert sted.navn == u'Tjørnbrotbu'
assert sted.ssr_id == 382116
@pytest.mark.skipif(turbasen.settings.Settings.API_KEY is None, reason="API key not set")
def test_lookup(configure_dev):
results = turbasen.Sted.lookup(pages=2)
result_list = list(results)
assert len(result_list) == turbasen.settings.Settings.LIMIT * 2
assert result_list[0].object_id != ''
| mit | Python |
50bb03002c04512c858e97f29ad1c8ea15fef701 | Edit staff and fellow ids | Alweezy/alvin-mutisya-dojo-project | tests/test_people.py | tests/test_people.py | from models.people import Person, Fellow, Staff
from unittest import TestCase
class PersonTestCases(TestCase):
"""Tests the functionality of the person parent class
"""
def setUp(self):
"""Passes an instance of class Person to all the methods in this class
"""
self.person = Person('Oluwafemi', 'Sule', 'Fellow')
def test_full_name_is_correct(self):
self.assertEqual(self.person.fname + ' ' + self.person.lname, 'Oluwafemi Sule')
class FellowTestCases(TestCase):
def setUp(self):
self.fellow = Fellow('Nadia', 'Alexis', 'Fellow')
def test_if_inherits_from_Person(self):
self.assertTrue(issubclass(Fellow, Person))
def test_person_name_is_correct(self):
self.assertEqual(self.fellow.fname + ' ' + self.fellow.lname, 'Nadia Alexis')
def test_fellow_id_generation(self):
self.assertEqual(self.fellow.id, 'fel43')
class StaffTestCases(TestCase):
def setUp(self):
self.staff = Staff('Nadia', 'Alexis', 'Staff')
def test_if_inherits_from_Person(self):
self.assertTrue(issubclass(Staff, Person))
def test_full_name_is_correct(self):
self.assertEqual(self.staff.fname + ' ' + self.staff.lname, 'Nadia Alexis')
def test_staff_id_generation(self):
self.assertEqual(self.staff.id, 'stf48')
| from models.people import Person, Fellow, Staff
from unittest import TestCase
class PersonTestCases(TestCase):
"""Tests the functionality of the person parent class
"""
def setUp(self):
"""Passes an instance of class Person to all the methods in this class
"""
self.person = Person('Oluwafemi', 'Sule', 'Fellow')
def test_full_name_is_correct(self):
self.assertEqual(self.person.fname + ' ' + self.person.lname, 'Oluwafemi Sule')
class FellowTestCases(TestCase):
def setUp(self):
self.fellow = Fellow('Nadia', 'Alexis', 'Fellow')
def test_if_inherits_from_Person(self):
self.assertTrue(issubclass(Fellow, Person))
def test_person_name_is_correct(self):
self.assertEqual(self.fellow.fname + ' ' + self.fellow.lname, 'Nadia Alexis')
def test_fellow_id_generation(self):
self.assertEqual(self.fellow.id, 'fel36')
class StaffTestCases(TestCase):
def setUp(self):
self.staff = Staff('Nadia', 'Alexis', 'Staff')
def test_if_inherits_from_Person(self):
self.assertTrue(issubclass(Staff, Person))
def test_full_name_is_correct(self):
self.assertEqual(self.staff.fname + ' ' + self.staff.lname, 'Nadia Alexis')
def test_staff_id_generation(self):
self.assertEqual(self.staff.id, 'stf41')
| mit | Python |
abd53f3dba7b2cca7dbe50cf83b3622048771a99 | add test_report() test | jendrikseipp/vulture,jendrikseipp/vulture | tests/test_script.py | tests/test_script.py | import os.path
import subprocess
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(DIR)
def call_vulture(args, **kwargs):
return subprocess.call(
[sys.executable, 'vulture.py'] + args, cwd=REPO, **kwargs)
def test_script():
assert call_vulture(['whitelist.py', 'vulture.py']) == 0
def test_exclude():
assert call_vulture(['vulture.py', '--exclude', 'vulture.py']) == 0
def test_missing_file():
assert call_vulture(['missing.py']) == 1
def test_dir():
assert call_vulture(['tests']) == 0
def test_report():
assert call_vulture(['vulture.py']) == 1
| import os.path
import subprocess
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(DIR)
def call_vulture(args, **kwargs):
return subprocess.call(
[sys.executable, 'vulture.py'] + args, cwd=REPO, **kwargs)
def test_script():
assert call_vulture(['whitelist.py', 'vulture.py']) == 0
def test_exclude():
assert call_vulture(['--exclude', 'vulture.py']) == 0
def test_missing_file():
assert call_vulture(['missing.py']) == 1
def test_dir():
assert call_vulture(['tests']) == 0
| mit | Python |
ad2ca3753ac73feccb68472c1c18f1f9201d958f | Remove now pointless test | Mause/resumable | tests/test_simple.py | tests/test_simple.py | #!/usr/bin/env python3
from nose.tools import eq_
from resumable import rebuild, value
def test_simple():
@rebuild
def function(original):
return value(original.upper())
original = 'hello'
original = function['function'](original)
eq_(original, 'HELLO')
def test_nested():
@rebuild
def first(a):
@rebuild
def second(b):
return value(b + 'b')
return value(second['second'](a))
original = 'ba'
original = first['first'](original)
eq_(original, 'bab')
if __name__ == '__main__':
test_simple()
| #!/usr/bin/env python3
from nose.tools import eq_
from resumable import rebuild, value
def test_simple():
@rebuild
def function(original):
return value(original.upper())
original = 'hello'
original = function['function'](original)
eq_(original, 'HELLO')
def test_value():
@rebuild
def function(original):
return value(original + ' world')
original = 'hello'
original = function['function'](original)
eq_(original, 'hello world')
def test_nested():
@rebuild
def first(a):
@rebuild
def second(b):
return value(b + 'b')
return value(second['second'](a))
original = 'ba'
original = first['first'](original)
eq_(original, 'bab')
if __name__ == '__main__':
test_simple()
| mit | Python |
b82cf905280a3be91dcd3ac536495643f435f7b1 | fix thing | abau171/helpmio,abau171/helpmio,abau171/helpmio | helpmio/chat.py | helpmio/chat.py | import uuid
import helpmio.event
class ChatRoom:
def __init__(self, asker_name):
self._connected_users = dict()
self._all_users = dict()
self._chat_history = []
self.on_connect = helpmio.event.EventDispatcher()
self.on_disconnect = helpmio.event.EventDispatcher()
self.on_chat = helpmio.event.EventDispatcher()
self._asker_name = asker_name
def connect(self, nickname):
connection_id = str(uuid.uuid4())
self._connected_users[connection_id] = nickname
self._all_users[connection_id] = nickname
self.on_connect(connection_id)
return connection_id
def disconnect(self, connection_id):
del self._connected_users[connection_id]
self.on_disconnect(connection_id)
def add_chat(self, connection_id, text):
chat = (connection_id, text)
self._chat_history.append(chat)
self.on_chat(chat)
def get_user(self, connection_id):
if connection_id in self._all_users:
return self._all_users[connection_id]
else:
return None
def get_connected_users(self):
return dict(self._connected_users)
def get_num_connected_users(self):
return len(self._connected_users)
def get_all_users(self):
return dict(self._all_users)
def get_chat_history(self):
return self._chat_history[:]
def get_asker_name(self):
return self._asker_name
def asker_is_connected(self):
for nickname in self._connected_users.values():
if nickname == self._asker_name:
return True
return False
def non_asker_is_connected(self):
for nickname in self._connected_users.values():
if nickname != self._asker_name:
return True
return False
| import uuid
import helpmio.event
class ChatRoom:
def __init__(self, asker_name):
self._connected_users = dict()
self._all_users = dict()
self._chat_history = []
self.on_connect = helpmio.event.EventDispatcher()
self.on_disconnect = helpmio.event.EventDispatcher()
self.on_chat = helpmio.event.EventDispatcher()
self._asker_name = asker_name
def connect(self, nickname):
connection_id = str(uuid.uuid4())
self._connected_users[connection_id] = nickname
self._all_users[connection_id] = nickname
self.on_connect(connection_id)
return connection_id
def disconnect(self, connection_id):
del self._connected_users[connection_id]
self.on_disconnect(connection_id)
def add_chat(self, connection_id, text):
chat = (connection_id, text)
self._chat_history.append(chat)
self.on_chat(chat)
def get_user(self, connection_id):
return self._all_users[connection_id]
def get_connected_users(self):
return dict(self._connected_users)
def get_num_connected_users(self):
return len(self._connected_users)
def get_all_users(self):
return dict(self._all_users)
def get_chat_history(self):
return self._chat_history[:]
def get_asker_name(self):
return self._asker_name
def asker_is_connected(self):
for nickname in self._connected_users.values():
if nickname == self._asker_name:
return True
return False
def non_asker_is_connected(self):
for nickname in self._connected_users.values():
if nickname != self._asker_name:
return True
return False
| mit | Python |
1fdd2c838f0a947ff34770ba4660c4662adfeb32 | Allow deciseconds | xeroc/python-graphenelib | graphenecommon/utils.py | graphenecommon/utils.py | # -*- coding: utf-8 -*-
import re
import time
from datetime import datetime, timezone
timeFormat = "%Y-%m-%dT%H:%M:%S"
def formatTime(t):
""" Properly Format Time for permlinks
"""
if isinstance(t, float):
return datetime.utcfromtimestamp(t).strftime(timeFormat)
if isinstance(t, datetime):
return t.strftime(timeFormat)
def formatTimeString(t):
""" Properly Format Time for permlinks
"""
try:
return datetime.strptime(t, timeFormat)
except Exception:
# Allow deci seconds
return datetime.strptime(t, "%Y-%m-%dT%H:%M:%S.%f")
def formatTimeFromNow(secs=None):
""" Properly Format Time that is `x` seconds in the future
:param int secs: Seconds to go in the future (`x>0`) or the
past (`x<0`)
:return: Properly formated time for Graphene (`%Y-%m-%dT%H:%M:%S`)
:rtype: str
"""
return datetime.utcfromtimestamp(time.time() + int(secs or 0)).strftime(timeFormat)
def parse_time(block_time):
"""Take a string representation of time from the blockchain, and parse it
into datetime object.
"""
return formatTimeString(block_time).replace(tzinfo=timezone.utc)
def assets_from_string(text):
"""Correctly split a string containing an asset pair.
Splits the string into two assets with the separator being on of the
following: ``:``, ``/``, or ``-``.
"""
return re.split(r"[\-:/]", text)
| import re
import time
from datetime import datetime, timezone
timeFormat = "%Y-%m-%dT%H:%M:%S"
def formatTime(t):
""" Properly Format Time for permlinks
"""
if isinstance(t, float):
return datetime.utcfromtimestamp(t).strftime(timeFormat)
if isinstance(t, datetime):
return t.strftime(timeFormat)
def formatTimeString(t):
""" Properly Format Time for permlinks
"""
return datetime.strptime(t, timeFormat)
def formatTimeFromNow(secs=None):
""" Properly Format Time that is `x` seconds in the future
:param int secs: Seconds to go in the future (`x>0`) or the
past (`x<0`)
:return: Properly formated time for Graphene (`%Y-%m-%dT%H:%M:%S`)
:rtype: str
"""
return datetime.utcfromtimestamp(time.time() + int(secs or 0)).strftime(timeFormat)
def parse_time(block_time):
"""Take a string representation of time from the blockchain, and parse it
into datetime object.
"""
return datetime.strptime(block_time, timeFormat).replace(tzinfo=timezone.utc)
def assets_from_string(text):
"""Correctly split a string containing an asset pair.
Splits the string into two assets with the separator being on of the
following: ``:``, ``/``, or ``-``.
"""
return re.split(r"[\-:/]", text)
| mit | Python |
2a38fd96cba20048264a67980b552489c8c2762f | Support for direct assignment of values of function calls | ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang | thinglang/parser/symbols/base.py | thinglang/parser/symbols/base.py | import struct
from thinglang.compiler import CompilationContext, BytecodeSymbols
from thinglang.lexer.tokens import LexicalToken
from thinglang.lexer.tokens.base import LexicalIdentifier
from thinglang.parser.symbols import BaseSymbol
from thinglang.parser.symbols.functions import MethodCall
from thinglang.utils.type_descriptors import ValueType
class AssignmentOperation(BaseSymbol):
DECELERATION = object()
REASSIGNMENT = object()
INDETERMINATE = object()
def __init__(self, slice):
super(AssignmentOperation, self).__init__(slice)
self.target = None
if len(slice) == 4:
_1, self.name, _2, self.value = slice
self.name.type = slice[0]
self.intent = self.DECELERATION
else:
self.name, _, self.value = slice
self.intent = self.REASSIGNMENT
def describe(self):
return '{} = {}'.format(self.name, self.value)
def references(self):
return (self.name, self.value.references()) if self.intent is self.REASSIGNMENT else self.value.references()
@classmethod
def create(cls, name, value, type=None):
return cls(([type] if type is not None else []) + [name, None, value])
def transpile(self):
if self.intent is self.DECELERATION:
return '{} {} = {};'.format(self.name.type.transpile(), self.name.transpile(), self.value.transpile())
elif self.intent is self.REASSIGNMENT:
return '{} = {};'.format(self.name.transpile(), self.value.transpile())
def compile(self, context: CompilationContext):
if self.value.implements(MethodCall):
self.value.compile(context, returns=True)
context.append(BytecodeSymbols.set(self.target))
elif self.value.STATIC:
context.append(BytecodeSymbols.set_static(self.target, context.append_static(self.value.serialize())))
class InlineString(LexicalToken, ValueType): # immediate string e.g. "hello world"
STATIC = True
TYPE = LexicalIdentifier("text")
def __init__(self, value):
super().__init__(None)
self.value = value
def evaluate(self, _):
return self.value
def serialize(self):
return struct.pack('<iI', -1, len(self.value)) + bytes(self.value, 'utf-8')
def references(self):
return ()
def transpile(self):
return f'"{self.value}"'
def type(self):
return self.TYPE
| import struct
from thinglang.compiler import CompilationContext, BytecodeSymbols
from thinglang.lexer.tokens import LexicalToken
from thinglang.lexer.tokens.base import LexicalIdentifier
from thinglang.parser.symbols import BaseSymbol
from thinglang.utils.type_descriptors import ValueType
class AssignmentOperation(BaseSymbol):
DECELERATION = object()
REASSIGNMENT = object()
INDETERMINATE = object()
def __init__(self, slice):
super(AssignmentOperation, self).__init__(slice)
self.target = None
if len(slice) == 4:
_1, self.name, _2, self.value = slice
self.name.type = slice[0]
self.intent = self.DECELERATION
else:
self.name, _, self.value = slice
self.intent = self.REASSIGNMENT
def describe(self):
return '{} = {}'.format(self.name, self.value)
def references(self):
return (self.name, self.value.references()) if self.intent is self.REASSIGNMENT else self.value.references()
@classmethod
def create(cls, name, value, type=None):
return cls(([type] if type is not None else []) + [name, None, value])
def transpile(self):
if self.intent is self.DECELERATION:
return '{} {} = {};'.format(self.name.type.transpile(), self.name.transpile(), self.value.transpile())
elif self.intent is self.REASSIGNMENT:
return '{} = {};'.format(self.name.transpile(), self.value.transpile())
def compile(self, context: CompilationContext):
context.append(BytecodeSymbols.set_static(self.target, context.append_static(self.value.serialize())))
class InlineString(LexicalToken, ValueType): # immediate string e.g. "hello world"
STATIC = True
TYPE = LexicalIdentifier("text")
def __init__(self, value):
super().__init__(None)
self.value = value
def evaluate(self, _):
return self.value
def serialize(self):
return struct.pack('<iI', -1, len(self.value)) + bytes(self.value, 'utf-8')
def references(self):
return ()
def transpile(self):
return f'"{self.value}"'
def type(self):
return self.TYPE
| mit | Python |
fab3a21e70b9ee83b948c0698b25639f374d5851 | fix bug - deduplicator | robcza/intelmq,pkug/intelmq,sch3m4/intelmq,aaronkaplan/intelmq,sch3m4/intelmq,robcza/intelmq,robcza/intelmq,pkug/intelmq,aaronkaplan/intelmq,certtools/intelmq,aaronkaplan/intelmq,sch3m4/intelmq,sch3m4/intelmq,certtools/intelmq,pkug/intelmq,robcza/intelmq,certtools/intelmq,pkug/intelmq | intelmq/bots/experts/deduplicator/deduplicator.py | intelmq/bots/experts/deduplicator/deduplicator.py | from copy import deepcopy
from intelmq.lib.bot import Bot, sys
from intelmq.lib.cache import Cache
from intelmq.lib.message import Event
class DeduplicatorBot(Bot):
def init(self):
self.cache = Cache(
self.parameters.redis_cache_host,
self.parameters.redis_cache_port,
self.parameters.redis_cache_db,
self.parameters.redis_cache_ttl
)
def process(self):
message = self.receive_message()
if message:
# Event deduplication
if isinstance(message, Event):
event = deepcopy(message)
event.clear("observation_time")
message_hash = hash(event)
# Generic message deduplication
else:
message_hash = hash(message)
if not self.cache.exists(message_hash):
self.cache.set(message_hash, 'hash')
self.send_message(message)
self.acknowledge_message()
if __name__ == "__main__":
bot = DeduplicatorBot(sys.argv[1])
bot.start()
| from copy import copy
from intelmq.lib.bot import Bot, sys
from intelmq.lib.cache import Cache
from intelmq.lib.message import Event
class DeduplicatorBot(Bot):
def init(self):
self.cache = Cache(
self.parameters.redis_cache_host,
self.parameters.redis_cache_port,
self.parameters.redis_cache_db,
self.parameters.redis_cache_ttl
)
def process(self):
message = self.receive_message()
if message:
# Event deduplication
if isinstance(message, Event):
event = copy(message)
event.clear("observation_time")
message_hash = hash(event)
# Generic message deduplication
else:
message_hash = hash(message)
if not self.cache.exists(message_hash):
self.send_message(message)
self.cache.set(message_hash, 'hash')
self.acknowledge_message()
if __name__ == "__main__":
bot = DeduplicatorBot(sys.argv[1])
bot.start()
| agpl-3.0 | Python |
04319b38278269d34d253c4be973c1309edc579c | Add missing no cover to util | explosion/thinc,spacy-io/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc | thinc/neural/util.py | thinc/neural/util.py | from __future__ import print_function, unicode_literals
import numpy
from .ops import NumpyOps, CupyOps
def get_ops(ops):
if ops in ('numpy', 'cpu'):
return NumpyOps()
elif ops in ('cupy', 'gpu'):
return CupyOps()
else:
raise ValueError("TODO error %s" % ops)
def mark_sentence_boundaries(sequences, drop=0.): # pragma: no cover
'''Pad sentence sequences with EOL markers.'''
for sequence in sequences:
sequence.insert(0, '-EOL-')
sequence.insert(0, '-EOL-')
sequence.append('-EOL-')
sequence.append('-EOL-')
return sequences, None
def flatten_sequences(sequences, drop=0.): # pragma: no cover
ops = NumpyOps()
return ops.flatten(sequences), None
def partition(examples, split_size): # pragma: no cover
examples = list(examples)
numpy.random.shuffle(examples)
n_docs = len(examples)
split = int(n_docs * split_size)
return examples[:split], examples[split:]
def minibatch(stream, batch_size=1000): # pragma: no cover
batch = []
for X in stream:
batch.append(X)
if len(batch) >= batch_size:
yield batch
batch = []
if len(batch) != 0:
yield batch
| from __future__ import print_function, unicode_literals
import numpy
from .ops import NumpyOps, CupyOps
def get_ops(ops):
if ops in ('numpy', 'cpu'):
return NumpyOps()
elif ops in ('cupy', 'gpu'):
return CupyOps()
else:
raise ValueError("TODO error %s" % ops)
def mark_sentence_boundaries(sequences, drop=0.): # pragma: no cover
'''Pad sentence sequences with EOL markers.'''
for sequence in sequences:
sequence.insert(0, '-EOL-')
sequence.insert(0, '-EOL-')
sequence.append('-EOL-')
sequence.append('-EOL-')
return sequences, None
def flatten_sequences(sequences, drop=0.):
ops = NumpyOps()
return ops.flatten(sequences), None
def partition(examples, split_size): # pragma: no cover
examples = list(examples)
numpy.random.shuffle(examples)
n_docs = len(examples)
split = int(n_docs * split_size)
return examples[:split], examples[split:]
def minibatch(stream, batch_size=1000): # pragma: no cover
batch = []
for X in stream:
batch.append(X)
if len(batch) >= batch_size:
yield batch
batch = []
if len(batch) != 0:
yield batch
| mit | Python |
ad56a9cac7f0e62ef901a52589a62be3a2c81c46 | Add secret key to testing settings | ministryofjustice/manchester_traffic_offences_pleas,ministryofjustice/manchester_traffic_offences_pleas,ministryofjustice/manchester_traffic_offences_pleas,ministryofjustice/manchester_traffic_offences_pleas | api/settings/testing.py | api/settings/testing.py | from .base import *
ADMINS = (
('Lyndon Garvey', 'lyndon.garvey@digital.justice.gov.uk'),
)
SECRET_KEY = "Trdfgjgfghfdgjlfdtr_+@3gvuedrs873w" | from .base import *
ADMINS = (
('Lyndon Garvey', 'lyndon.garvey@digital.justice.gov.uk'),
) | mit | Python |
7c91cfa7e68d4f9b54a4380915235d5a443e0dc3 | Remove benchmark in create_record | plamut/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core | src/ggrc/fulltext/sql.py | src/ggrc/fulltext/sql.py | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""SQL routines for full-text indexing."""
from ggrc import db
from ggrc.fulltext import Indexer
class SqlIndexer(Indexer):
def create_record(self, record, commit=True):
for prop, value in record.properties.items():
for subproperty, content in value.items():
if content:
db.session.add(self.record_type(
key=record.key,
type=record.type,
context_id=record.context_id,
tags=record.tags,
property=prop,
subproperty=subproperty,
content=content,
))
if commit:
db.session.commit()
def update_record(self, record, commit=True):
# remove the obsolete index entries
if record.properties:
db.session.query(self.record_type).filter(
self.record_type.key == record.key,
self.record_type.type == record.type,
self.record_type.property.in_(list(record.properties.keys())),
).delete(synchronize_session="fetch")
# add new index entries
self.create_record(record, commit=commit)
def delete_record(self, key, type, commit=True):
db.session.query(self.record_type).filter(
self.record_type.key == key,
self.record_type.type == type).delete()
if commit:
db.session.commit()
def delete_all_records(self, commit=True):
db.session.query(self.record_type).delete()
if commit:
db.session.commit()
def delete_records_by_type(self, type, commit=True):
db.session.query(self.record_type).filter(
self.record_type.type == type).delete()
if commit:
db.session.commit()
| # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""SQL routines for full-text indexing."""
from ggrc import db
from ggrc.fulltext import Indexer
from ggrc.utils import benchmark
class SqlIndexer(Indexer):
def create_record(self, record, commit=True):
with benchmark("Add fulltext records: create_record -> submit to db"):
for prop, value in record.properties.items():
for subproperty, content in value.items():
if content:
db.session.add(self.record_type(
key=record.key,
type=record.type,
context_id=record.context_id,
tags=record.tags,
property=prop,
subproperty=subproperty,
content=content,
))
if commit:
db.session.commit()
def update_record(self, record, commit=True):
# remove the obsolete index entries
if record.properties:
db.session.query(self.record_type).filter(
self.record_type.key == record.key,
self.record_type.type == record.type,
self.record_type.property.in_(list(record.properties.keys())),
).delete(synchronize_session="fetch")
# add new index entries
self.create_record(record, commit=commit)
def delete_record(self, key, type, commit=True):
db.session.query(self.record_type).filter(
self.record_type.key == key,
self.record_type.type == type).delete()
if commit:
db.session.commit()
def delete_all_records(self, commit=True):
db.session.query(self.record_type).delete()
if commit:
db.session.commit()
def delete_records_by_type(self, type, commit=True):
db.session.query(self.record_type).filter(
self.record_type.type == type).delete()
if commit:
db.session.commit()
| apache-2.0 | Python |
b1b2a3db2bc2e764e895dacf1c44ed1e674c7270 | Update validate.py | timothycrosley/hug,timothycrosley/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,timothycrosley/hug | hug/validate.py | hug/validate.py | """hug/validate.py
Defines hugs built-in validation methods
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
def all(*validators):
"""Validation only succeeds if all passed in validators return no errors"""
def validate_all(fields):
for validator in validators:
errors = validator(fields)
if errors:
return errors
validate_all.__doc__ = " and ".join(validator.__doc__ for validator in validators)
return validate_all
def any(*validators):
"""If any of the specified validators pass the validation succeeds"""
def validate_any(fields):
errors = {}
for validator in validators:
validation_errors = validator(fields)
if not validation_errors:
return
errors.update(validation_errors)
return errors
validate_any.__doc__ = " or ".join(validator.__doc__ for validator in validators)
return validate_any
def contains_one_of(*fields):
"""Enables ensuring that one of multiple optional fields is set"""
message = 'Must contain any one of the following fields: {0}'.format(', '.join(fields))
def check_contains(endpoint_fields):
for field in fields:
if field in endpoint_fields:
return
errors = {}
for field in fields:
errors[field] = 'one of these must have a value'
return errors
check_contains.__doc__ = message
return check_contains
| """hug/validate.py
Defines hugs built-in validation methods
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
def all(*validators):
"""Validation only succeeds if all passed in validators return no errors"""
def validate_all(fields):
for validator in validators:
errors = validator(fields)
if errors:
return errors
validate_all.__doc__ = " and ".join(validator.__doc__ for validator in validators)
return validate_all
def any(*validators):
"""If any of the specified validators pass the validation succeeds"""
def validate_any(fields):
errors = {}
for validator in validators:
validation_errors = validator(fields)
if not validation_errors:
return
errors.update(validation_errors)
return errors
validate_any.__doc__ = " or ".join(validator.__doc__ for validator in validators)
return validate_any
def contains_one_of(*fields):
"""Enables ensuring that one of multiple optional fields is set"""
message = 'Must contain any one of the following fields: {0}'.format(', '.join(fields))
def check_contains(endpoint_fields, **kwargs):
for field in fields:
if field in endpoint_fields:
return
errors = {}
for field in fields:
errors[field] = 'one of these must have a value'
return errors
check_contains.__doc__ = message
return check_contains
| mit | Python |
70593ae9b29577cc9b038209c8fa173cf3883e47 | Update tweetimporter.py | franckbrignoli/twitter-bot-detection | libraries/tweetimporter.py | libraries/tweetimporter.py | import unicodedata
class TweetImporter(object):
def __init__(self, twitter_client, database):
self.twitter_client = twitter_client
self.database = database
def importData(self, user, tweets_number=10):
followers = self.twitter_client.followers_list(screen_name=user, count=1)
for j, follower in enumerate(followers):
tweets = self.twitter_client.user_timeline(screen_name=follower, count=tweets_number)
mentions_list = []
for i, status in enumerate(tweets):
tweet = status._json
text = tweet['text']
date = tweet['created_at']
entities = tweet['entities']
user_mentions = entities['user_mentions']
if len(user_mentions) > 0:
for mention in user_mentions:
mentions_list.append(mention['screen_name'])
text_string = unicodedata.normalize('NFKD', text).encode('ascii','ignore')
date_string = unicodedata.normalize('NFKD', date).encode('ascii','ignore')
name_mentions_string = ",".join(mentions_list)
self.database.feed_table(
user,
text_string,
date_string,
name_mentions_string)
| import unicodedata
class TweetImporter(object):
def __init__(self, twitter_client, database):
self.twitter_client = twitter_client
self.database = database
def importData(self, user, tweets_number=10):
followers = self.twitter_client.followers_list(screen_name=user, count=1)
for j, follower in enumerate(followers):
tweets = self.twitter_client.user_timeline(screen_name=follower, count=tweets_number)
mentions_list = []
for i, status in enumerate(tweets):
tweet = status._json
text = tweet['text']
date = tweet['created_at']
entities = tweet['entities']
user_mentions = entities['user_mentions']
if len(user_mentions) > 0:
for mention in user_mentions:
mentions_list.append(mention['screen_name'])
id_string = str(i)+"_"+str(j)
text_string = unicodedata.normalize('NFKD', text).encode('ascii','ignore')
date_string = unicodedata.normalize('NFKD', date).encode('ascii','ignore')
name_mentions_string = ",".join(mentions_list)
self.database.feed_table(id_string,
user,
text_string,
date_string,
name_mentions_string) | mit | Python |
a90554993c47987f9b253330a8858dad254766ec | Fix variable. | sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator | Demo/scripts/script.py | Demo/scripts/script.py | #! /usr/bin/env python
# script.py -- Make typescript of terminal session.
# Usage:
# -a Append to typescript.
# -p Use Python as shell.
# Author: Steen Lumholt.
import os, time, sys, getopt
import pty
def read(fd):
data = os.read(fd, 1024)
script.write(data)
return data
shell = 'sh'
filename = 'typescript'
mode = 'w'
if os.environ.has_key('SHELL'):
shell = os.environ['SHELL']
try:
opts, args = getopt.getopt(sys.argv[1:], 'ap')
except getopt.error, msg:
print '%s: %s' % (sys.argv[0], msg)
sys.exit(2)
for o, a in opts:
if o == '-a':
mode = 'a'
elif o == '-p':
shell = 'python'
script = open(filename, mode)
sys.stdout.write('Script started, file is %s\n' % filename)
script.write('Script started on %s\n' % time.ctime(time.time()))
pty.spawn(shell, read)
script.write('Script done on %s\n' % time.ctime(time.time()))
sys.stdout.write('Script done, file is %s\n' % filename)
| #! /usr/bin/env python
# script.py -- Make typescript of terminal session.
# Usage:
# -a Append to typescript.
# -p Use Python as shell.
# Author: Steen Lumholt.
import os, time, sys, getopt
import pty
def read(fd):
data = os.read(fd, 1024)
file.write(data)
return data
shell = 'sh'
filename = 'typescript'
mode = 'w'
if os.environ.has_key('SHELL'):
shell = os.environ['SHELL']
try:
opts, args = getopt.getopt(sys.argv[1:], 'ap')
except getopt.error, msg:
print '%s: %s' % (sys.argv[0], msg)
sys.exit(2)
for o, a in opts:
if o == '-a':
mode = 'a'
elif o == '-p':
shell = 'python'
script = open(filename, mode)
sys.stdout.write('Script started, file is %s\n' % filename)
script.write('Script started on %s\n' % time.ctime(time.time()))
pty.spawn(shell, read)
script.write('Script done on %s\n' % time.ctime(time.time()))
sys.stdout.write('Script done, file is %s\n' % filename)
| mit | Python |
ec0ec8a36e45f68676aa88ba5dbf7889cf1aab79 | Improve logging | daevaorn/sentry,looker/sentry,ifduyue/sentry,mvaled/sentry,beeftornado/sentry,gencer/sentry,looker/sentry,fotinakis/sentry,JackDanger/sentry,looker/sentry,BuildingLink/sentry,zenefits/sentry,fotinakis/sentry,daevaorn/sentry,jean/sentry,JamesMura/sentry,JackDanger/sentry,BuildingLink/sentry,mvaled/sentry,gencer/sentry,nicholasserra/sentry,JackDanger/sentry,jean/sentry,mvaled/sentry,daevaorn/sentry,jean/sentry,nicholasserra/sentry,beeftornado/sentry,BayanGroup/sentry,ifduyue/sentry,mitsuhiko/sentry,alexm92/sentry,zenefits/sentry,imankulov/sentry,ifduyue/sentry,jean/sentry,zenefits/sentry,zenefits/sentry,ifduyue/sentry,daevaorn/sentry,mvaled/sentry,JamesMura/sentry,mvaled/sentry,BayanGroup/sentry,BuildingLink/sentry,alexm92/sentry,gencer/sentry,jean/sentry,JamesMura/sentry,beeftornado/sentry,BuildingLink/sentry,JamesMura/sentry,looker/sentry,fotinakis/sentry,gencer/sentry,mitsuhiko/sentry,alexm92/sentry,BayanGroup/sentry,ifduyue/sentry,mvaled/sentry,looker/sentry,gencer/sentry,zenefits/sentry,nicholasserra/sentry,JamesMura/sentry,fotinakis/sentry,imankulov/sentry,BuildingLink/sentry,imankulov/sentry | src/sentry/tasks/sync_docs.py | src/sentry/tasks/sync_docs.py | from __future__ import absolute_import, print_function
import logging
from sentry.tasks.base import instrumented_task
BASE_URL = 'https://docs.getsentry.com/hosted/_platforms/{}'
logger = logging.getLogger('sentry')
def get_integration_id(platform_id, integration_id):
if integration_id == '_self':
return platform_id
return '{}-{}'.format(platform_id, integration_id)
@instrumented_task(name='sentry.tasks.sync_docs', queue='update')
def sync_docs():
from sentry import http, options
session = http.build_session()
logger.info('Syncing documentation (platform index)')
data = session.get(BASE_URL.format('_index.json')).json()
platform_list = []
for platform_id, integrations in data['platforms'].iteritems():
platform_list.append({
'id': platform_id,
'name': integrations['_self']['name'],
'integrations': [
{
'id': get_integration_id(platform_id, i_id),
'name': i_data['name'],
'type': i_data['type'],
'link': i_data['doc_link'],
} for i_id, i_data in sorted(
integrations.iteritems(),
key=lambda x: x[1]['name']
)
],
})
platform_list.sort(key=lambda x: x['name'])
options.set('sentry:docs', {'platforms': platform_list})
for platform_id, platform_data in data['platforms'].iteritems():
for integration_id, integration in platform_data.iteritems():
logger.info('Syncing documentation for %s.%s integration',
platform_id, integration_id)
sync_integration(platform_id, integration_id, integration['details'])
def sync_integration(platform_id, integration_id, path):
from sentry import http, options
session = http.build_session()
data = session.get(BASE_URL.format(path)).json()
key = get_integration_id(platform_id, integration_id)
options.set('sentry:docs:{}'.format(key), {
'id': key,
'name': data['name'],
'html': data['body'],
'link': data['doc_link'],
})
| from __future__ import absolute_import, print_function
import logging
from sentry.tasks.base import instrumented_task
BASE_URL = 'https://docs.getsentry.com/hosted/_platforms/{}'
logger = logging.getLogger('sentry')
def get_integration_id(platform_id, integration_id):
if integration_id == '_self':
return platform_id
return '{}-{}'.format(platform_id, integration_id)
@instrumented_task(name='sentry.tasks.sync_docs', queue='update')
def sync_docs():
from sentry import http, options
session = http.build_session()
logger.info('Syncing documentation (platform index)')
data = session.get(BASE_URL.format('_index.json')).json()
platform_list = []
for platform_id, integrations in data['platforms'].iteritems():
platform_list.append({
'id': platform_id,
'name': integrations['_self']['name'],
'integrations': [
{
'id': get_integration_id(platform_id, i_id),
'name': i_data['name'],
'type': i_data['type'],
'link': i_data['doc_link'],
} for i_id, i_data in sorted(
integrations.iteritems(),
key=lambda x: x[1]['name']
)
],
})
platform_list.sort(key=lambda x: x['name'])
options.set('sentry:docs', {'platforms': platform_list})
for platform_id, platform_data in data['platforms'].iteritems():
for integration_id, integration in platform_data.iteritems():
logger.info('Syncing documentation for %s integration', integration_id)
sync_integration(platform_id, integration_id, integration['details'])
def sync_integration(platform_id, integration_id, path):
from sentry import http, options
session = http.build_session()
data = session.get(BASE_URL.format(path)).json()
key = get_integration_id(platform_id, integration_id)
options.set('sentry:docs:{}'.format(key), {
'id': key,
'name': data['name'],
'html': data['body'],
'link': data['doc_link'],
})
| bsd-3-clause | Python |
75080e6f0da4f699ef1eb89310847befeccfab40 | Check for deprecation on import is problematic. Rather just check that filter can be imported normally. | michaelaye/scikit-image,warmspringwinds/scikit-image,juliusbierk/scikit-image,michaelpacer/scikit-image,ofgulban/scikit-image,vighneshbirodkar/scikit-image,oew1v07/scikit-image,chriscrosscutler/scikit-image,pratapvardhan/scikit-image,robintw/scikit-image,paalge/scikit-image,vighneshbirodkar/scikit-image,youprofit/scikit-image,newville/scikit-image,blink1073/scikit-image,youprofit/scikit-image,keflavich/scikit-image,keflavich/scikit-image,oew1v07/scikit-image,paalge/scikit-image,michaelpacer/scikit-image,juliusbierk/scikit-image,bsipocz/scikit-image,Britefury/scikit-image,robintw/scikit-image,chriscrosscutler/scikit-image,WarrenWeckesser/scikits-image,jwiggins/scikit-image,pratapvardhan/scikit-image,ClinicalGraphics/scikit-image,ajaybhat/scikit-image,rjeli/scikit-image,ofgulban/scikit-image,WarrenWeckesser/scikits-image,paalge/scikit-image,GaZ3ll3/scikit-image,vighneshbirodkar/scikit-image,emon10005/scikit-image,Midafi/scikit-image,rjeli/scikit-image,bsipocz/scikit-image,Britefury/scikit-image,jwiggins/scikit-image,ofgulban/scikit-image,warmspringwinds/scikit-image,newville/scikit-image,dpshelio/scikit-image,ajaybhat/scikit-image,Midafi/scikit-image,bennlich/scikit-image,emon10005/scikit-image,blink1073/scikit-image,rjeli/scikit-image,GaZ3ll3/scikit-image,michaelaye/scikit-image,bennlich/scikit-image,Hiyorimi/scikit-image,dpshelio/scikit-image,Hiyorimi/scikit-image,ClinicalGraphics/scikit-image | skimage/filter/tests/test_filter_import.py | skimage/filter/tests/test_filter_import.py | from numpy.testing import assert_warns
from warnings import catch_warnings, simplefilter
def test_import_filter():
with catch_warnings():
simplefilter('ignore')
from skimage import filter as F
assert('sobel' in dir(F))
| from skimage._shared.utils import all_warnings, skimage_deprecation
from numpy.testing import assert_warns
def import_filter():
from skimage import filter as F
assert('sobel' in dir(F))
def test_filter_import():
with all_warnings():
assert_warns(skimage_deprecation, import_filter)
| bsd-3-clause | Python |
3778014fb18f5a34a214d0fae3f7b57ab866284a | Exclude MPs who are no longer members of a fraction. | ManoSeimas/manoseimas.lt,ManoSeimas/manoseimas.lt,ManoSeimas/manoseimas.lt,ManoSeimas/manoseimas.lt | manoseimas/mps_v2/views.py | manoseimas/mps_v2/views.py | from django.shortcuts import render
from django.utils.safestring import mark_safe
from couchdbkit.exceptions import ResourceNotFound
from sboard.models import couch
from manoseimas.mps.nodes import prepare_position_list
from .models import ParliamentMember, GroupMembership, Group
def mp_list(request, fraction_slug=None):
def extract(mp):
return {
'id': mp.id,
'full_name': mp.full_name,
'slug': mp.slug
}
fractions = Group.objects.filter(type=Group.TYPE_FRACTION)
if fraction_slug:
fraction = GroupMembership.objects.filter(
group__type=Group.TYPE_FRACTION,
group__slug=fraction_slug
)
fraction = fraction[0].group if fraction else None
mps = map(
extract,
ParliamentMember.objects.filter(groups=fraction,
groupmembership__until=None)
)
else:
mps = map(extract, ParliamentMember.objects.all())
return render(request, 'mp_catalog.jade', {'mps': mps,
'fractions': fractions})
def mp_profile(request, mp_slug):
mp = ParliamentMember.objects.get(slug=mp_slug)
profile = {'full_name': mp.full_name}
if mp.current_fraction:
profile["fraction_name"] = mp.current_fraction.name
else:
profile["fraction_name"] = None
try:
mp_node = couch.get(mp.source_id)
positions = prepare_position_list(mp_node)
except ResourceNotFound:
positions = None
stats = {
'statement_count': mp.get_statement_count(),
'long_statement_count': mp.get_long_statement_count(),
'contributed_discussion_percentage':
mp.get_discussion_contribution_percentage(),
}
context = {
'profile': profile,
'positions': positions,
'memberships': mp.other_group_memberships,
'biography': mark_safe(mp.biography),
'stats': stats,
'photo_url': mp.photo.url,
}
return render(request, 'profile.jade', context)
| from django.shortcuts import render
from django.utils.safestring import mark_safe
from couchdbkit.exceptions import ResourceNotFound
from sboard.models import couch
from manoseimas.mps.nodes import prepare_position_list
from .models import ParliamentMember, GroupMembership, Group
def mp_list(request, fraction_slug=None):
def extract(mp):
return {
'id': mp.id,
'full_name': mp.full_name,
'slug': mp.slug
}
fractions = Group.objects.filter(type=Group.TYPE_FRACTION)
if fraction_slug:
fraction = GroupMembership.objects.filter(
group__type=Group.TYPE_FRACTION,
group__slug=fraction_slug
)
fraction = fraction[0].group if fraction else None
mps = map(extract, ParliamentMember.objects.filter(groups=fraction))
else:
mps = map(extract, ParliamentMember.objects.all())
return render(request, 'mp_catalog.jade', {'mps': mps,
'fractions': fractions})
def mp_profile(request, mp_slug):
mp = ParliamentMember.objects.get(slug=mp_slug)
profile = {'full_name': mp.full_name}
if mp.current_fraction:
profile["fraction_name"] = mp.current_fraction.name
else:
profile["fraction_name"] = None
try:
mp_node = couch.get(mp.source_id)
positions = prepare_position_list(mp_node)
except ResourceNotFound:
positions = None
stats = {
'statement_count': mp.get_statement_count(),
'long_statement_count': mp.get_long_statement_count(),
'contributed_discussion_percentage':
mp.get_discussion_contribution_percentage(),
}
context = {
'profile': profile,
'positions': positions,
'memberships': mp.other_group_memberships,
'biography': mark_safe(mp.biography),
'stats': stats,
'photo_url': mp.photo.url,
}
return render(request, 'profile.jade', context)
| agpl-3.0 | Python |
d2328c28c89c1700906244610e92854c578e1262 | test now a package | jandecaluwe/myhdl,palashahuja/myhdl,j-marjanovic/myhdl,myhdl/myhdl,jmgc/myhdl-numeric,hgomersall/myhdl,jck/myhdl,juhasch/myhdl,hgomersall/myhdl,josyb/myhdl,juhasch/myhdl,myhdl/myhdl,jandecaluwe/myhdl,cfelton/myhdl,palashahuja/myhdl,palashahuja/myhdl,jandecaluwe/myhdl,jmgc/myhdl-numeric,myhdl/myhdl,jck/myhdl,gw0/myhdl,palashahuja/myhdl,j-marjanovic/myhdl,josyb/myhdl,cfelton/myhdl,gw0/myhdl,cfelton/myhdl,jmgc/myhdl-numeric,jck/myhdl,gw0/myhdl,josyb/myhdl,hgomersall/myhdl,j-marjanovic/myhdl,juhasch/myhdl | test/test.py | test/test.py | """ Run all the myhdl unit tests. """
import unittest
from myhdl.test import test_all
unittest.main(defaultTest='test_all.suite',
testRunner=unittest.TextTestRunner(verbosity=2))
| """ Run all the myhdl unit tests. """
import unittest
from myhdl import test_all
unittest.main(defaultTest='test_all.suite',
testRunner=unittest.TextTestRunner(verbosity=2))
| lgpl-2.1 | Python |
3b4dc1aefe17d14ad74edbdfa133b26247880f73 | Update run_init_temp.py | cornell-zhang/datuner,cornell-zhang/datuner,cornell-zhang/datuner,cornell-zhang/datuner,cornell-zhang/datuner,cornell-zhang/datuner | scripts/tests/run_init_temp.py | scripts/tests/run_init_temp.py | import os
import sys
pwd =os.getcwd()
tpath = 'vtr_flow_holder'
design = 'diffeq1'
wrksp = 'workspace_holder'
proc_num = 1
datpath = sys.path[0]
srcFile = datpath+"/run_DATuner.py"
rep_cmd = "sed -e \"s:TOOL_PATH_HOLD:"+tpath+":g\" -e \"s:DESIGN_NAME_HOLD:"+design+":g\" -e \"s:WORK_SPACE_HOLD:"+wrksp+":g\" -e \"s:DATUNER_PATH_HOLD:"+datpath+":g\" -e \"s:PROC_NUM_HOLD:"+str(proc_num)+":g\" "+srcFile+" > "+datpath+"/run_DATuner"+"_new.py"
os.system(rep_cmd)
run_cmd = "python "+ pwd + "/releases/Linux_x86_64/scripts/run_DATuner_new.py -f vtr -t 20 2 0 0.2197 -p 2 -b 1"
print "call: python ./run_DATuner_new.py -f vtr -t 20 2 0 0.2197 -p 2 -b 1" +" " + run_cmd
os.popen(run_cmd)
| import os
import sys
pwd =os.getcwd()
tpath = 'vtr_flow_holder'
design = 'diffeq1'
wrksp = 'workspace_holder'
proc_num = 1
datpath = sys.path[0]
srcFile = datpath+"/run_DATuner_arg.py"
rep_cmd = "sed -e \"s:TOOL_PATH_HOLD:"+tpath+":g\" -e \"s:DESIGN_NAME_HOLD:"+design+":g\" -e \"s:WORK_SPACE_HOLD:"+wrksp+":g\" -e \"s:DATUNER_PATH_HOLD:"+datpath+":g\" -e \"s:PROC_NUM_HOLD:"+str(proc_num)+":g\" "+srcFile+" > "+datpath+"/run_DATuner"+"_new.py"
os.system(rep_cmd)
run_cmd = "python "+ pwd + "/releases/Linux_x86_64/scripts/run_DATuner_new.py -f vtr -t 20 2 0 0.2197 -p 2 -b 1"
print "call: python ./run_DATuner_new.py -f vtr -t 20 2 0 0.2197 -p 2 -b 1" +" " + run_cmd
os.popen(run_cmd)
| bsd-3-clause | Python |
2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9 | Update Clock.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | service/Clock.py | service/Clock.py | # start the services
python = Runtime.start("python","Python")
clock = Runtime.start("clock","Clock")
log = Runtime.start("log","Log")
audio = Runtime.start("audio","AudioFile")
# define a ticktock method
def ticktock(timedata):
print timedata
audio.playResource("resource/Clock/tick.mp3")
#create a message routes
clock.addListener("pulse", python.name, "ticktock")
clock.addListener("pulse","log","log")
# start the clock
clock.setInterval(1000)
clock.startClock()
# optional : wait the first loop before execution start
# clock.startClock(1)
#
| # start the services
python = Runtime.start("python","Python")
clock = Runtime.start("clock","Clock")
log = Runtime.start("log","Log")
audio = Runtime.start("audio","AudioFile")
# define a ticktock method
def ticktock(timedata):
print timedata
audio.playResource("resource/Clock/tick.mp3")
#create a message routes
clock.addListener("pulse", python.name, "ticktock")
clock.addListener("pulse","log","log")
# start the clock
clock.setInterval(1000)
clock.startClock()
# optional : wait the first loop before execution start
# clock.startClock(1)
| apache-2.0 | Python |
65b397e3911d64e047cbb66f90cf15598f626bae | Remove unused code | liyi193328/seq2seq,google/seq2seq,liyi193328/seq2seq,kontact-chan/seq2seq,chunfengh/seq2seq,kontact-chan/seq2seq,shashankrajput/seq2seq,liyi193328/seq2seq,shashankrajput/seq2seq,google/seq2seq,chunfengh/seq2seq,shashankrajput/seq2seq,google/seq2seq,kontact-chan/seq2seq,chunfengh/seq2seq,kontact-chan/seq2seq,liyi193328/seq2seq,google/seq2seq,shashankrajput/seq2seq,chunfengh/seq2seq,liyi193328/seq2seq | seq2seq/inference/inference.py | seq2seq/inference/inference.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generates model predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from seq2seq.training import utils as training_utils
def create_inference_graph(model, input_pipeline, batch_size=32):
"""Creates a graph to perform inference.
Args:
task: An `InferenceTask` instance.
input_pipeline: An instance of `InputPipeline` that defines
how to read and parse data.
batch_size: The batch size used for inference
Returns:
The return value of the model function, typically a tuple of
(predictions, loss, train_op).
"""
# TODO: This doesn't really belong here.
# How to get rid of this?
if hasattr(model, "use_beam_search"):
if model.use_beam_search:
tf.logging.info("Setting batch size to 1 for beam search.")
batch_size = 1
input_fn = training_utils.create_input_fn(
pipeline=input_pipeline,
batch_size=batch_size,
allow_smaller_final_batch=True)
# Build the graph
features, labels = input_fn()
return model(features=features, labels=labels, params=None)
| # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generates model predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from seq2seq.training import utils as training_utils
def create_predictions_iter(predictions_dict, sess):
"""Runs prediciton fetches in a sessions and flattens batches as needed to
return an iterator of predictions. Yield elements until an
OutOfRangeError for the feeder queues occurs.
Args:
predictions_dict: The dictionary to be fetched. This will be passed
to `session.run`. The first dimensions of each element in this
dictionary is assumed to be the batch size.
sess: The Session to use.
Returns:
An iterator of the same shape as predictions_dict, but with one
element at a time and the batch dimension removed.
"""
with tf.contrib.slim.queues.QueueRunners(sess):
while True:
try:
predictions_ = sess.run(predictions_dict)
batch_length = list(predictions_.values())[0].shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in predictions_.items()}
except tf.errors.OutOfRangeError:
break
def create_inference_graph(model, input_pipeline, batch_size=32):
"""Creates a graph to perform inference.
Args:
task: An `InferenceTask` instance.
input_pipeline: An instance of `InputPipeline` that defines
how to read and parse data.
batch_size: The batch size used for inference
Returns:
The return value of the model function, typically a tuple of
(predictions, loss, train_op).
"""
# TODO: This doesn't really belong here.
# How to get rid of this?
if model.params["inference.beam_search.beam_width"] > 1:
tf.logging.info("Setting batch size to 1 for beam search.")
batch_size = 1
input_fn = training_utils.create_input_fn(
pipeline=input_pipeline,
batch_size=batch_size,
allow_smaller_final_batch=True)
# Build the graph
features, labels = input_fn()
return model(features=features, labels=labels, params=None)
| apache-2.0 | Python |
d8c873b2c135bce14cf909086c4a7b749e013a10 | Check for _type | opencivicdata/pupa,mileswwatkins/pupa,datamade/pupa,rshorey/pupa,mileswwatkins/pupa,datamade/pupa,rshorey/pupa,influence-usa/pupa,influence-usa/pupa,opencivicdata/pupa | tools/scruffy/checkers/common.py | tools/scruffy/checkers/common.py | from .. import Check
from ..core import db
def common_checks(obj, singular, plural):
if obj.get('jurisdiction_id'):
org = db.metadata.find_one({
"_id": obj['jurisdiction_id']
})
if org is None:
yield Check(collection=plural,
id=obj['_id'],
tagname='%s-has-unlinked-jurisdiction-id' % (singular),
severity='important')
if obj.get("_type") is None:
yield Check(collection=plural,
id=obj['_id'],
tagname='%s-is-missing-_type' % (singular),
severity='critical')
elif obj['_type'] != singular:
yield Check(collection=plural,
id=obj['_id'],
tagname='%s-has-invalid-_type' % (singular),
severity='critical')
if obj.get('sources', []) == []:
yield Check(collection=plural,
id=obj['_id'],
tagname='%s-has-no-sources' % (singular),
severity='critical')
if all([x in obj for x in ['created_at', 'updated_at']]):
if obj['created_at'] > obj['updated_at']:
yield Check(collection=plural,
id=obj['_id'],
tagname='updated-before-creation',
severity='critical')
def resolve(type_, id_):
collection = {
"person": db.people,
"organization": db.organizations,
"bill": db.bills,
}[type_]
return collection.find_one({"_id": id_})
| from .. import Check
from ..core import db
def common_checks(obj, singular, plural):
if obj.get('jurisdiction_id'):
org = db.metadata.find_one({
"_id": obj['jurisdiction_id']
})
if org is None:
yield Check(collection=plural,
id=obj['_id'],
tagname='%s-has-unlinked-jurisdiction-id' % (singular),
severity='important')
if obj['_type'] != singular:
yield Check(collection=plural,
id=obj['_id'],
tagname='%s-has-invalid-_type' % (singular),
severity='critical')
if obj.get('sources', []) == []:
yield Check(collection=plural,
id=obj['_id'],
tagname='%s-has-no-sources' % (singular),
severity='critical')
if all([x in obj for x in ['created_at', 'updated_at']]):
if obj['created_at'] > obj['updated_at']:
yield Check(collection=plural,
id=obj['_id'],
tagname='updated-before-creation',
severity='critical')
def resolve(type_, id_):
collection = {
"person": db.people,
"organization": db.organizations,
"bill": db.bills,
}[type_]
return collection.find_one({"_id": id_})
| bsd-3-clause | Python |
ad79dea7e0cd0af94cc17635a9ed72995af3bbd3 | Bump version to 0.0.2 | kgriffs/setec,kgriffs/setec | setec/version.py | setec/version.py | """Package version."""
__version__ = '0.0.2'
"""Current version."""
| """Package version."""
__version__ = '0.0.1'
"""Current version."""
| apache-2.0 | Python |
9cbc286e9e9ddeda8751250ad880b513bd28619e | Add the Twitter and Facebook IDs for testing. Refs #251 | CulturePlex/pybossa,CulturePlex/pybossa,PyBossa/pybossa,geotagx/geotagx-pybossa-archive,proyectos-analizo-info/pybossa-analizo-info,jean/pybossa,geotagx/pybossa,proyectos-analizo-info/pybossa-analizo-info,PyBossa/pybossa,OpenNewsLabs/pybossa,proyectos-analizo-info/pybossa-analizo-info,geotagx/pybossa,stefanhahmann/pybossa,geotagx/geotagx-pybossa-archive,harihpr/tweetclickers,jean/pybossa,geotagx/geotagx-pybossa-archive,inteligencia-coletiva-lsd/pybossa,CulturePlex/pybossa,OpenNewsLabs/pybossa,inteligencia-coletiva-lsd/pybossa,geotagx/geotagx-pybossa-archive,stefanhahmann/pybossa,Scifabric/pybossa,geotagx/geotagx-pybossa-archive,Scifabric/pybossa,harihpr/tweetclickers | settings_test.py | settings_test.py | SQLALCHEMY_DATABASE_TEST_URI = 'postgresql://postgres:@localhost/pybossa'
GOOGLE_CLIENT_ID = ''
GOOGLE_CLIENT_SECRET = ''
TWITTER_CONSUMER_KEY=''
TWITTER_CONSUMER_SECRET=''
FACEBOOK_APP_ID=''
FACEBOOK_APP_SECRET=''
TERMSOFUSE = 'http://okfn.org/terms-of-use/'
DATAUSE = 'http://opendatacommons.org/licenses/by/'
ITSDANGEORUSKEY = 'its-dangerous-key'
LOGO = 'logo.png'
MAIL_SERVER = 'localhost'
MAIL_USERNAME = None
MAIL_PASSWORD = None
MAIL_PORT = 25
MAIL_FAIL_SILENTLY = False
DEFAULT_MAIL_SENDER = 'PyBossa Support <info@pybossa.com>'
ANNOUNCEMENT = {'admin': 'Root Message', 'user': 'User Message', 'owner': 'Owner Message'}
| SQLALCHEMY_DATABASE_TEST_URI = 'postgresql://postgres:@localhost/pybossa'
GOOGLE_CLIENT_ID = ''
GOOGLE_CLIENT_SECRET = ''
TERMSOFUSE = 'http://okfn.org/terms-of-use/'
DATAUSE = 'http://opendatacommons.org/licenses/by/'
ITSDANGEORUSKEY = 'its-dangerous-key'
LOGO = 'logo.png'
MAIL_SERVER = 'localhost'
MAIL_USERNAME = None
MAIL_PASSWORD = None
MAIL_PORT = 25
MAIL_FAIL_SILENTLY = False
DEFAULT_MAIL_SENDER = 'PyBossa Support <info@pybossa.com>'
ANNOUNCEMENT = {'admin': 'Root Message', 'user': 'User Message', 'owner': 'Owner Message'}
| agpl-3.0 | Python |
9eb97ccd871afba7f2fbd0f59771bb840a32f939 | fix copy and paste leftovers | Impactstory/total-impact-core,total-impact/total-impact-core,Impactstory/total-impact-core,total-impact/total-impact-core,total-impact/total-impact-core,total-impact/total-impact-core,Impactstory/total-impact-core,Impactstory/total-impact-core | totalimpact/providers/bibjson.py | totalimpact/providers/bibjson.py | from StringIO import StringIO
import json, re
from totalimpact.providers import provider
from totalimpact.providers.provider import Provider, ProviderContentMalformedError, ProviderTimeout, ProviderServerError
from totalimpact import unicode_helpers
import logging
logger = logging.getLogger('ti.providers.bibjson')
class Bibjson(Provider):
example_id = None
url = ""
descr = ""
def _to_unicode(self, text):
text = unicode_helpers.to_unicode_or_bust(text)
return text
def parse(self, bibjson_list):
ret = []
for bibjson_entry in bibjson_list:
full_entry = bibjson_entry
try:
full_entry["authors"] = self._to_unicode(re.sub(", \d+", "", full_entry["marker"]))
except (KeyError, AttributeError):
full_entry["authors"] = ""
try:
full_entry["first_author"] = self._to_unicode(full_entry["marker"].split(",")[0])
except (KeyError, AttributeError):
full_entry["first_author"] = ""
try:
pages = full_entry["pages"]
full_entry["first_page"] = pages.split("--")[0]
except KeyError:
full_entry["first_page"] = ""
try:
full_entry["title"] = full_entry["booktitle"]
except (KeyError, AttributeError):
pass
ret.append(full_entry)
return ret
def member_items(self, bibjson_contents, cache_enabled=True):
logger.debug(u"%20s getting member_items for bibjson" % (self.provider_name))
parsed_bibjson = self.parse(bibjson_contents)
aliases = [("biblio", entry) for entry in parsed_bibjson]
return(aliases)
| from StringIO import StringIO
import json, re
from totalimpact.providers import provider
from totalimpact.providers.provider import Provider, ProviderContentMalformedError, ProviderTimeout, ProviderServerError
from totalimpact import unicode_helpers
import logging
logger = logging.getLogger('ti.providers.bibjson')
class Bibjson(Provider):
example_id = None
url = ""
descr = ""
def _to_unicode(self, text):
text = unicode_helpers.to_unicode_or_bust(text)
return text
def parse(self, bibjson_list):
ret = []
for bibjson_entry in bibjson_list:
full_entry = bibjson_entry
try:
full_entry["authors"] = self._to_unicode(re.sub(", \d+", "", full_entry["marker"]))
except (KeyError, AttributeError):
full_entry["authors"] = ""
try:
full_entry["first_author"] = self._to_unicode(full_entry["marker"].split(",")[0])
except (KeyError, AttributeError):
full_entry["first_author"] = ""
try:
pages = full_entry["pages"]
full_entry["first_page"] = pages.split("--")[0]
except KeyError:
full_entry["first_page"] = ""
try:
full_entry["title"] = full_entry["booktitle"]
except (KeyError, AttributeError):
pass
ret.append(full_entry)
return ret
def member_items(self, bibtex_contents, cache_enabled=True):
logger.debug(u"%20s getting member_items for bibtex" % (self.provider_name))
parsed_bibtex = self.parse(bibtex_contents)
aliases = [("biblio", entry) for entry in parsed_bibtex]
return(aliases)
| mit | Python |
abf2d338762d0d48a3a334c607cef65fa025ac7d | Update extract_strings_qt.py | JSponaugle/ion,JSponaugle/ion,JSponaugle/ion,JSponaugle/ion,JSponaugle/ion,JSponaugle/ion | share/qt/extract_strings_qt.py | share/qt/extract_strings_qt.py | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {')
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit | Python |
f9269fe894915e0c81d4313032541085c9982533 | Update interface.py | MCPEBukkit/Python_PE,MCPEBukkit/Python_PE | src/interface.py | src/interface.py | import socket
import logger from src.logging
import sys
import time
class interface(object):
"""docstring for interface"""
def __init__(self, arg):
super(interface, self).__init__()
self.arg = arg
def connection(ip = "0.0.0.0", port = 19132):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
bind = s.bind(ip, port)
if not bind:
logging.warning("Couldn't bind to "+ ip +":"+port)
time.sleep(5)
sys.exit("Process Eliminated.")
| import socket
import logger from src.logging
import sys
import time
class interface(object):
"""docstring for interface"""
def __init__(self, arg):
super(interface, self).__init__()
self.arg = arg
def connection(ip = "0.0.0.0", port = 19132):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
bind = s.bind(ip, port)
if not bind:
logging.warning("Couldn't bind to "+ ip +":"+port)
time.sleep(5)
sys.exit("Process Eliminated.")
| mit | Python |
8bd9d8fbdbfd2c5ac430ef64af932c67e58b887e | Add topic-command | Thor77/TeamspeakIRC | tsversion.py | tsversion.py | import irc3
from irc3.plugins.command import command
from irc3.plugins.cron import cron
from teamspeak_web_utils import latest_version
@irc3.plugin
class TSVersion(object):
def __init__(self, bot):
self.bot = bot
self.client_version = None
self.server_version = None
config = bot.config.get('tsversion', {})
self.channel = config.get('channel')
self.topic_template = config.get(
'topic',
'Client: {client} Server: {server}'
)
@cron('1 * * * *')
def fetch_version(self):
new_client, new_server = latest_version()
if self.channel:
# Notify channel
if self.client_version is not None and \
self.client_version != new_client:
self.bot.privmsg(self.channel,
'New client release: {}'.format(new_client))
if self.server_version is not None and \
self.server_version != new_server:
self.bot.privmsg(self.channel,
'New server release: {}'.format(new_server))
self.client_version = new_client
self.server_version = new_server
@command(permission='view')
def tsversion(self, mask, target, args):
'''Check latest Teamspeak3 Server/Client-version
%%tsversion
'''
if not self.client_version or not self.server_version:
self.fetch_version()
return 'Client: {} Server: {}'.format(
self.client_version, self.server_version)
@command(permission='admin')
def topic(self, mask, target, args):
'''Set topic to contain Teamspeak3 Server/Client-version
%%topic
'''
if self.channel and self.topic_template:
topic = self.topic_template.format(
client=self.client_version, server=self.server_version
)
self.bot.topic('#' + self.channel, topic)
| import irc3
from irc3.plugins.command import command
from irc3.plugins.cron import cron
from teamspeak_web_utils import latest_version
@irc3.plugin
class TSVersion(object):
def __init__(self, bot):
self.bot = bot
self.client_version = None
self.server_version = None
config = bot.config.get('tsversion', {})
self.channel = config.get('channel')
@cron('1 * * * *')
def fetch_version(self):
new_client, new_server = latest_version()
if self.channel:
# Notify channel
if self.client_version is not None and \
self.client_version != new_client:
self.bot.privmsg(self.channel,
'New client release: {}'.format(new_client))
if self.server_version is not None and \
self.server_version != new_server:
self.bot.privmsg(self.channel,
'New server release: {}'.format(new_server))
self.client_version = new_client
self.server_version = new_server
@command(permission='view')
def tsversion(self, mask, target, args):
'''Check latest Teamspeak3 Server/Client-version
%%tsversion
'''
if not self.client_version or not self.server_version:
self.fetch_version()
return 'Client: {} Server: {}'.format(
self.client_version, self.server_version)
| mit | Python |
f22eae158e337f4438aa367c13b93bf40085f242 | add def unicode in class Achievement | kaduuuken/achievementsystem,kaduuuken/achievementsystem | achievements/models.py | achievements/models.py | from django.db import models
from django.contrib.auth.models import User
from filebrowser.fields import FileBrowseField
from django.utils.translation import ugettext_lazy as _
import validate
class Category(models.Model):
name = models.CharField(_("Name"), max_length=255)
parent_category = models.ForeignKey('self', blank=True, null=True, related_name="child_categories")
def __unicode__(self):
if (self.parent_category != None):
return "%s -> %s" % (self.parent_category, self.name)
else:
return "%s" % (self.name)
class Achievement(models.Model):
name = models.CharField(_("Name"), max_length=255)
description = models.TextField(_("Description"))
points = models.IntegerField(blank=False, default=0)
icon = FileBrowseField(_("Icon"), directory='icons/', format='image', max_length=255, blank=False)
category = models.ForeignKey(Category)
def __unicode__(self):
return self.name
class UserAchievement(Achievement):
users = models.ManyToManyField(User, related_name="achievements")
class ProgressAchievement(Achievement):
required_amount = models.PositiveIntegerField()
users = models.ManyToManyField(User, related_name="progress_achievements", through="Progress")
class Progress(models.Model):
user = models.ForeignKey(User)
progress_achievement = models.ForeignKey(ProgressAchievement)
achieved_amount = models.PositiveIntegerField()
class Task(models.Model):
name = models.CharField(_("Name"), max_length=255)
description = models.TextField(_("Description"))
class TaskAchievement(Achievement):
tasks = models.ManyToManyField(Task)
users = models.ManyToManyField(User, related_name="task_achievements", through="TaskProgress")
class TaskProgress(models.Model):
user = models.ForeignKey(User)
task_achievement = models.ForeignKey(TaskAchievement)
completed_tasks = models.ManyToManyField(Task, limit_choices_to={})
class CollectionAchievement(Achievement):
achievements = models.ManyToManyField(Achievement, related_name="collection_achievements")
class Trophies(models.Model):
achievement = models.ForeignKey(Achievement, blank=True)
user = models.ForeignKey(User)
position = models.PositiveIntegerField(validators=[validate.validate_max()])
class Meta:
unique_together = ("user","position") | from django.db import models
from django.contrib.auth.models import User
from filebrowser.fields import FileBrowseField
from django.utils.translation import ugettext_lazy as _
import validate
class Category(models.Model):
name = models.CharField(_("Name"), max_length=255)
parent_category = models.ForeignKey('self', blank=True, null=True, related_name="child_categories")
def __unicode__(self):
if (self.parent_category != None):
return "%s -> %s" % (self.parent_category, self.name)
else:
return "%s" % (self.name)
class Achievement(models.Model):
name = models.CharField(_("Name"), max_length=255)
description = models.TextField(_("Description"))
points = models.IntegerField(blank=False, default=0)
icon = FileBrowseField(_("Icon"), directory='icons/', format='image', max_length=255, blank=False)
category = models.ForeignKey(Category)
class UserAchievement(Achievement):
users = models.ManyToManyField(User, related_name="achievements")
class ProgressAchievement(Achievement):
required_amount = models.PositiveIntegerField()
users = models.ManyToManyField(User, related_name="progress_achievements", through="Progress")
class Progress(models.Model):
user = models.ForeignKey(User)
progress_achievement = models.ForeignKey(ProgressAchievement)
achieved_amount = models.PositiveIntegerField()
class Task(models.Model):
name = models.CharField(_("Name"), max_length=255)
description = models.TextField(_("Description"))
class TaskAchievement(Achievement):
tasks = models.ManyToManyField(Task)
users = models.ManyToManyField(User, related_name="task_achievements", through="TaskProgress")
class TaskProgress(models.Model):
user = models.ForeignKey(User)
task_achievement = models.ForeignKey(TaskAchievement)
completed_tasks = models.ManyToManyField(Task, limit_choices_to={})
class CollectionAchievement(Achievement):
achievements = models.ManyToManyField(Achievement, related_name="collection_achievements")
class Trophies(models.Model):
achievement = models.ForeignKey(Achievement, blank=True)
user = models.ForeignKey(User)
position = models.PositiveIntegerField(validators=[validate.validate_max()])
class Meta:
unique_together = ("user","position") | bsd-2-clause | Python |
be0a878a0b8cb87491967bce30e503b62547a5c7 | add class Progress | kaduuuken/achievementsystem,kaduuuken/achievementsystem | achievements/models.py | achievements/models.py | from django.db import models
from django.contrib.auth.models import User
from filebrowser.fields import FileBrowseField
from django.utils.translation import ugettext_lazy as _
class Category(models.Model):
name = models.CharField(_("Name"), max_length=255)
parent_category = models.ForeignKey('self', blank=True, null=True, related_name="child_categories")
class Achievement(models.Model):
name = models.CharField(_("Name"), max_length=255)
description = models.TextField(_("Description"))
points = models.IntegerField(blank=False, default=0)
icon = FileBrowseField(_("Icon"), directory='icons/', format='image', max_length=255, blank=False)
category = models.ForeignKey(Category)
class UserAchievement(Achievement):
users = models.ManyToManyField(User, related_name="achievements")
class ProgressAchievement(Achievement):
required_amount = models.PositiveIntegerField()
users = models.ManyToManyField(User, related_name="progress_achievements", through="Progress")
class Progress(models.Model):
user = models.ForeignKey(User)
progress_achievement = models.ForeignKey(ProgressAchievement)
achieved_amount = models.PositiveIntegerField() | from django.db import models
from django.contrib.auth.models import User
from filebrowser.fields import FileBrowseField
from django.utils.translation import ugettext_lazy as _
class Category(models.Model):
name = models.CharField(_("Name"), max_length=255)
parent_category = models.ForeignKey('self', blank=True, null=True, related_name="child_categories")
class Achievement(models.Model):
name = models.CharField(_("Name"), max_length=255)
description = models.TextField(_("Description"))
points = models.IntegerField(blank=False, default=0)
icon = FileBrowseField(_("Icon"), directory='icons/', format='image', max_length=255, blank=False)
category = models.ForeignKey(Category)
class UserAchievement(Achievement):
users = models.ManyToManyField(User, related_name="achievements")
class ProgressAchievement(Achievement):
required_amount = models.PositiveIntegerField()
users = models.ManyToManyField(User, related_name="progress_achievements", through="Progress") | bsd-2-clause | Python |
b58a856d8ec99dd5b55037859959057bbfc3b87c | bump benchmark version | slundberg/shap,slundberg/shap,slundberg/shap,slundberg/shap | shap/__init__.py | shap/__init__.py | # flake8: noqa
__version__ = '0.27.3a'
from .explainers.kernel import KernelExplainer, kmeans
from .explainers.sampling import SamplingExplainer
from .explainers.tree import TreeExplainer, Tree
from .explainers.deep import DeepExplainer
from .explainers.gradient import GradientExplainer
from .explainers.linear import LinearExplainer
from .plots.summary import summary_plot
from .plots.dependence import dependence_plot
from .plots.force import force_plot, initjs
from .plots.image import image_plot
from .plots.monitoring import monitoring_plot
from .plots.embedding import embedding_plot
from . import datasets
from . import benchmark
from .explainers import other
from .common import approximate_interactions, hclust_ordering
| # flake8: noqa
__version__ = '0.27.3'
from .explainers.kernel import KernelExplainer, kmeans
from .explainers.sampling import SamplingExplainer
from .explainers.tree import TreeExplainer, Tree
from .explainers.deep import DeepExplainer
from .explainers.gradient import GradientExplainer
from .explainers.linear import LinearExplainer
from .plots.summary import summary_plot
from .plots.dependence import dependence_plot
from .plots.force import force_plot, initjs
from .plots.image import image_plot
from .plots.monitoring import monitoring_plot
from .plots.embedding import embedding_plot
from . import datasets
from . import benchmark
from .explainers import other
from .common import approximate_interactions, hclust_ordering
| mit | Python |
d54b1a1b2faf8ded624385dc8e4b0e8ec5adb869 | Update version. | pmaigutyak/mp-shop,pmaigutyak/mp-shop,pmaigutyak/mp-shop | shop/__init__.py | shop/__init__.py |
__version__ = '2.12'
|
__version__ = '2.11'
| isc | Python |
b8ecb1e86fcbbda0f92314c90fb319c2c50fcf94 | Set DEBUG = False in production | kz26/uchicago-hvz,kz26/uchicago-hvz,kz26/uchicago-hvz | uchicagohvz/production_settings.py | uchicagohvz/production_settings.py | from local_settings import *
DEBUG = False
ALLOWED_HOSTS = ['uchicagohvz.org']
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'uchicagohvz', # Or path to database file if using sqlite3.
'USER': 'user', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# REST framework settings
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
# Mandrill email settings
EMAIL_HOST = 'smtp.mandrillapp.com'
from secrets import EMAIL_HOST_USER, EMAIL_HOST_PASSWORD
EMAIL_PORT = '587'
EMAIL_USE_TLS = True | from local_settings import *
settings.DEBUG = False
ALLOWED_HOSTS = ['uchicagohvz.org']
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'uchicagohvz', # Or path to database file if using sqlite3.
'USER': 'user', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# REST framework settings
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
# Mandrill email settings
EMAIL_HOST = 'smtp.mandrillapp.com'
from secrets import EMAIL_HOST_USER, EMAIL_HOST_PASSWORD
EMAIL_PORT = '587'
EMAIL_USE_TLS = True | mit | Python |
e62bab8106e96df4e9a248e125c6b18e3700f602 | add unit test for utils | RaRe-Technologies/smart_open,RaRe-Technologies/smart_open | smart_open/tests/test_utils.py | smart_open/tests/test_utils.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Radim Rehurek <me@radimrehurek.com>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
#
import unittest
import smart_open.utils
class ClampTest(unittest.TestCase):
def test_low(self):
self.assertEqual(smart_open.utils.clamp(5, 0, 10), 5)
def test_high(self):
self.assertEqual(smart_open.utils.clamp(11, 0, 10), 10)
def test_out_of_range(self):
self.assertEqual(smart_open.utils.clamp(-1, 0, 10), 0)
def test_check_kwargs():
import smart_open.s3
kallable = smart_open.s3.open
kwargs = {'client': 'foo', 'unsupported': 'bar', 'client_kwargs': 'boaz'}
supported = smart_open.utils.check_kwargs(kallable, kwargs)
assert supported == {'client': 'foo', 'client_kwargs': 'boaz'}
| # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Radim Rehurek <me@radimrehurek.com>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
#
import unittest
import smart_open.utils
class ClampTest(unittest.TestCase):
def test_low(self):
self.assertEqual(smart_open.utils.clamp(5, 0, 10), 5)
def test_high(self):
self.assertEqual(smart_open.utils.clamp(11, 0, 10), 10)
def test_out_of_range(self):
self.assertEqual(smart_open.utils.clamp(-1, 0, 10), 0)
| mit | Python |
2dad5140ceeb7635374203b55727eb19c3264875 | fix template path in urls | crobays/django-boilerplate,crobays/django-boilerplate,crobays/boilerplate-django,maikelvl/django-boilerplate,crobays/django-boilerplate,maikelvl/django-boilerplate,maikelvl/django-boilerplate,crobays/boilerplate-django,crobays/boilerplate-django | src/main/urls.py | src/main/urls.py | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic.base import TemplateView
urlpatterns = [
# Examples:
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
]
if settings.DEBUG:
urlpatterns = urlpatterns + \
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
import debug_toolbar
urlpatterns += patterns(
'',
url(r'^__debug__/', include(debug_toolbar.urls)),
) | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic.base import TemplateView
urlpatterns = [
# Examples:
url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
]
if settings.DEBUG:
urlpatterns = urlpatterns + \
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
import debug_toolbar
urlpatterns += patterns(
'',
url(r'^__debug__/', include(debug_toolbar.urls)),
) | mit | Python |
551f7bf91a91420de6ac4c4b5767c7d926c9a500 | Allow camera to be chosen by config | exhuma/raspicam,exhuma/raspicam,exhuma/raspicam | raspicam/main.py | raspicam/main.py | import logging
from config_resolver import Config
import cv2
from camera import USBCam, PiCamera
from processing import detect
from raspicam.webui import make_app
LOG = logging.getLogger(__name__)
class Application:
def __init__(self, config):
self.config = config
self.initialised = False
self.frames = []
def init(self):
if not self.initialised:
self.frames = self._get_framesource()
self.initialised = True
LOG.info('Application successfully initialised.')
def _get_framesource(self):
kind = self.config.get('framesource', 'kind').lower()
raw_arguments = self.config.get('framesource', 'arguments', default='')
if raw_arguments.strip():
arguments = [arg.strip() for arg in raw_arguments.split(',')]
else:
arguments = []
if kind == 'usb':
if len(arguments) == 0:
index = -1
else:
index = int(arguments[0])
return USBCam(index).frame_generator()
elif kind == 'raspberrypi':
return PiCamera().frame_generator()
else:
raise ValueError('%s is an unsupported frame source!')
def run_gui(self):
self.init()
for frame in detect(self.frames):
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
def run_webui(self):
app = make_app(self.frames, self.config)
app.run(host='0.0.0.0', debug=True, threaded=True)
def run_cli(self):
for frame in detect(self.frames):
pass
logging.basicConfig(level=0)
config = Config('exhuma', 'raspicam', require_load=True)
# print(config.get('video', 'format', default='h264'))
app = Application(config)
app.run_cli()
| import logging
from config_resolver import Config
import cv2
from camera import USBCam
from processing import detect
from raspicam.webui import make_app
LOG = logging.getLogger(__name__)
class Application:
def __init__(self, config):
self.config = config
self.initialised = False
self.frames = []
def init(self):
if not self.initialised:
self.frames = USBCam().frame_generator()
self.initialised = True
LOG.info('Application successfully initialised.')
def run_gui(self):
self.init()
for frame in detect(self.frames):
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
def run_webui(self):
app = make_app(self.frames, self.config)
app.run(host='0.0.0.0', debug=True, threaded=True)
def run_cli(self):
for frame in detect(self.frames):
pass
logging.basicConfig(level=0)
config = Config('exhuma', 'raspicam', require_load=True)
# print(config.get('video', 'format', default='h264'))
app = Application(config)
app.run_cli()
| mit | Python |
562d84c69177b684a148e7818c1e47312a473c54 | Add serializers for UserOrders API | dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4 | apps/shop/serializers.py | apps/shop/serializers.py | # -*- coding: utf-8 -*-
from rest_framework import serializers
from apps.authentication.models import OnlineUser as User
from apps.gallery.serializers import ResponsiveImageSerializer
from apps.inventory.models import Item, ItemCategory
from apps.payment.models import PaymentTransaction
from apps.shop.models import Order, OrderLine
class OrderSerializer(serializers.ModelSerializer):
class Meta:
model = Order
fields = (
'object_id', 'quantity',
)
class OrderLineSerializer(serializers.ModelSerializer):
orders = OrderSerializer(many=True)
def create(self, validated_data):
order_list = validated_data.pop("orders")
order_line = OrderLine.objects.create(**validated_data)
while len(order_list) > 0:
order_data = order_list.pop()
order = Order(order_line=order_line, **order_data)
item = Item.objects.get(pk=order.object_id)
order.content_object = item
order.price = item.price
order.save()
order_line.pay()
return order_line
class Meta:
model = OrderLine
fields = (
'user', 'orders',
)
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = (
'pk', 'first_name', 'last_name', 'saldo',
)
class TransactionSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentTransaction
fields = (
'user', 'amount',
)
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = ItemCategory
fields = (
'pk', 'name'
)
class ItemSerializer(serializers.ModelSerializer):
image = ResponsiveImageSerializer()
category = CategorySerializer()
class Meta:
model = Item
fields = (
'pk', 'name', 'price', 'description', 'image', 'category'
)
class UserOrderSerializer(serializers.ModelSerializer):
content_object = ItemSerializer()
class Meta:
model = Order
fields = ('price', 'quantity', 'content_object')
class UserOrderLineSerializer(serializers.ModelSerializer):
orders = UserOrderSerializer(many=True)
class Meta:
model = OrderLine
fields = ('orders', 'paid', 'datetime')
| # -*- coding: utf-8 -*-
from rest_framework import serializers
from apps.authentication.models import OnlineUser as User
from apps.gallery.serializers import ResponsiveImageSerializer
from apps.inventory.models import Item, ItemCategory
from apps.payment.models import PaymentTransaction
from apps.shop.models import Order, OrderLine
class OrderSerializer(serializers.ModelSerializer):
class Meta:
model = Order
fields = (
'object_id', 'quantity',
)
class OrderLineSerializer(serializers.ModelSerializer):
orders = OrderSerializer(many=True)
def create(self, validated_data):
order_list = validated_data.pop("orders")
order_line = OrderLine.objects.create(**validated_data)
while len(order_list) > 0:
order_data = order_list.pop()
order = Order(order_line=order_line, **order_data)
item = Item.objects.get(pk=order.object_id)
order.content_object = item
order.price = item.price
order.save()
order_line.pay()
return order_line
class Meta:
model = OrderLine
fields = (
'user', 'orders',
)
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = (
'pk', 'first_name', 'last_name', 'saldo',
)
class TransactionSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentTransaction
fields = (
'user', 'amount',
)
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = ItemCategory
fields = (
'pk', 'name'
)
class ItemSerializer(serializers.ModelSerializer):
image = ResponsiveImageSerializer()
category = CategorySerializer()
class Meta:
model = Item
fields = (
'pk', 'name', 'price', 'description', 'image', 'category'
)
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.