commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
df301fad7106b0631215b96299801b81ccc38a0e | Update geoipupdater.py | mortn/geoipupdater | geoipupdater.py | geoipupdater.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Update GeoIP.dat if newer version exists on maxmind.com
Intended to run as a cronjob
"""
__author__ = 'Morten Abildgaard <morten@abildgaard.org>'
__version__ = '1.1'
import logging as log
import os, sys
from cStringIO import StringIO
from datetime import datetime
from gzip import GzipFile
from requests import get, head
class GeoIP:
def __init__(self):
self.url = 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz'
log.basicConfig(level=log.DEBUG,format='%(asctime)s %(levelname)s %(message)s',
filename='%s.log'%os.path.abspath(__file__)[:-3])
self.datfile = '/usr/share/GeoIP/GeoIP.dat'
log.info('Checking for newer version of %s' % self.datfile)
self.update()
exit()
def update(self):
r = head(self.url)
if r.headers and 'last-modified' in r.headers:
remote_lm = datetime.strptime(r.headers['last-modified'], '%a, %d %b %Y %H:%M:%S GMT')
local_lm = self.get_last_modified()
if remote_lm > local_lm:
log.info('Updating. remote_lm (%s) seems newer than local_lm (%s)' % (remote_lm,local_lm))
try:
r = get(self.url)
log.debug(r.headers)
data = GzipFile('','r',0,StringIO(r.content)).read()
with open(self.datfile, 'w') as f: f.write(data)
except IOError:
log.error('Unable to write to file %s' % self.datfile)
exit(sys.exc_info())
except Exception:
log.error(sys.exc_info()[1])
else:
log.info('No newer version found online')
def get_last_modified(self):
try:
return datetime.fromtimestamp(os.path.getmtime(self.datfile))
except Exception:
return datetime.fromtimestamp(1)
if __name__ == '__main__':
sys.exit(GeoIP())
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Update GeoIP.dat if newer version exists on maxmind.com
Intended to run as a cronjob
"""
__author__ = 'Morten Abildgaard <morten@abildgaard.org>'
__version__ = '1.1'
import logging as log
import os, sys
from cStringIO import StringIO
from datetime import datetime
from gzip import GzipFile
from requests import get, head
class GeoIP:
def __init__(self):
self.url = 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz'
log.basicConfig(level=log.DEBUG,format='%(asctime)s %(levelname)s %(message)s',
filename='%s.log'%os.path.abspath(__file__)[:-3])
self.datfile = '/usr/share/GeoIP/GeoIP.dat'
log.info('Checking for newer version of %s' % self.datfile)
self.update()
exit()
def update(self):
r = head(self.url)
if r.headers and 'last-modified' in r.headers:
remote_lm = datetime.strptime(r.headers['last-modified'], '%a, %d %b %Y %H:%M:%S GMT')
local_lm = self.get_last_modified()
if remote_lm > local_lm:
log.info('Updating. remote_lm (%s) seems newer than local_lm (%s)' % (remote_lm,local_lm))
try:
r = get(self.url)
log.debug(r.headers)
data = GzipFile('','r',0,StringIO(r.content)).read()
with open(self.datfile, 'w') as f: f.write(data)
except IOError:
log.error('Unable to write to file %s' % self.datfile)
exit(sys.exc_info())
except Exception:
log.error(sys.exc_info()[1])
def get_last_modified(self):
try:
return datetime.fromtimestamp(os.path.getmtime(self.datfile))
except Exception:
return datetime.fromtimestamp(1)
if __name__ == '__main__':
sys.exit(GeoIP())
| apache-2.0 | Python |
3d8d181c6aea1c80c860b6735c878899baa57f70 | Update main.py | jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi | apps/telegram/diskreport/main.py | apps/telegram/diskreport/main.py | # -*- coding: utf-8 -*-
# Author : JeongooonKang (github.com/jeonghoonkang)
import json
import time
import socket
import fcntl
import struct
import os
import datetime
import telegram
import requests
from pytz import timezone
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl (s.fileno(), 0x8915,
struct.pack('256s', bytes(ifname[:15], 'utf-8')))
return ''.join(['%d.' % b for b in info[20:24]])[:-1]
def get_free_space():
ret = os.statvfs('./')
free_space = ret.f_frsize * ret.f_bfree / 1024 / 1024 / 1024 # 기가바이트
return free_space
def send_message(token, chat_id, message):
bot = telegram.Bot(token=token)
bot.sendMessage(chat_id=chat_id, text=message, parse_mode="markdown")
if __name__ == "__main__" :
monitoring_time = datetime.datetime.now(timezone("Asia/Seoul"))
message = f"""{monitoring_time}"""
message += ' \n(****)서버 동작중, HDD 잔여용량 ' + str(int(get_free_space())) + 'GByte IP주소: '
local_ip = get_ip_address('enp1s0')
message += local_ip
with open("telegramconfig.json") as f:
settings = json.load(f)
for x,y in settings.items():
#print (x, y)
if x == "telegram_bot_setting":
for sub in y:
#print (sub)
token = sub["token"]
chat_id = sub["chat_id"]
send_message(token=token, chat_id=chat_id, message=message)
print ("finish end of sending telegram message via Bot, good bye .... ")
| # -*- coding: utf-8 -*-
# Author : JeongooonKang (github.com/jeonghoonkang)
import json
import time
import local_port_scanning
import socket
import fcntl
import struct
import os
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl (s.fileno(), 0x8915,
struct.pack('256s', bytes(ifname[:15], 'utf-8')))
return ''.join(['%d.' % b for b in info[20:24]])[:-1]
def get_free_space():
ret = os.statvfs('./')
free_space = ret.f_frsize * ret.f_bfree / 1024 / 1024 / 1024 # 기가바이트
#print (free_space)
retrun free_space
def local_port_scanning_job():
message = local_port_scanning.make_message()
local_port_scanning.send_message(token=token, chat_id=chat_id, message=message)
if __name__ == "__main__" :
get_free_space()
#local_ip = get_ip_address('enp1s0')
# should check network device interface name by ifconfig
#print (" open telegram config file, telegramconfig.json")
#with open("telegramconfig.json") as f:
# settings = json.load(f)
token = settings["telegram_bot_setting"]["token"]
chat_id = settings["telegram_bot_setting"]["chat_id"]
#message = local_port_scanning.make_message()
#message = message + 'LOCAL IP address:' + local_ip
#print (message)
#local_port_scanning.send_message(token=token, chat_id=chat_id, message=message)
print ("finish end of sending telegram message via Bot, good bye .... ")
| bsd-2-clause | Python |
3ba1ffaedc35ed4334db1ad33cdcb99f605953c3 | add trace_module to debugutils | doublereedkurt/boltons | boltons/debugutils.py | boltons/debugutils.py | # -*- coding: utf-8 -*-
"""
A small set of utilities useful for debugging misbehaving
applications. Currently this focuses on ways to use :mod:`pdb`, the
built-in Python debugger.
"""
__all__ = ['pdb_on_signal', 'pdb_on_exception', 'trace_module']
def pdb_on_signal(signalnum=None):
"""Installs a signal handler for *signalnum*, which defaults to
``SIGINT``, or keyboard interrupt/ctrl-c. This signal handler
launches a :mod:`pdb` breakpoint. Results vary in concurrent
systems, but this technique can be useful for debugging infinite
loops, or easily getting into deep call stacks.
Args:
signalnum (int): The signal number of the signal to handle
with pdb. Defaults to :mod:`signal.SIGINT`, see
:mod:`signal` for more information.
"""
import pdb
import signal
if not signalnum:
signalnum = signal.SIGINT
old_handler = signal.getsignal(signalnum)
def pdb_int_handler(sig, frame):
signal.signal(signalnum, old_handler)
pdb.set_trace()
pdb_on_signal(signalnum) # use 'u' to find your code and 'h' for help
signal.signal(signalnum, pdb_int_handler)
return
def pdb_on_exception(limit=100):
"""Installs a handler which, instead of exiting, attaches a
post-mortem pdb console whenever an unhandled exception is
encountered.
Args:
limit (int): the max number of stack frames to display when
printing the traceback
A similar effect can be achieved from the command-line using the
following command::
python -m pdb your_code.py
But ``pdb_on_exception`` allows you to do this conditionally and within
your application. To restore default behavior, just do::
sys.excepthook = sys.__excepthook__
"""
import pdb
import sys
import traceback
def pdb_excepthook(exc_type, exc_val, exc_tb):
traceback.print_tb(exc_tb, limit=limit)
pdb.post_mortem(exc_tb)
sys.excepthook = pdb_excepthook
def trace_module(modules):
'''Prints lines of code as they are executed only within the
given modules, in the current thread.
Compare to '-t' option of trace from the standard library, made usable
by having the condition inverted. Only specifc modules are invluded
in the output, instead of having to itemize modules to exclude.
(Uses sys.settrace() so will interfere with other modules that want to own
the trace function such as coverage, profile, and trace.)
'''
import sys
if type(modules) is not list:
modules = [modules]
globalses = set()
for module in modules:
globalses.add(id(module.__dict__))
def trace(frame, event, arg):
if event == 'line':
print frame.f_code.co_filename, frame.f_code.co_name, frame.f_lineno
if event == 'call':
if id(frame.f_globals) in globalses:
return trace
sys.settrace(trace)
| # -*- coding: utf-8 -*-
"""
A small set of utilities useful for debugging misbehaving
applications. Currently this focuses on ways to use :mod:`pdb`, the
built-in Python debugger.
"""
__all__ = ['pdb_on_signal', 'pdb_on_exception']
def pdb_on_signal(signalnum=None):
"""Installs a signal handler for *signalnum*, which defaults to
``SIGINT``, or keyboard interrupt/ctrl-c. This signal handler
launches a :mod:`pdb` breakpoint. Results vary in concurrent
systems, but this technique can be useful for debugging infinite
loops, or easily getting into deep call stacks.
Args:
signalnum (int): The signal number of the signal to handle
with pdb. Defaults to :mod:`signal.SIGINT`, see
:mod:`signal` for more information.
"""
import pdb
import signal
if not signalnum:
signalnum = signal.SIGINT
old_handler = signal.getsignal(signalnum)
def pdb_int_handler(sig, frame):
signal.signal(signalnum, old_handler)
pdb.set_trace()
pdb_on_signal(signalnum) # use 'u' to find your code and 'h' for help
signal.signal(signalnum, pdb_int_handler)
return
def pdb_on_exception(limit=100):
"""Installs a handler which, instead of exiting, attaches a
post-mortem pdb console whenever an unhandled exception is
encountered.
Args:
limit (int): the max number of stack frames to display when
printing the traceback
A similar effect can be achieved from the command-line using the
following command::
python -m pdb your_code.py
But ``pdb_on_exception`` allows you to do this conditionally and within
your application. To restore default behavior, just do::
sys.excepthook = sys.__excepthook__
"""
import pdb
import sys
import traceback
def pdb_excepthook(exc_type, exc_val, exc_tb):
traceback.print_tb(exc_tb, limit=limit)
pdb.post_mortem(exc_tb)
sys.excepthook = pdb_excepthook
| bsd-3-clause | Python |
ad11ab338b5b04919c18900ece86096dbe3222f8 | Update error message | vimeo/vimeo.py,gabrielgisoldo/vimeo.py | vimeo/exceptions.py | vimeo/exceptions.py | #!/usr/bin/env python
class BaseVimeoException(Exception):
def _get_message(self, response):
json = None
try:
json = response.json()
except:
pass
if json:
message = json['error']
else:
message = response.text
return message
def __init__(self, response, message):
# API error message
self.message = self._get_message(response)
# HTTP status code
self.status_code = response.status_code
super(BaseVimeoException, self).__init__(self.message)
class ObjectLoadFailure(Exception):
def __init__(self, message):
super(ObjectLoadFailure, self).__init__(message)
class UploadTicketCreationFailure(BaseVimeoException):
def __init__(self, response, message):
super(UploadTicketCreationFailure, self).__init__(response, message)
class VideoCreationFailure(BaseVimeoException):
def __init__(self, response, message):
super(VideoCreationFailure, self).__init__(response, message)
class VideoUploadFailure(BaseVimeoException):
def __init__(self, response, message):
super(VideoUploadFailure, self).__init__(response, message)
class PictureCreationFailure(BaseVimeoException):
def __init__(self, response, message):
super(PictureCreationFailure, self).__init__(response, message)
class PictureUploadFailure(BaseVimeoException):
def __init__(self, response, message):
super(PictureUploadFailure, self).__init__(response, message)
class PictureActivationFailure(BaseVimeoException):
def __init__(self, response, message):
super(PictureActivationFailure, self).__init__(response, message)
class TexttrackCreationFailure(BaseVimeoException):
def __init__(self, response, message):
super(TexttrackCreationFailure, self).__init__(response, message)
class TexttrackUploadFailure(BaseVimeoException):
def __init__(self, response, message):
super(TexttrackUploadFailure, self).__init__(response, message)
class APIRateLimitExededFailure(BaseVimeoException):
def _get_message(self, response):
guidelines = 'https://developer.vimeo.com/guidelines/rate-limiting'
message = super(APIRateLimitExededFailure, self)._get_message(
response
)
limit_reset_time = response.headers.get('x-ratelimit-reset')
if limit_reset_time:
text = '{} \n limit will reset on: {}.\n About this limit: {}'
message = text.format(
message,
limit_reset_time,
guidelines
)
return message
| #!/usr/bin/env python
class BaseVimeoException(Exception):
def _get_message(self, response):
json = None
try:
json = response.json()
except:
pass
if json:
message = json['error']
else:
message = response.text
return message
def __init__(self, response, message):
# API error message
self.message = self._get_message(response)
# HTTP status code
self.status_code = response.status_code
super(BaseVimeoException, self).__init__(self.message)
class ObjectLoadFailure(Exception):
def __init__(self, message):
super(ObjectLoadFailure, self).__init__(message)
class UploadTicketCreationFailure(BaseVimeoException):
def __init__(self, response, message):
super(UploadTicketCreationFailure, self).__init__(response, message)
class VideoCreationFailure(BaseVimeoException):
def __init__(self, response, message):
super(VideoCreationFailure, self).__init__(response, message)
class VideoUploadFailure(BaseVimeoException):
def __init__(self, response, message):
super(VideoUploadFailure, self).__init__(response, message)
class PictureCreationFailure(BaseVimeoException):
def __init__(self, response, message):
super(PictureCreationFailure, self).__init__(response, message)
class PictureUploadFailure(BaseVimeoException):
def __init__(self, response, message):
super(PictureUploadFailure, self).__init__(response, message)
class PictureActivationFailure(BaseVimeoException):
def __init__(self, response, message):
super(PictureActivationFailure, self).__init__(response, message)
class TexttrackCreationFailure(BaseVimeoException):
def __init__(self, response, message):
super(TexttrackCreationFailure, self).__init__(response, message)
class TexttrackUploadFailure(BaseVimeoException):
def __init__(self, response, message):
super(TexttrackUploadFailure, self).__init__(response, message)
class APIRateLimitExededFailure(BaseVimeoException):
def _get_message(self, response):
guidelines = 'https://developer.vimeo.com/guidelines/rate-limiting'
message = super(APIRateLimitExededFailure, self)._get_message(
response
)
limit_reset_time = response.headers.get('x-ratelimit-reset')
if limit_reset_time:
text = '{} \n limit will be reseted on: {}.\n About this limit: {}'
message = text.format(
message,
limit_reset_time,
guidelines
)
return message
| apache-2.0 | Python |
4a41b33286cf881f0b3aa09c29a4aaa3568b5259 | Convert numpy int to native int for JSON serialization | reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/ActiveDriverDB,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualistion-Framework-for-Genome-Mutations | website/stats/plots/mimp.py | website/stats/plots/mimp.py | from analyses.mimp import glycosylation_sub_types, run_mimp
from helpers.plots import stacked_bar_plot
from ..store import counter
@counter
@stacked_bar_plot
def gains_and_losses_for_glycosylation_subtypes():
results = {}
effects = 'loss', 'gain'
for source_name in ['mc3', 'clinvar']:
for site_type_name in glycosylation_sub_types:
result = run_mimp(source_name, site_type_name, enzyme_type='catch-all')
if result.empty:
continue
effect_counts = result.effect.value_counts()
results[source_name] = effects, [
int(effect_counts.get(effect, 0))
for effect in effects
]
return results
| from analyses.mimp import glycosylation_sub_types, run_mimp
from helpers.plots import stacked_bar_plot
from ..store import counter
@counter
@stacked_bar_plot
def gains_and_losses_for_glycosylation_subtypes():
results = {}
effects = 'loss', 'gain'
for source_name in ['mc3', 'clinvar']:
for site_type_name in glycosylation_sub_types:
result = run_mimp(source_name, site_type_name, enzyme_type='catch-all')
if result.empty:
continue
effect_counts = result.effect.value_counts()
results[source_name] = effects, [effect_counts.get(effect, 0) for effect in effects]
return results
| lgpl-2.1 | Python |
0872a48326cc19afd4371d153897eae26487b0f4 | fix bug | PytLab/VASPy,PytLab/VASPy | scripts/create_xsd.py | scripts/create_xsd.py | '''
Script to convert CONTCAR to .xsd file
'''
import commands
from vaspy.matstudio import XsdFile
from vaspy.atomco import PosCar
status, output = commands.getstatusoutput('ls *.xsd | head -1')
xsd = XsdFile(filename=output)
poscar = PosCar(filename='CONTCAR')
xsd.data = poscar.data
jobname = output.split('.')[0]
xsd.tofile(filename=jobname+'-y.xsd')
| '''
Script to convert CONTCAR to .xsd file
'''
from vaspy.matstudio import XsdFile
from vaspy.atomco import PosCar
status, output = commands.getstatusoutput('ls *.xsd | head -1')
xsd = XsdFile(filename=output)
poscar = PosCar(filename='CONTCAR')
xsd.data = poscar.data
jobname = output.split('.')[0]
xsd.tofile(filename=jobname+'-y.xsd')
| mit | Python |
e287fe67a7c4aaf231b0eb0003cdd18cb615da47 | Bump version to 18.04.15-1 | charlievieth/GoSubl,charlievieth/GoSubl | gosubl/about.py | gosubl/about.py | import re
import sublime
# GoSublime Globals
ANN = 'a18.04.15-1'
VERSION = 'r18.04.15-1'
VERSION_PAT = re.compile(r'\d{2}[.]\d{2}[.]\d{2}-\d+', re.IGNORECASE)
DEFAULT_GO_VERSION = 'go?'
GO_VERSION_OUTPUT_PAT = re.compile(r'go\s+version\s+(\S+(?:\s+[+]\w+|\s+\([^)]+)?)', re.IGNORECASE)
GO_VERSION_NORM_PAT = re.compile(r'[^\w.+-]+', re.IGNORECASE)
PLATFORM = '%s-%s' % (sublime.platform(), sublime.arch())
MARGO_EXE_PREFIX = 'gosublime.margo_'
MARGO_EXE_SUFFIX = '.exe'
MARGO_EXE = MARGO_EXE_PREFIX+VERSION+'_'+DEFAULT_GO_VERSION+MARGO_EXE_SUFFIX
MARGO_EXE_PAT = re.compile(r'^gosublime\.margo.*\.exe$', re.IGNORECASE)
# CEV: Dev Globals
FORCE_INSTALL = False
| import re
import sublime
# GoSublime Globals
ANN = 'a14.02.25-1'
VERSION = 'r14.12.06-1'
VERSION_PAT = re.compile(r'\d{2}[.]\d{2}[.]\d{2}-\d+', re.IGNORECASE)
DEFAULT_GO_VERSION = 'go?'
GO_VERSION_OUTPUT_PAT = re.compile(r'go\s+version\s+(\S+(?:\s+[+]\w+|\s+\([^)]+)?)', re.IGNORECASE)
GO_VERSION_NORM_PAT = re.compile(r'[^\w.+-]+', re.IGNORECASE)
PLATFORM = '%s-%s' % (sublime.platform(), sublime.arch())
MARGO_EXE_PREFIX = 'gosublime.margo_'
MARGO_EXE_SUFFIX = '.exe'
MARGO_EXE = MARGO_EXE_PREFIX+VERSION+'_'+DEFAULT_GO_VERSION+MARGO_EXE_SUFFIX
MARGO_EXE_PAT = re.compile(r'^gosublime\.margo.*\.exe$', re.IGNORECASE)
# CEV: Dev Globals
FORCE_INSTALL = False
| mit | Python |
a54a2dc1cd055c1a9a897fe09876b7a505b5facf | Add dummy vendor to test settings | django-oscar/django-oscar-sagepay-direct | tests/settings.py | tests/settings.py | SECRET_KEY = 'asdf'
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
import logging
logging.disable(logging.CRITICAL)
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.flatpages',
'django.contrib.staticfiles',
# External apps
'oscar_sagepay',
]
from oscar import get_core_apps
INSTALLED_APPS = INSTALLED_APPS + get_core_apps()
OSCAR_SAGEPAY_VENDOR = 'dummy'
from oscar.defaults import * # noqa
# Import private settings used for external tests
try:
from sandbox.private_settings import * # noqa
except ImportError:
pass
| SECRET_KEY = 'asdf'
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
import logging
logging.disable(logging.CRITICAL)
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.flatpages',
'django.contrib.staticfiles',
# External apps
'oscar_sagepay',
]
from oscar import get_core_apps
INSTALLED_APPS = INSTALLED_APPS + get_core_apps()
from oscar.defaults import * # noqa
# Import private settings used for external tests
try:
from sandbox.private_settings import * # noqa
except ImportError:
pass
| bsd-3-clause | Python |
f98c0dcabc40a4967c19b76551499afe32026fd9 | Add response class and define is_response() on test strategy | nirmalvp/python-social-auth,clef/python-social-auth,bjorand/python-social-auth,nirmalvp/python-social-auth,python-social-auth/social-storage-sqlalchemy,barseghyanartur/python-social-auth,noodle-learns-programming/python-social-auth,DhiaEddineSaidi/python-social-auth,jameslittle/python-social-auth,VishvajitP/python-social-auth,mrwags/python-social-auth,drxos/python-social-auth,yprez/python-social-auth,noodle-learns-programming/python-social-auth,hsr-ba-fs15-dat/python-social-auth,Andygmb/python-social-auth,MSOpenTech/python-social-auth,mchdks/python-social-auth,VishvajitP/python-social-auth,imsparsh/python-social-auth,JerzySpendel/python-social-auth,DhiaEddineSaidi/python-social-auth,duoduo369/python-social-auth,JJediny/python-social-auth,mchdks/python-social-auth,cmichal/python-social-auth,lneoe/python-social-auth,falcon1kr/python-social-auth,mathspace/python-social-auth,garrett-schlesinger/python-social-auth,bjorand/python-social-auth,nirmalvp/python-social-auth,falcon1kr/python-social-auth,barseghyanartur/python-social-auth,cjltsod/python-social-auth,hsr-ba-fs15-dat/python-social-auth,henocdz/python-social-auth,contracode/python-social-auth,nvbn/python-social-auth,msampathkumar/python-social-auth,nvbn/python-social-auth,SeanHayes/python-social-auth,joelstanner/python-social-auth,Andygmb/python-social-auth,firstjob/python-social-auth,mark-adams/python-social-auth,lawrence34/python-social-auth,tkajtoch/python-social-auth,contracode/python-social-auth,mrwags/python-social-auth,mark-adams/python-social-auth,imsparsh/python-social-auth,lneoe/python-social-auth,san-mate/python-social-auth,jameslittle/python-social-auth,jneves/python-social-auth,iruga090/python-social-auth,ononeor12/python-social-auth,noodle-learns-programming/python-social-auth,frankier/python-social-auth,robbiet480/python-social-auth,python-social-auth/social-app-django,cmichal/python-social-auth,A
ndygmb/python-social-auth,alrusdi/python-social-auth,mathspace/python-social-auth,daniula/python-social-auth,lamby/python-social-auth,python-social-auth/social-app-django,drxos/python-social-auth,muhammad-ammar/python-social-auth,yprez/python-social-auth,tutumcloud/python-social-auth,jameslittle/python-social-auth,python-social-auth/social-core,rsalmaso/python-social-auth,iruga090/python-social-auth,yprez/python-social-auth,ByteInternet/python-social-auth,michael-borisov/python-social-auth,rsteca/python-social-auth,S01780/python-social-auth,iruga090/python-social-auth,MSOpenTech/python-social-auth,cjltsod/python-social-auth,wildtetris/python-social-auth,henocdz/python-social-auth,mchdks/python-social-auth,bjorand/python-social-auth,SeanHayes/python-social-auth,degs098/python-social-auth,jneves/python-social-auth,msampathkumar/python-social-auth,jeyraof/python-social-auth,henocdz/python-social-auth,python-social-auth/social-core,joelstanner/python-social-auth,clef/python-social-auth,merutak/python-social-auth,daniula/python-social-auth,fearlessspider/python-social-auth,chandolia/python-social-auth,falcon1kr/python-social-auth,python-social-auth/social-app-django,fearlessspider/python-social-auth,firstjob/python-social-auth,alrusdi/python-social-auth,ByteInternet/python-social-auth,tobias47n9e/social-core,daniula/python-social-auth,jneves/python-social-auth,cmichal/python-social-auth,alrusdi/python-social-auth,lawrence34/python-social-auth,imsparsh/python-social-auth,merutak/python-social-auth,python-social-auth/social-app-cherrypy,ononeor12/python-social-auth,MSOpenTech/python-social-auth,tkajtoch/python-social-auth,chandolia/python-social-auth,mathspace/python-social-auth,jeyraof/python-social-auth,webjunkie/python-social-auth,mrwags/python-social-auth,JJediny/python-social-auth,degs098/python-social-auth,fearlessspider/python-social-auth,JerzySpendel/python-social-auth,clef/python-social-auth,barseghyanartur/python-social-auth,ariestiyansyah/python-social-auth,duod
uo369/python-social-auth,muhammad-ammar/python-social-auth,joelstanner/python-social-auth,webjunkie/python-social-auth,webjunkie/python-social-auth,mark-adams/python-social-auth,rsteca/python-social-auth,ononeor12/python-social-auth,robbiet480/python-social-auth,degs098/python-social-auth,chandolia/python-social-auth,python-social-auth/social-docs,firstjob/python-social-auth,wildtetris/python-social-auth,lamby/python-social-auth,rsalmaso/python-social-auth,san-mate/python-social-auth,lneoe/python-social-auth,jeyraof/python-social-auth,drxos/python-social-auth,michael-borisov/python-social-auth,JerzySpendel/python-social-auth,wildtetris/python-social-auth,VishvajitP/python-social-auth,robbiet480/python-social-auth,rsteca/python-social-auth,ariestiyansyah/python-social-auth,san-mate/python-social-auth,hsr-ba-fs15-dat/python-social-auth,tutumcloud/python-social-auth,ByteInternet/python-social-auth,muhammad-ammar/python-social-auth,msampathkumar/python-social-auth,S01780/python-social-auth,lamby/python-social-auth,tkajtoch/python-social-auth,contracode/python-social-auth,michael-borisov/python-social-auth,frankier/python-social-auth,garrett-schlesinger/python-social-auth,S01780/python-social-auth,ariestiyansyah/python-social-auth,merutak/python-social-auth,DhiaEddineSaidi/python-social-auth,JJediny/python-social-auth,lawrence34/python-social-auth | tests/strategy.py | tests/strategy.py | from social.strategies.base import BaseStrategy, BaseTemplateStrategy
TEST_URI = 'http://myapp.com'
TEST_HOST = 'myapp.com'
class Redirect(object):
def __init__(self, url):
self.url = url
class Response(object):
def __init__(self, value):
self.value = value
class TestTemplateStrategy(BaseTemplateStrategy):
def render_template(self, tpl, context):
return tpl
def render_string(self, html, context):
return html
class TestStrategy(BaseStrategy):
def __init__(self, *args, **kwargs):
self._request_data = {}
self._settings = {}
self._session = {}
kwargs.setdefault('tpl', TestTemplateStrategy)
super(TestStrategy, self).__init__(*args, **kwargs)
def redirect(self, url):
return Redirect(url)
def get_setting(self, name):
"""Return value for given setting name"""
return self._settings[name]
def html(self, content):
"""Return HTTP response with given content"""
return Response(content)
def render_html(self, tpl=None, html=None, context=None):
"""Render given template or raw html with given context"""
return tpl or html
def request_data(self, merge=True):
"""Return current request data (POST or GET)"""
return self._request_data
def request_host(self):
"""Return current host value"""
return TEST_HOST
def session_get(self, name, default=None):
"""Return session value for given key"""
return self._session.get(name, default)
def session_set(self, name, value):
"""Set session value for given key"""
self._session[name] = value
def session_pop(self, name):
"""Pop session value for given key"""
return self._session.pop(name, None)
def build_absolute_uri(self, path=None):
"""Build absolute URI with given (optional) path"""
return TEST_URI + (path or '')
def set_settings(self, values):
self._settings.update(values)
def set_request_data(self, values):
self._request_data.update(values)
def authenticate(self, *args, **kwargs):
user = super(TestStrategy, self).authenticate(*args, **kwargs)
if isinstance(user, self.storage.user.user_model()):
self.session_set('username', user.username)
return user
def is_response(self, value):
return isinstance(value, (Redirect, Response))
| from social.strategies.base import BaseStrategy, BaseTemplateStrategy
TEST_URI = 'http://myapp.com'
TEST_HOST = 'myapp.com'
class Redirect(object):
def __init__(self, url):
self.url = url
class TestTemplateStrategy(BaseTemplateStrategy):
def render_template(self, tpl, context):
return tpl
def render_string(self, html, context):
return html
class TestStrategy(BaseStrategy):
def __init__(self, *args, **kwargs):
self._request_data = {}
self._settings = {}
self._session = {}
kwargs.setdefault('tpl', TestTemplateStrategy)
super(TestStrategy, self).__init__(*args, **kwargs)
def redirect(self, url):
return Redirect(url)
def get_setting(self, name):
"""Return value for given setting name"""
return self._settings[name]
def html(self, content):
"""Return HTTP response with given content"""
return content
def render_html(self, tpl=None, html=None, context=None):
"""Render given template or raw html with given context"""
return tpl or html
def request_data(self, merge=True):
"""Return current request data (POST or GET)"""
return self._request_data
def request_host(self):
"""Return current host value"""
return TEST_HOST
def session_get(self, name, default=None):
"""Return session value for given key"""
return self._session.get(name, default)
def session_set(self, name, value):
"""Set session value for given key"""
self._session[name] = value
def session_pop(self, name):
"""Pop session value for given key"""
return self._session.pop(name, None)
def build_absolute_uri(self, path=None):
"""Build absolute URI with given (optional) path"""
return TEST_URI + (path or '')
def set_settings(self, values):
self._settings.update(values)
def set_request_data(self, values):
self._request_data.update(values)
def authenticate(self, *args, **kwargs):
user = super(TestStrategy, self).authenticate(*args, **kwargs)
if isinstance(user, self.storage.user.user_model()):
self.session_set('username', user.username)
return user
| bsd-3-clause | Python |
0730a438ce4ef90a9ef18e926a8363b7df19af4f | Use getattr() to support Python 2.6 and older. | metasmile/grit-i18n,sevansahumIlovemuhammad/grit-i18n,lhopps/grit-i18n,joisig/grit-i18n,metasmile/grit-i18n,megamanfx/grit-i18n,invalidstatement/grit-i18n,skidzen/grit-i18n,CrankWheel/grit-i18n,lhopps/grit-i18n,willbittner/grit-i18n,qjw/grit-i18n,megamanfx/grit-i18n,invalidstatement/grit-i18n,qjw/grit-i18n,sevansahumIlovemuhammad/grit-i18n,korusdipl/grit-i18n,korusdipl/grit-i18n,joisig/grit-i18n,willbittner/grit-i18n,skidzen/grit-i18n | grit/lazy_re.py | grit/lazy_re.py | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''In GRIT, we used to compile a lot of regular expressions at parse
time. Since many of them never get used, we use lazy_re to compile
them on demand the first time they are used, thus speeding up startup
time in some cases.
'''
import re
class LazyRegexObject(object):
    '''Compile-on-first-use stand-in for the object that ``re.compile``
    returns.

    The constructor only records its arguments; the real pattern object
    is built the first time any regular attribute is accessed, and every
    lookup after that is forwarded to it, so the wrapper offers a
    nearly-identical interface while deferring compilation.
    '''

    def __init__(self, *args, **kwargs):
        # Stash the re.compile() arguments; compilation is deferred.
        self._stash_args = args
        self._stash_kwargs = kwargs
        self._lazy_re = None

    def _LazyInit(self):
        if self._lazy_re is None:
            self._lazy_re = re.compile(*self._stash_args, **self._stash_kwargs)

    def __getattribute__(self, name):
        # The four names below implement the laziness itself and must be
        # resolved on the wrapper, not the wrapped pattern -- this also
        # prevents infinite recursion.
        if name not in ('_LazyInit', '_lazy_re', '_stash_args', '_stash_kwargs'):
            self._LazyInit()
            return getattr(self._lazy_re, name)
        return object.__getattribute__(self, name)
def compile(*args, **kwargs):
    '''Creates a LazyRegexObject that, when invoked on, will compile a
    re.RegexObject (via re.compile) with the same arguments passed to
    this function, and delegate almost all of its methods to it.

    Note: deliberately shadows the ``compile`` builtin so this module can
    be used as a drop-in replacement for ``re.compile``.
    '''
    return LazyRegexObject(*args, **kwargs)
| #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''In GRIT, we used to compile a lot of regular expressions at parse
time. Since many of them never get used, we use lazy_re to compile
them on demand the first time they are used, thus speeding up startup
time in some cases.
'''
import re
class LazyRegexObject(object):
    '''This object creates a RegexObject with the arguments passed in
    its constructor, the first time any attribute except the several on
    the class itself is accessed. This accomplishes lazy compilation of
    the regular expression while maintaining a nearly-identical
    interface.
    '''

    def __init__(self, *args, **kwargs):
        # Stash the re.compile() arguments; compilation is deferred until
        # the wrapped pattern is first needed.
        self._stash_args = args
        self._stash_kwargs = kwargs
        self._lazy_re = None

    def _LazyInit(self):
        # Explicit None check: a compiled pattern is always truthy, but
        # "is None" states the intent directly.
        if self._lazy_re is None:
            self._lazy_re = re.compile(*self._stash_args, **self._stash_kwargs)

    def __getattribute__(self, name):
        # The lazy machinery's own attributes must bypass the delegation
        # below, otherwise we would recurse forever.
        if name in ('_LazyInit', '_lazy_re', '_stash_args', '_stash_kwargs'):
            return object.__getattribute__(self, name)
        else:
            self._LazyInit()
            # Use getattr() rather than calling __getattribute__ directly:
            # it is the idiomatic (and more portable) attribute lookup.
            return getattr(self._lazy_re, name)
def compile(*args, **kwargs):
    '''Creates a LazyRegexObject that, when invoked on, will compile a
    re.RegexObject (via re.compile) with the same arguments passed to
    this function, and delegate almost all of its methods to it.

    Note: deliberately shadows the ``compile`` builtin so this module can
    be used as a drop-in replacement for ``re.compile``.
    '''
    return LazyRegexObject(*args, **kwargs)
| bsd-2-clause | Python |
d7232d855d406a26b2485b5c1fcd587e90fddf39 | Fix Runtime warnings on async tests | RazerM/ratelimiter | tests/test_aio.py | tests/test_aio.py | import pytest
from ratelimiter import RateLimiter
@pytest.mark.asyncio
async def test_alock(event_loop):
    """The async lock must be created lazily on first ``async with``
    entry and then reused (same object) on subsequent entries.

    NOTE(review): the ``event_loop`` fixture argument presumably ties the
    test to pytest-asyncio's event-loop fixture -- confirm it is required.
    """
    rl = RateLimiter(max_calls=10, period=0.01)
    # No lock before the context manager has ever been entered.
    assert rl._alock is None
    async with rl:
        pass
    alock = rl._alock
    assert alock
    async with rl:
        pass
    # Re-entering must not replace the previously created lock.
    assert rl._alock is alock
| import pytest
from ratelimiter import RateLimiter
@pytest.mark.asyncio
async def test_alock():
    """The async lock must be created lazily on first ``async with``
    entry and then reused (same object) on subsequent entries."""
    rl = RateLimiter(max_calls=10, period=0.01)
    # No lock before the context manager has ever been entered.
    assert rl._alock is None
    async with rl:
        pass
    alock = rl._alock
    assert alock
    async with rl:
        pass
    # Re-entering must not replace the previously created lock.
    assert rl._alock is alock
| apache-2.0 | Python |
37e31972cc36efd93a92cf52d80bdce8e2a6458e | Add trailing slash to dental plan detail url | agendaodonto/server,agendaodonto/server | app/schedule/urls.py | app/schedule/urls.py | from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from app.schedule.views.clinic import ClinicDetail
from app.schedule.views.clinic import ClinicList, ClinicPatients
from app.schedule.views.dental_plan import DentalPlanList, DentalPlanDetail
from app.schedule.views.dentist import DentistList, DentistDetail
from app.schedule.views.patient import PatientList, PatientDetail, PatientSchedule
from app.schedule.views.schedule import ScheduleList, ScheduleDetail, ScheduleAttendance, ScheduleNotification
# URL routes for the schedule app, grouped by resource.  Detail routes
# take a numeric primary key and consistently end with a trailing slash.
urlpatterns = [
    # Schedules
    url(r'^schedules/$', ScheduleList.as_view(), name='schedules'),
    url(r'^schedules/(?P<pk>[0-9]+)/$', ScheduleDetail.as_view(), name='schedule-detail'),
    url(r'^schedules/(?P<pk>[0-9]+)/notification/$', ScheduleNotification.as_view(), name='schedule-notification'),
    url(r'^schedules/attendance/$', ScheduleAttendance.as_view(), name='schedule-attendance'),

    # Patients
    url(r'^patients/$', PatientList.as_view(), name='patients'),
    url(r'^patients/(?P<pk>[0-9]+)/$', PatientDetail.as_view(), name='patient-detail'),
    url(r'^patients/(?P<pk>[0-9]+)/schedules/$', PatientSchedule.as_view(), name='patient-schedules'),

    # Dentists
    url(r'^dentists/$', DentistList.as_view(), name='dentists'),
    url(r'^dentists/me/$', DentistDetail.as_view(), name='dentist-detail'),

    # Clinics
    url(r'^clinics/$', ClinicList.as_view(), name='clinics'),
    url(r'^clinics/(?P<pk>[0-9]+)/$', ClinicDetail.as_view(), name='clinic-detail'),
    url(r'^clinics/(?P<pk>[0-9]+)/patients/$', ClinicPatients.as_view(), name='clinic-patients'),

    # Dental Plans
    url(r'^dental-plans/$', DentalPlanList.as_view(), name='dental-plans'),
    url(r'^dental-plans/(?P<pk>[0-9]+)/$', DentalPlanDetail.as_view(), name='dental-plan-detail'),
]

# Let clients request explicit representation formats (e.g. ``.json``).
urlpatterns = format_suffix_patterns(urlpatterns)
| from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from app.schedule.views.clinic import ClinicDetail
from app.schedule.views.clinic import ClinicList, ClinicPatients
from app.schedule.views.dental_plan import DentalPlanList, DentalPlanDetail
from app.schedule.views.dentist import DentistList, DentistDetail
from app.schedule.views.patient import PatientList, PatientDetail, PatientSchedule
from app.schedule.views.schedule import ScheduleList, ScheduleDetail, ScheduleAttendance, ScheduleNotification
# URL routes for the schedule app, grouped by resource.  Detail routes
# take a numeric primary key and consistently end with a trailing slash.
urlpatterns = [
    # Schedules
    url(r'^schedules/$', ScheduleList.as_view(), name='schedules'),
    url(r'^schedules/(?P<pk>[0-9]+)/$', ScheduleDetail.as_view(), name='schedule-detail'),
    url(r'^schedules/(?P<pk>[0-9]+)/notification/$', ScheduleNotification.as_view(), name='schedule-notification'),
    url(r'^schedules/attendance/$', ScheduleAttendance.as_view(), name='schedule-attendance'),

    # Patients
    url(r'^patients/$', PatientList.as_view(), name='patients'),
    url(r'^patients/(?P<pk>[0-9]+)/$', PatientDetail.as_view(), name='patient-detail'),
    url(r'^patients/(?P<pk>[0-9]+)/schedules/$', PatientSchedule.as_view(), name='patient-schedules'),

    # Dentists
    url(r'^dentists/$', DentistList.as_view(), name='dentists'),
    url(r'^dentists/me/$', DentistDetail.as_view(), name='dentist-detail'),

    # Clinics
    url(r'^clinics/$', ClinicList.as_view(), name='clinics'),
    url(r'^clinics/(?P<pk>[0-9]+)/$', ClinicDetail.as_view(), name='clinic-detail'),
    url(r'^clinics/(?P<pk>[0-9]+)/patients/$', ClinicPatients.as_view(), name='clinic-patients'),

    # Dental Plans
    url(r'^dental-plans/$', DentalPlanList.as_view(), name='dental-plans'),
    # Fix: this detail route was the only one missing its trailing slash,
    # which is inconsistent with the other detail routes above.
    url(r'^dental-plans/(?P<pk>[0-9]+)/$', DentalPlanDetail.as_view(), name='dental-plan-detail'),
]

# Let clients request explicit representation formats (e.g. ``.json``).
urlpatterns = format_suffix_patterns(urlpatterns)
| agpl-3.0 | Python |
97cbc26ee7343b36cdea7766f79a18f1a2e2700c | make main docstring one line for flit | almarklein/pywasm,almarklein/wasmfun,almarklein/wasmfun,almarklein/pywasm,almarklein/wasmfun | wasmfun/__init__.py | wasmfun/__init__.py | """
A Python library that provides tools to handle WASM code.
"""
__version__ = '0.1'
from ._opcodes import OPCODES
from .fields import *
from .util import *
| """
A Python library that provides tools to handle WASM code, like generating
WASM, and perhaps someday interpreting it too.
"""
__version__ = '0.1'
from ._opcodes import OPCODES
from .fields import *
from .util import *
| bsd-2-clause | Python |
85b88eff8eb2d745649c9d8e60ebb45067c8f214 | make launch ipdb_on_exception importable | michelesr/ipdb | ipdb/__init__.py | ipdb/__init__.py | # Copyright (c) 2007, 2010, 2011, 2012 Godefroid Chapelle
#
# This file is part of ipdb.
# GNU package is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# GNU package is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# You should have received a copy of the GNU General Public License along with this program. If not, see http://www.gnu.org/licenses/.

from ipdb.__main__ import set_trace, post_mortem, pm, run, runcall, runeval, launch_ipdb_on_exception

# The bare references below mark the re-exported names as "used" so that
# pyflakes does not flag the import above as an unused import.
pm  # please pyflakes
post_mortem  # please pyflakes
run  # please pyflakes
runcall  # please pyflakes
runeval  # please pyflakes
set_trace  # please pyflakes
launch_ipdb_on_exception  # please pyflakes
| # Copyright (c) 2007, 2010, 2011, 2012 Godefroid Chapelle
#
# This file is part of ipdb.
# GNU package is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# GNU package is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# Fix: the line below had lost its leading '#', turning the license text
# into invalid Python and making the whole module a SyntaxError.
# You should have received a copy of the GNU General Public License along with this program. If not, see http://www.gnu.org/licenses/.

from ipdb.__main__ import set_trace, post_mortem, pm, run, runcall, runeval

# The bare references below mark the re-exported names as "used" so that
# pyflakes does not flag the import above as an unused import.
pm  # please pyflakes
post_mortem  # please pyflakes
run  # please pyflakes
runcall  # please pyflakes
runeval  # please pyflakes
set_trace  # please pyflakes
| bsd-3-clause | Python |
85415c225de51ace61eff76a493f31bd4cd3955f | fix the incorrect filename | zzw922cn/Automatic_Speech_Recognition,zzw922cn/Automatic_Speech_Recognition | speechvalley/feature/libri/__init__.py | speechvalley/feature/libri/__init__.py | # encoding: utf-8
# ******************************************************
# Author : zzw922cn
# Last modified: 2017-12-09 11:00
# Email : zzw922cn@gmail.com
# Filename : __init__.py
# Description : Feature preprocessing for LibriSpeech dataset
# ******************************************************
from speechvalley.feature.libri.libri_preprocess import preprocess, wav2feature
| # encoding: utf-8
# ******************************************************
# Author : zzw922cn
# Last modified: 2017-12-09 11:00
# Email : zzw922cn@gmail.com
# Filename : __init__.py
# Description : Feature preprocessing for LibriSpeech dataset
# ******************************************************
from speechvalley.feature.libri.libri_proprecess import preprocess, wav2feature
| mit | Python |
f096ff7d7e5b460e40878510d4222d6f82eb3e99 | Bump version for pypi to 0.2018.07.08.0419 | oduwsdl/ipwb,oduwsdl/ipwb,oduwsdl/ipwb,oduwsdl/ipwb | ipwb/__init__.py | ipwb/__init__.py | __version__ = '0.2018.07.08.0419'
| __version__ = '0.2018.07.08.0414'
| mit | Python |
3a7a258f8e9cf255642dc8fb0fa8c207f73e37cf | add unit test for deprecating properties | yanikou19/monty,gmatteo/monty,materialsvirtuallab/monty,gpetretto/monty,materialsvirtuallab/monty,davidwaroquiers/monty,gmatteo/monty,davidwaroquiers/monty | tests/test_dev.py | tests/test_dev.py | __author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2014, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '1/24/14'
import unittest
import warnings
from monty.dev import deprecated, requires
class DecoratorTest(unittest.TestCase):
    """Tests for the ``deprecated`` and ``requires`` decorators."""

    def test_deprecated(self):
        # A function wrapped with @deprecated must still run but emit a
        # DeprecationWarning when called.
        def func_a():
            pass

        @deprecated(func_a)
        def func_b():
            pass

        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            # Trigger a warning.
            func_b()
            # Verify some things
            self.assertTrue(issubclass(w[-1].category, DeprecationWarning))

    def test_deprecated_property(self):
        # @deprecated must compose with @property as well as with plain
        # methods defined on a class.
        class a(object):
            def __init__(self):
                pass

            @property
            def property_a(self):
                pass

            @property
            @deprecated(property_a)
            def property_b(self):
                return 'b'

            @deprecated(property_a)
            def func_a(self):
                return 'a'

        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            # Trigger a warning (property access still returns its value).
            self.assertEqual(a().property_b, 'b')
            # Verify some things
            self.assertTrue(issubclass(w[-1].category, DeprecationWarning))

        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            # Trigger a warning (plain method call still returns its value).
            self.assertEqual(a().func_a(), 'a')
            # Verify some things
            self.assertTrue(issubclass(w[-1].category, DeprecationWarning))

    def test_requires(self):
        # ``requires`` must raise RuntimeError when its condition is False
        # and be transparent when the condition holds.
        try:
            import fictitious_mod
        except ImportError:
            fictitious_mod = None

        @requires(fictitious_mod is not None, "fictitious_mod is not present.")
        def use_fictitious_mod():
            print("success")

        self.assertRaises(RuntimeError, use_fictitious_mod)

        @requires(unittest is not None, "scipy is not present.")
        def use_unittest():
            return "success"

        self.assertEqual(use_unittest(), "success")
if __name__ == "__main__":
unittest.main() | __author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2014, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '1/24/14'
import unittest
import warnings
from monty.dev import deprecated, requires
class DecoratorTest(unittest.TestCase):
    """Tests for the ``deprecated`` and ``requires`` decorators."""

    def test_deprecated(self):
        # A function wrapped with @deprecated must still run but emit a
        # DeprecationWarning when called.
        def func_a():
            pass

        @deprecated(func_a)
        def func_b():
            pass

        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            # Trigger a warning.
            func_b()
            # Verify some things
            self.assertTrue(issubclass(w[-1].category, DeprecationWarning))

    def test_requires(self):
        # ``requires`` must raise RuntimeError when its condition is False
        # and be transparent when the condition holds.
        try:
            import fictitious_mod
        except ImportError:
            fictitious_mod = None

        @requires(fictitious_mod is not None, "fictitious_mod is not present.")
        def use_fictitious_mod():
            print("success")

        self.assertRaises(RuntimeError, use_fictitious_mod)

        @requires(unittest is not None, "scipy is not present.")
        def use_unittest():
            return "success"

        self.assertEqual(use_unittest(), "success")
if __name__ == "__main__":
unittest.main() | mit | Python |
b929b73880a7bf3cba97b138ddb96f6c3cf165b0 | add MIDDLEWARE_CLASSES for Django 1.7 | jannon/django-haystack,sgaist/django-haystack,kuanyui/django-haystack,cyanut/django-haystack,django-searchstack/django-searchstack,antonyr/django-haystack,fisle/django-haystack,ruimashita/django-haystack,django-searchstack/django-searchstack,comandrei/django-haystack,jannon/django-haystack,barseghyanartur/django-haystack,fisle/django-haystack,elishowk/django-haystack,streeter/django-haystack,jannon/django-haystack,kuanyui/django-haystack,Stupeflix/django-haystack,elishowk/django-haystack,Stupeflix/django-haystack,zeehio/django-haystack,blancltd/django-haystack,cyanut/django-haystack,elishowk/django-haystack,comandrei/django-haystack,comandrei/django-haystack,zeehio/django-haystack,eventials/django-haystack,celerityweb/django-haystack,fisle/django-haystack,antonyr/django-haystack,barseghyanartur/django-haystack,Stupeflix/django-haystack,celerityweb/django-haystack,streeter/django-haystack,sgaist/django-haystack,ruimashita/django-haystack,blancltd/django-haystack,eventials/django-haystack,barseghyanartur/django-haystack,eventials/django-haystack,kuanyui/django-haystack,vitalyvolkov/django-haystack,django-searchstack/django-searchstack,cyanut/django-haystack,ruimashita/django-haystack,streeter/django-haystack,sgaist/django-haystack,vitalyvolkov/django-haystack,antonyr/django-haystack,celerityweb/django-haystack,blancltd/django-haystack,vitalyvolkov/django-haystack,zeehio/django-haystack | test_haystack/settings.py | test_haystack/settings.py | import os
from tempfile import mkdtemp
SECRET_KEY = "Please do not spew DeprecationWarnings"

# Haystack settings for running tests.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'haystack_tests.db',
    }
}

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',

    'haystack',

    'test_haystack.discovery',
    'test_haystack.core',
    'test_haystack.spatial',
    'test_haystack.multipleindex',
]

SITE_ID = 1
ROOT_URLCONF = 'test_haystack.core.urls'

# Route index operations through the default router plus the router used
# by the multipleindex test app.
HAYSTACK_ROUTERS = ['haystack.routers.DefaultRouter', 'test_haystack.multipleindex.routers.MultipleIndexRouter']

# One connection per backend exercised by the test suite.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'test_haystack.mocks.MockEngine',
    },
    'whoosh': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': os.path.join('tmp', 'test_whoosh_query'),
        'INCLUDE_SPELLING': True,
    },
    'filtered_whoosh': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': mkdtemp(prefix='haystack-multipleindex-filtered-whoosh-tests-'),
        'EXCLUDED_INDEXES': ['test_haystack.multipleindex.search_indexes.BarIndex'],
    },
    'elasticsearch': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': '127.0.0.1:9200/',
        'INDEX_NAME': 'test_default',
        'INCLUDE_SPELLING': True,
    },
    'simple': {
        'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
    },
    'solr': {
        'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
        'URL': 'http://localhost:9001/solr/',
        'INCLUDE_SPELLING': True,
    },
}
# Fix: SITE_ID was previously assigned a second time here with the same
# value; the redundant duplicate assignment has been removed.
MIDDLEWARE_CLASSES = ('django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware') | import os
from tempfile import mkdtemp
SECRET_KEY = "Please do not spew DeprecationWarnings"

# Haystack settings for running tests.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'haystack_tests.db',
    }
}

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',

    'haystack',

    'test_haystack.discovery',
    'test_haystack.core',
    'test_haystack.spatial',
    'test_haystack.multipleindex',
]

SITE_ID = 1
ROOT_URLCONF = 'test_haystack.core.urls'

# Route index operations through the default router plus the router used
# by the multipleindex test app.
HAYSTACK_ROUTERS = ['haystack.routers.DefaultRouter', 'test_haystack.multipleindex.routers.MultipleIndexRouter']

# One connection per backend exercised by the test suite.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'test_haystack.mocks.MockEngine',
    },
    'whoosh': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': os.path.join('tmp', 'test_whoosh_query'),
        'INCLUDE_SPELLING': True,
    },
    'filtered_whoosh': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': mkdtemp(prefix='haystack-multipleindex-filtered-whoosh-tests-'),
        'EXCLUDED_INDEXES': ['test_haystack.multipleindex.search_indexes.BarIndex'],
    },
    'elasticsearch': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': '127.0.0.1:9200/',
        'INDEX_NAME': 'test_default',
        'INCLUDE_SPELLING': True,
    },
    'simple': {
        'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
    },
    'solr': {
        'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
        'URL': 'http://localhost:9001/solr/',
        'INCLUDE_SPELLING': True,
    },
}
# NOTE(review): SITE_ID is assigned twice in this module with the same
# value; the repetition below is harmless but redundant.
SITE_ID = 1
| bsd-3-clause | Python |
d47d56525f85c5fa8b1f6b817a85479b9eb07582 | Add query_entities to functions module import | joshfriend/sqlalchemy-utils,joshfriend/sqlalchemy-utils,cheungpat/sqlalchemy-utils,marrybird/sqlalchemy-utils,rmoorman/sqlalchemy-utils,spoqa/sqlalchemy-utils,tonyseek/sqlalchemy-utils,tonyseek/sqlalchemy-utils,JackWink/sqlalchemy-utils,konstantinoskostis/sqlalchemy-utils | sqlalchemy_utils/functions/__init__.py | sqlalchemy_utils/functions/__init__.py | from .defer_except import defer_except
from .mock import create_mock_engine, mock_engine
from .render import render_expression, render_statement
from .sort_query import sort_query, QuerySorterException
from .database import (
database_exists,
create_database,
drop_database,
escape_like,
is_auto_assigned_date_column,
is_indexed_foreign_key,
non_indexed_foreign_keys,
)
from .orm import (
declarative_base,
getdotattr,
has_changes,
identity,
naturally_equivalent,
query_entities,
primary_keys,
table_name,
)
__all__ = (
create_database,
create_mock_engine,
database_exists,
declarative_base,
defer_except,
drop_database,
escape_like,
getdotattr,
has_changes,
identity,
is_auto_assigned_date_column,
is_indexed_foreign_key,
mock_engine,
naturally_equivalent,
non_indexed_foreign_keys,
primary_keys,
QuerySorterException,
render_expression,
render_statement,
sort_query,
table_name,
)
| from .defer_except import defer_except
from .mock import create_mock_engine, mock_engine
from .render import render_expression, render_statement
from .sort_query import sort_query, QuerySorterException
from .database import (
database_exists,
create_database,
drop_database,
escape_like,
is_auto_assigned_date_column,
is_indexed_foreign_key,
non_indexed_foreign_keys,
)
from .orm import (
declarative_base,
getdotattr,
has_changes,
identity,
naturally_equivalent,
primary_keys,
table_name,
)
__all__ = (
create_database,
create_mock_engine,
database_exists,
declarative_base,
defer_except,
drop_database,
escape_like,
getdotattr,
has_changes,
identity,
is_auto_assigned_date_column,
is_indexed_foreign_key,
mock_engine,
naturally_equivalent,
non_indexed_foreign_keys,
primary_keys,
QuerySorterException,
render_expression,
render_statement,
sort_query,
table_name,
)
| bsd-3-clause | Python |
a5e9bcdf365921818c8c69810f014664a03ebf3e | update conan script | Enhex/Cereal-Optional-NVP,Enhex/Cereal-Optional-NVP | test_package/conanfile.py | test_package/conanfile.py | import os
from conans import ConanFile, CMake, tools
class CerealoptionalnvpTestConan(ConanFile):
    """Conan test-package recipe: builds the bundled example against the
    package under test and runs it to prove the package is consumable."""
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake"

    def build(self):
        cmake = CMake(self)
        # Current dir is "test_package/build/<build_id>" and CMakeLists.txt is
        # in "test_package"
        cmake.configure()
        cmake.build()

    def imports(self):
        # Copy shared libraries next to the test binary so it can run.
        self.copy("*.dll", dst="bin", src="bin")
        self.copy("*.dylib*", dst="bin", src="lib")
        self.copy('*.so*', dst='bin', src='lib')

    def test(self):
        # Skip execution when cross-building: the produced binary cannot
        # run on the build machine.
        if not tools.cross_building(self.settings):
            os.chdir("bin")
            self.run(".%sexample" % os.sep)
| from conans import ConanFile, CMake
import os
class CerealoptionalnvpTestConan(ConanFile):
    """Conan test-package recipe: builds the bundled example against the
    package under test and runs it to prove the package is consumable."""
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake"

    def build(self):
        cmake = CMake(self)
        # Current dir is "test_package/build/<build_id>" and CMakeLists.txt is in "test_package"
        cmake.configure(source_dir=self.conanfile_directory, build_dir="./")
        cmake.build()

    def imports(self):
        # Copy shared libraries next to the test binary so it can run.
        self.copy("*.dll", dst="bin", src="bin")
        self.copy("*.dylib*", dst="bin", src="lib")
        self.copy('*.so*', dst='bin', src='lib')

    def test(self):
        # NOTE(review): the example binary is executed unconditionally,
        # which will fail when cross-building -- confirm whether a
        # cross_building guard is needed here.
        os.chdir("bin")
        self.run(".%sexample" % os.sep)
| mit | Python |
178993d9f1da3b1bfade0b3fca076bd069936115 | Fix name of postgresql dialect. | konstantinoskostis/sqlalchemy-utils,joshfriend/sqlalchemy-utils,tonyseek/sqlalchemy-utils,JackWink/sqlalchemy-utils,cheungpat/sqlalchemy-utils,spoqa/sqlalchemy-utils,marrybird/sqlalchemy-utils,tonyseek/sqlalchemy-utils,joshfriend/sqlalchemy-utils,rmoorman/sqlalchemy-utils | sqlalchemy_utils/functions/database.py | sqlalchemy_utils/functions/database.py | from sqlalchemy.engine.url import make_url
import sqlalchemy as sa
from sqlalchemy.exc import ProgrammingError, OperationalError
import os
def database_exists(url):
    """Check if a database exists.

    :param url: database URL (string or SQLAlchemy ``URL``); only the
        server portion is used for the initial connection.
    :returns: True when the database named in ``url`` exists.
    """
    url = make_url(url)
    database = url.database
    # Connect to the server itself, not to the (possibly missing) database.
    url.database = None
    engine = sa.create_engine(url)

    if engine.dialect.name == 'postgresql':
        # NOTE: the database name is interpolated into the SQL text, so
        # this is only safe for trusted input.
        text = "SELECT 1 FROM pg_database WHERE datname='%s'" % database
        return bool(engine.execute(text).scalar())

    elif engine.dialect.name == 'mysql':
        text = ("SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA "
                "WHERE SCHEMA_NAME = '%s'" % database)
        return bool(engine.execute(text).scalar())

    elif engine.dialect.name == 'sqlite':
        # SQLite databases are plain files; ':memory:' always "exists".
        return database == ':memory:' or os.path.exists(database)

    else:
        # Fallback for other dialects: try a trivial query against the
        # database and treat a connection/SQL failure as "missing".
        text = 'SELECT 1'
        try:
            url.database = database
            engine = sa.create_engine(url)
            engine.execute(text)
            return True
        except (ProgrammingError, OperationalError):
            return False
def create_database(url, encoding='utf8'):
    """Issue the appropriate CREATE DATABASE statement.

    :param url: URL naming the database to create.
    :param encoding: character encoding for the new database.
    """
    url = make_url(url)
    database = url.database

    # For server databases, connect to the server itself first.
    if not url.drivername.startswith('sqlite'):
        url.database = None

    engine = sa.create_engine(url)

    if engine.dialect.name == 'postgresql':
        # NOTE: the database name is interpolated into the SQL text, so
        # this is only safe for trusted input.
        text = "CREATE DATABASE %s ENCODING = '%s'" % (database, encoding)
        engine.execute(text)

    elif engine.dialect.name == 'mysql':
        text = "CREATE DATABASE %s CHARACTER SET = '%s'" % (database, encoding)
        engine.execute(text)

    elif engine.dialect.name == 'sqlite' and database != ':memory:':
        # SQLite: creating the empty file is enough.
        open(database, 'w').close()

    else:
        text = "CREATE DATABASE %s" % database
        engine.execute(text)
def drop_database(url):
    """Issue the appropriate DROP DATABASE statement.

    :param url: URL naming the database to drop.  For SQLite files the
        database file is simply removed from disk.
    """
    url = make_url(url)
    database = url.database

    if not url.drivername.startswith('sqlite'):
        url.database = None

    engine = sa.create_engine(url)

    if engine.dialect.name == 'sqlite' and url.database != ':memory:':
        os.remove(url.database)
    else:
        # NOTE: the database name is interpolated into the SQL text, so
        # this is only safe for trusted input.
        text = "DROP DATABASE %s" % database
        engine.execute(text)
| from sqlalchemy.engine.url import make_url
import sqlalchemy as sa
from sqlalchemy.exc import ProgrammingError
import os
def database_exists(url):
    """Check if a database exists.

    :param url: database URL (string or SQLAlchemy ``URL``); only the
        server portion is used for the initial connection.
    :returns: True when the database named in ``url`` exists.
    """
    url = make_url(url)
    database = url.database
    # Connect to the server itself, not to the (possibly missing) database.
    url.database = None
    engine = sa.create_engine(url)

    # Fix: SQLAlchemy's PostgreSQL dialect is named 'postgresql', not
    # 'postgres', so the previous comparison never matched and PostgreSQL
    # always fell through to the generic probe below.
    if engine.dialect.name == 'postgresql':
        text = "SELECT 1 FROM pg_database WHERE datname='%s'" % database
        return bool(engine.execute(text).scalar())

    elif engine.dialect.name == 'mysql':
        text = ("SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA "
                "WHERE SCHEMA_NAME = '%s'" % database)
        return bool(engine.execute(text).scalar())

    elif engine.dialect.name == 'sqlite':
        # SQLite databases are plain files; ':memory:' always "exists".
        return database == ':memory:' or os.path.exists(database)

    else:
        # Fallback for other dialects: try a trivial query against the
        # database and treat a failure as "missing".
        text = 'SELECT 1'
        try:
            url.database = database
            engine = sa.create_engine(url)
            engine.execute(text)
            return True
        except ProgrammingError:
            return False
def create_database(url, encoding='utf8'):
    """Issue the appropriate CREATE DATABASE statement.

    :param url: URL naming the database to create.
    :param encoding: character encoding for the new database.
    """
    url = make_url(url)
    database = url.database

    # For server databases, connect to the server itself first.
    if not url.drivername.startswith('sqlite'):
        url.database = None

    engine = sa.create_engine(url)

    # Fix: SQLAlchemy's PostgreSQL dialect is named 'postgresql', not
    # 'postgres', so the previous comparison never matched and PostgreSQL
    # databases were created without the requested encoding.
    if engine.dialect.name == 'postgresql':
        text = "CREATE DATABASE %s ENCODING = '%s'" % (database, encoding)
        engine.execute(text)

    elif engine.dialect.name == 'mysql':
        text = "CREATE DATABASE %s CHARACTER SET = '%s'" % (database, encoding)
        engine.execute(text)

    elif engine.dialect.name == 'sqlite' and database != ':memory:':
        # SQLite: creating the empty file is enough.
        open(database, 'w').close()

    else:
        text = "CREATE DATABASE %s" % database
        engine.execute(text)
def drop_database(url):
    """Issue the appropriate DROP DATABASE statement.

    :param url: URL naming the database to drop.  For SQLite files the
        database file is simply removed from disk.
    """
    url = make_url(url)
    database = url.database

    if not url.drivername.startswith('sqlite'):
        url.database = None

    engine = sa.create_engine(url)

    if engine.dialect.name == 'sqlite' and url.database != ':memory:':
        os.remove(url.database)
    else:
        # NOTE: the database name is interpolated into the SQL text, so
        # this is only safe for trusted input.
        text = "DROP DATABASE %s" % database
        engine.execute(text)
| bsd-3-clause | Python |
91242483a79a66eb18fdcbf1090422541d6a06b0 | Add support for .db2 files | jleclanche/pywow,jleclanche/pywow,jleclanche/pywow,jleclanche/pywow,jleclanche/pywow,jleclanche/pywow | wdbc/environment.py | wdbc/environment.py | # -*- coding: utf-8 -*-
import os, os.path
from .. import wdbc
stripfilename = wdbc.getfilename
class Environment(object):
    """Dictionary-like view over a directory of client cache files for a
    given build and locale, opening each .db2/.dbc/.wdb file lazily."""

    def __init__(self, build, locale="enGB", base="/var/www/sigrie/caches/caches/"):
        self.build = build
        self.path = "%s/%i/%s/" % (base, build, locale)
        if not os.path.exists(self.path):
            raise ValueError(self.path)
        self.files = {}
        self.__cache = {}
        # Index every supported cache file under its stripped base name.
        for f in os.listdir(self.path):
            _f = f.lower()
            if _f.endswith(".db2") or _f.endswith(".dbc") or _f.endswith(".wdb"):
                self.files[stripfilename(f)] = self.path + f

    def __getitem__(self, item):
        # Open the file on first access and memoize the handle.
        item = stripfilename(item)
        if item not in self.__cache:
            self.__cache[item] = wdbc.fopen(self.files[item], build=self.build, environment=self)
        return self.__cache[item]

    def __contains__(self, item):
        return stripfilename(item) in self.files

    def __iter__(self):
        return self.files.__iter__()

    def __len__(self):
        return self.files.__len__()
| # -*- coding: utf-8 -*-
import os, os.path
from .. import wdbc
stripfilename = wdbc.getfilename
class Environment(object):
    """Dictionary-like view over a directory of client cache files for a
    given build and locale, opening each .dbc/.wdb file lazily.

    NOTE(review): .db2 caches are not indexed here -- confirm whether
    they should be supported as well.
    """

    def __init__(self, build, locale="enGB", base="/var/www/sigrie/caches/caches/"):
        self.build = build
        self.path = "%s/%i/%s/" % (base, build, locale)
        if not os.path.exists(self.path):
            raise ValueError(self.path)
        self.files = {}
        self.__cache = {}
        # Index every supported cache file under its stripped base name.
        for f in os.listdir(self.path):
            _f = f.lower()
            if _f.endswith(".dbc") or _f.endswith(".wdb"):
                self.files[stripfilename(f)] = self.path + f

    def __getitem__(self, item):
        # Open the file on first access and memoize the handle.
        item = stripfilename(item)
        if item not in self.__cache:
            self.__cache[item] = wdbc.fopen(self.files[item], build=self.build, environment=self)
        return self.__cache[item]

    def __contains__(self, item):
        return stripfilename(item) in self.files

    def __iter__(self):
        return self.files.__iter__()

    def __len__(self):
        return self.files.__len__()
| cc0-1.0 | Python |
a95f43767cb95d5a0137a6563c6fdbd725663aa6 | fix community dump | hasadna/OpenCommunity,yaniv14/OpenCommunity,nonZero/OpenCommunity,nonZero/OpenCommunity,yaniv14/OpenCommunity,yaniv14/OpenCommunity,nonZero/OpenCommunity,hasadna/OpenCommunity,hasadna/OpenCommunity,nonZero/OpenCommunity,hasadna/OpenCommunity,yaniv14/OpenCommunity | src/communities/management/commands/dump_community.py | src/communities/management/commands/dump_community.py | from communities.models import Community
from django.core import serializers
from django.core.management.base import BaseCommand
from issues.models import Issue, IssueComment, IssueCommentRevision, Proposal
from meetings.models import Meeting, AgendaItem, MeetingParticipant, \
MeetingExternalParticipant
from users.models import Membership, Invitation
import json
class Command(BaseCommand):
    help = "Dumps a community by it's pk"

    def handle(self, *args, **options):
        # args[0]: primary key of the community to dump.
        cid = int(args[0])

        # Querysets whose objects make up the dump; community members'
        # users come first so later records can refer to them by natural
        # key.
        sets = (
            [m.user for m in Membership.objects.filter(community_id=cid)],
            Community.objects.filter(pk=cid),
            Invitation.objects.filter(community_id=cid),
            Membership.objects.filter(community_id=cid),
            Meeting.objects.filter(community_id=cid),
            Issue.objects.filter(community_id=cid),
            IssueComment.objects.filter(issue__community_id=cid),
            IssueCommentRevision.objects.filter(comment__issue__community_id=cid),
            Proposal.objects.filter(issue__community_id=cid),
            AgendaItem.objects.filter(issue__community_id=cid),
            MeetingParticipant.objects.filter(meeting__community_id=cid),
            MeetingExternalParticipant.objects.filter(meeting__community_id=cid),
        )

        l = []
        for qs in sets:
            l += list(qs)

        # Serialize with natural keys, then null out the pks so the dump
        # can be loaded into another database without id collisions.
        j = json.loads(serializers.serialize("json", l, use_natural_keys=True, indent=4))
        for o in j:
            o['pk'] = None

        print json.dumps(j, indent=4)
| from communities.models import Community
from django.core import serializers
from django.core.management.base import BaseCommand
from issues.models import Issue, IssueComment, IssueCommentRevision, Proposal
from meetings.models import Meeting, AgendaItem, MeetingParticipant, \
MeetingExternalParticipant
from users.models import Membership, Invitation
import json
class Command(BaseCommand):
help = "Dumps a community by it's pk"
def handle(self, *args, **options):
cid = int(args[0])
sets = (
[m.user for m in Membership.objects.filter(community_id=cid)],
Community.objects.filter(pk=cid),
Invitation.objects.filter(community_id=cid),
Meeting.objects.filter(community_id=cid),
Issue.objects.filter(community_id=cid),
IssueComment.objects.filter(issue__community_id=cid),
IssueCommentRevision.objects.filter(comment__issue__community_id=cid),
Proposal.objects.filter(issue__community_id=cid),
AgendaItem.objects.filter(issue__community_id=cid),
MeetingParticipant.objects.filter(meeting__community_id=cid),
MeetingParticipant.objects.filter(meeting__community_id=cid),
MeetingExternalParticipant.objects.filter(meeting__community_id=cid),
)
l = []
for qs in sets:
l += list(qs)
j = json.loads(serializers.serialize("json", l, use_natural_keys=True, indent=4))
for o in j:
o['pk'] = None
print json.dumps(j, indent=4)
| bsd-3-clause | Python |
ffb95904557695594a93bdf203fc6f65a5b27244 | add reservation for exact day | kojdjak/django-reservations,kojdjak/django-reservations | reservations/rutils.py | reservations/rutils.py | from django.utils import timezone, dateparse
from datetime import datetime, timedelta
from .models import Reservation, Field
'''
kind of utils for django-reservations
'''
class ReservationExist(Exception):
pass
def create_reservation(field_id, res_date, reservation_time, user):
'''
Create reservation.
:param field_id: id of field for which to create reservations
:param res_date: date on which to create reservation. None -> today
:param reservation_time: time of reservation
:param user: actual user or None
:return:
'''
field = Field.objects.get(id=field_id)
today = dateparse.parse_date(res_date) if res_date else timezone.now()
time = timezone.datetime(today.year, today.month, today.day, int(reservation_time), tzinfo=timezone.now().tzinfo)
return Reservation.objects.create(name="Reservation", field=field, user=user, time=time)
def get_reservations(field_id, res_date):
field = Field.objects.get(id=field_id)
today = dateparse.parse_date(res_date)if res_date else timezone.now()
time = timezone.datetime(today.year, today.month, today.day, 0)
return Reservation.objects.filter(time__range=[time, time+timedelta(days=1)])
| from django.utils import timezone, dateparse
from datetime import datetime, timedelta
from .models import Reservation, Field
'''
kind of utils for django-reservations
'''
class ReservationExist(Exception):
pass
def create_reservation(field_id, res_date, reservation_time, user):
field = Field.objects.get(id=field_id)
today = dateparse.parse_date(res_date) if res_date else timezone.now()
time = timezone.datetime(today.year, today.month, today.day, int(reservation_time), tzinfo=timezone.now().tzinfo)
Reservation.objects.create(name="Reservation", field=field, user=user, time=time)
pass
def get_reservations(field_id, res_date):
field = Field.objects.get(id=field_id)
today = dateparse.parse_date(res_date)if res_date else timezone.now()
time = timezone.datetime(today.year, today.month, today.day, 0)
return Reservation.objects.filter(time__range=[time, time+timedelta(days=1)])
| mit | Python |
88c74cd60724fd68c30f31f87940d4558b092023 | Add char protection, remove ending line return | moreymat/omw-graph,moreymat/omw-graph,moreymat/omw-graph | parser/srcs/simpleFileParser.py | parser/srcs/simpleFileParser.py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
Simple File Parser
"""
import os
import sys
__author__ = 'Guieu Christophe, Tallot Adrien'
__date__ = '25-03-2014'
__version__ = '0.1'
dtl = []
ccl = []
deli = ''
def verifyFile(filename):
if filename == '':
print('Error : put filename')
sys.exit(-1)
elif not(os.path.isfile(filename)):
print('Error : put a valid filename')
sys.exit(-1)
def openFile(filename=''):
verifyFile(filename)
f = open(filename, 'r')
return f
def removeCR(word):
return word[0:-1]
def protectChar(word):
if '"' in word:
lw = list(word)
lw[lw.index('"')] = '\\"'
word = ''.join(lw)
return word
def parseWord(word):
word = removeCR(word)
word = protectChar(word)
return str(word)
def splitLine(line):
global deli
return line.split(deli)
def parseLine(line):
if line[0] in ccl:
return
else:
sl = splitLine(line)
if not(sl[1] in dtl):
return
word = parseWord(str(sl[2]))
return (sl[0], str(word))
def setVariable(delimitor='\t',
commentcharlist=['#'],
datatypelist=['lemma']):
global dtl
global ccl
global deli
dtl = datatypelist
ccl = commentcharlist
deli = delimitor
def parseFile(filename='', delimitor='\t',
commentcharlist=['#'],
datatypelist=['lemma']):
data = []
setVariable(delimitor, commentcharlist, datatypelist)
f = openFile(filename)
for line in f:
kv = parseLine(line)
data.append(kv)
return data
def main():
parseFile('../data/wn-data-heb.tab')
if __name__ == '__main__':
main()
| #!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
Simple File Parser
"""
import os
import sys
__author__ = 'Guieu Christophe, Tallot Adrien'
__date__ = '25-03-2014'
__version__ = '0.1'
dtl = []
ccl = []
deli = ''
def verifyFile(filename):
if filename == '':
print('Error : put filename')
sys.exit(-1)
elif not(os.path.isfile(filename)):
print('Error : put a valid filename')
sys.exit(-1)
def openFile(filename=''):
verifyFile(filename)
f = open(filename, 'r')
return f
def splitLine(line):
global deli
return line.split(deli)
def parseLine(line):
if line[0] in ccl:
return
else:
sl = splitLine(line)
if not(sl[1] in dtl):
return
return (sl[0], sl[2])
def setVariable(delimitor='\t',
commentcharlist=['#'],
datatypelist=['lemma']):
global dtl
global ccl
global deli
dtl = datatypelist
ccl = commentcharlist
deli = delimitor
def parseFile(filename='', delimitor='\t',
commentcharlist=['#'],
datatypelist=['lemma']):
data = []
setVariable(delimitor, commentcharlist, datatypelist)
f = openFile(filename)
for line in f:
kv = parseLine(line)
data.append(kv)
return data
def main():
parseFile('../data/wn-data-heb.tab')
if __name__ == '__main__':
main()
| mit | Python |
1e7a6bcd6a2236feb32ec36224a3fb224f0e374c | Add license key in __openerp__.py | bmya/server-tools,bmya/server-tools,brain-tec/server-tools,brain-tec/server-tools,brain-tec/server-tools,bmya/server-tools | dbfilter_from_header/__openerp__.py | dbfilter_from_header/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "dbfilter_from_header",
"version": "1.0",
"author": "Therp BV",
"license": "AGPL-3",
"complexity": "normal",
"description": """
This addon lets you pass a dbfilter as a HTTP header.
This is interesting for setups where database names can't be mapped to
proxied host names.
In nginx, use
proxy_set_header X-OpenERP-dbfilter [your filter];
The addon has to be loaded as server-wide module.
""",
"category": "Tools",
"depends": [
'web',
],
"data": [
],
"js": [
],
"css": [
],
"auto_install": False,
"installable": True,
"external_dependencies": {
'python': [],
},
}
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "dbfilter_from_header",
"version": "1.0",
"author": "Therp BV",
"complexity": "normal",
"description": """
This addon lets you pass a dbfilter as a HTTP header.
This is interesting for setups where database names can't be mapped to
proxied host names.
In nginx, use
proxy_set_header X-OpenERP-dbfilter [your filter];
The addon has to be loaded as server-wide module.
""",
"category": "Tools",
"depends": [
'web',
],
"data": [
],
"js": [
],
"css": [
],
"auto_install": False,
"installable": True,
"external_dependencies": {
'python': [],
},
}
| agpl-3.0 | Python |
2c4a33772817a98cacc5bd82517cfd5918711f28 | Add allowed hosts | nsavch/xanmel-web | xanmel_web/settings/prod.py | xanmel_web/settings/prod.py | from .default import *
ALLOWED_HOSTS = ['xon.teichisma.info']
DEBUG = False
TEMPLATE_DEBUG = False
with open('/etc/xanmel.yaml', 'r') as f:
XANMEL_CONFIG = yaml.safe_load(f)
XONOTIC_SERVERS = XANMEL_CONFIG['modules']['xanmel.modules.xonotic.XonoticModule']['servers']
| from .default import *
DEBUG = False
TEMPLATE_DEBUG = False
with open('/etc/xanmel.yaml', 'r') as f:
XANMEL_CONFIG = yaml.safe_load(f)
XONOTIC_SERVERS = XANMEL_CONFIG['modules']['xanmel.modules.xonotic.XonoticModule']['servers']
| agpl-3.0 | Python |
7ab9403b1733360a53681ba39a0040c4231e8382 | Fix to Rest | luizcieslak/AlGDock,gkumar7/AlGDock,gkumar7/AlGDock,gkumar7/AlGDock,luizcieslak/AlGDock,luizcieslak/AlGDock,gkumar7/AlGDock,luizcieslak/AlGDock | gui/api/REST.py | gui/api/REST.py | from flask import Flask, jsonify
import os
from cross_domain import *
app = Flask(__name__)
try:
TARGET = os.environ['TARGET']
AlGDock = os.environ['AlGDock']
except Exception:
print 'export TARGET=<path to data>'
exit(1)
import sys
sys.path.insert(0, AlGDock)
from BindingPMF_arguments import *
@app.route('/api/v1.0/proteins', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_protein_names():
proteins = os.walk(TARGET).next()[1]
protein_lst = [{"filename": protein} for protein in sorted(proteins)]
return jsonify({"files": protein_lst})
@app.route('/api/v1.0/ligands/<protein>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_ligand_names(protein):
ligands = os.walk(os.path.join(TARGET, protein, "ligand")).next()[2]
ligand_lst = [{"filename": ligand} for ligand in sorted(ligands)]
return jsonify({"files": ligand_lst})
@app.route('/api/v1.0/protocols', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_protocols():
choices = arguments['protocol']['choices']
choice_lst = [{"choice": choice} for choice in choices]
return jsonify({"protocol": choice_lst})
@app.route('/api/v1.0/run/<protein>/<ligand>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def run(protein, ligand):
return "Protein selected: %s; Ligand selected: %s" % (protein, ligand)
if __name__ == '__main__':
app.run(debug=True)
| from flask import Flask, jsonify
import os
from cross_domain import *
app = Flask(__name__)
try:
TARGET = os.environ['TARGET']
AlGDock = os.environ['AlGDock']
except Exception:
print 'export TARGET=<path to data>'
exit(1)
import sys
sys.path.insert(0, AlGDock)
from BindingPMF_arguments import *
@app.route('/api/v1.0/proteins', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_protein_names():
proteins = os.walk(TARGET).next()[1]
protein_lst = [{"filename": protein} for protein in sorted(proteins)]
return jsonify({"files": protein_lst})
@app.route('/api/v1.0/ligands/<protein>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_ligand_names(protein):
ligands = os.walk(os.path.join(TARGET, protein, "ligand")).next()[2]
ligand_lst = [{"filename": ligand} for ligand in sorted(ligands)]
return jsonify({"files": ligand_lst})
@app.route('/api/v1.0/protocols', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def get_protocols(parameter):
choices = arguments['protocol']['choices']
choice_lst = [{"choice": choice} for choice in choices]
return jsonify({"protocol": choice_lst})
@app.route('/api/v1.0/run/<protein>/<ligand>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def run(protein, ligand):
return "Protein selected: %s; Ligand selected: %s" % (protein, ligand)
if __name__ == '__main__':
app.run(debug=True)
| mit | Python |
c4d2272db2d5259ff54503a20ef92ae642ae34da | Hide swap if it is zero | yang-ling/i3pystatus,yang-ling/i3pystatus | i3pystatus/mem_swap.py | i3pystatus/mem_swap.py | from i3pystatus import IntervalModule
from psutil import swap_memory
from .core.util import round_dict
class Swap(IntervalModule):
"""
Shows memory load
.. rubric:: Available formatters
* {used_swap}
Requires psutil (from PyPI)
"""
format = "{used_swap} MiB"
divisor = 1024 ** 2
color = "#00FF00"
warn_color = "#FFFF00"
alert_color = "#FF0000"
warn_percentage = 50
alert_percentage = 80
round_size = 1
settings = (
("format", "format string used for output."),
("divisor",
"divide all byte values by this value, default is 1024**2 (megabytes)"),
("warn_percentage", "minimal percentage for warn state"),
("alert_percentage", "minimal percentage for alert state"),
("color", "standard color"),
("warn_color",
"defines the color used wann warn percentage ist exceeded"),
("alert_color",
"defines the color used when alert percentage is exceeded"),
("round_size", "defines number of digits in round"),
)
def run(self):
swap_usage = swap_memory()
used = swap_usage.used
if swap_usage.percent >= self.alert_percentage:
color = self.alert_color
elif swap_usage.percent >= self.warn_percentage:
color = self.warn_color
else:
color = self.color
cdict = {
"used_swap": used / self.divisor,
}
round_dict(cdict, self.round_size)
if used == 0:
self.output = {
"full_text": "",
}
else:
self.output = {
"full_text": self.format.format(**cdict),
"color": color
}
| from i3pystatus import IntervalModule
from psutil import swap_memory
from .core.util import round_dict
class Swap(IntervalModule):
"""
Shows memory load
.. rubric:: Available formatters
* {used_swap}
Requires psutil (from PyPI)
"""
format = "{used_swap} MiB"
divisor = 1024 ** 2
color = "#00FF00"
warn_color = "#FFFF00"
alert_color = "#FF0000"
warn_percentage = 50
alert_percentage = 80
round_size = 1
settings = (
("format", "format string used for output."),
("divisor",
"divide all byte values by this value, default is 1024**2 (megabytes)"),
("warn_percentage", "minimal percentage for warn state"),
("alert_percentage", "minimal percentage for alert state"),
("color", "standard color"),
("warn_color",
"defines the color used wann warn percentage ist exceeded"),
("alert_color",
"defines the color used when alert percentage is exceeded"),
("round_size", "defines number of digits in round"),
)
def run(self):
swap_usage = swap_memory()
used = swap_usage.used
if swap_usage.percent >= self.alert_percentage:
color = self.alert_color
elif swap_usage.percent >= self.warn_percentage:
color = self.warn_color
else:
color = self.color
cdict = {
"used_swap": used / self.divisor,
}
round_dict(cdict, self.round_size)
self.output = {
"full_text": self.format.format(**cdict),
"color": color
}
| mit | Python |
244a8ef2d3976970f8647e5fdd3979932cebe6d7 | Remove debug task from Celery | fengthedroid/heroes-of-the-storm-replay-parser,fengthedroid/heroes-of-the-storm-replay-parser,Oize/heroes-of-the-storm-replay-parser,Oize/heroes-of-the-storm-replay-parser,karlgluck/heroes-of-the-storm-replay-parser,Oize/heroes-of-the-storm-replay-parser | webserver/celery.py | webserver/celery.py | from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webserver.settings')
app = Celery('webserver')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webserver.settings')
app = Celery('webserver')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
| mit | Python |
ea6d18725f06ee76963d36c7e910a3d0a100e967 | change the name in my groups py | moranmo29/ShareLink,moranmo29/ShareLink,moranmo29/ShareLink | web/pages/mygroups.py | web/pages/mygroups.py | from google.appengine.ext.webapp import template
import webapp2
import json
from models.user import User
from models.link import Link
from models.group import Group
class IndexHandler(webapp2.RequestHandler):
def get(self):
template_params={}
user = None
if self.request.cookies.get('our_token'): #the cookie that should contain the access token!
user = User.checkToken(self.request.cookies.get('our_token'))
if not user:
html = template.render("web/templates/index.html", {})
self.response.write(html)
return
template_params['useremail'] = user.email
grouplist= Group.getAllGroups(user)
groups= []
if grouplist:
for group in grouplist:
group_name= group['group_name']
groupid=group['id']
one=[group_name,groupid]
groups.append(one)
template_params['groupss']= groups
html = template.render("web/templates/mygroups.html", template_params)
self.response.write(html)
app = webapp2.WSGIApplication([
('/mygroups', IndexHandler)
], debug=True)
| from google.appengine.ext.webapp import template
import webapp2
import json
from models.user import User
from models.link import Link
from models.group import Group
class IndexHandler(webapp2.RequestHandler):
def get(self):
template_params={}
user = None
if self.request.cookies.get('our_token'): #the cookie that should contain the access token!
user = User.checkToken(self.request.cookies.get('our_token'))
if not user:
html = template.render("web/templates/index.html", {})
self.response.write(html)
return
template_params['useremail'] = user.email
grouplist= Group.getAllGroups(user)
groups= []
if grouplist:
for group in grouplist:
group_name= group['group_name']
groupid=group['id']
one=[group_name,groupid]
groups.append(one)
template_params['groups']= groups
html = template.render("web/templates/mygroups.html", template_params)
self.response.write(html)
app = webapp2.WSGIApplication([
('/mygroups', IndexHandler)
], debug=True)
| mit | Python |
e4af848276255543e0ece169068a5d46fdd0549a | add specific LaTeX processing exception | cstrelioff/resumepy,cstrelioff/resumepy | resumepy/exceptions.py | resumepy/exceptions.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 Christopher C. Strelioff <chris.strelioff@gmail.com>
#
# Distributed under terms of the MIT license.
"""exceptions.py
Exceptions for the resumepy packge.
"""
class resumepyException(Exception):
"""Root resumepy Exception."""
pass
class CreateDirError(resumepyException):
"""Exception raised for error creating directory."""
pass
class CreateFileError(resumepyException):
"""Exception raised for error creating file."""
pass
class DirError(resumepyException):
"""Exception raised for error finding directory."""
pass
class FileError(resumepyException):
"""Exception raised for error finding file."""
pass
class LaTeXError(resumepyException):
"""Exception raised when running LaTeX."""
pass
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 Christopher C. Strelioff <chris.strelioff@gmail.com>
#
# Distributed under terms of the MIT license.
"""exceptions.py
Exceptions for the resumepy packge.
"""
class resumepyException(Exception):
"""Root resumepy Exception."""
pass
class CreateDirError(resumepyException):
"""Exception raised for error creating directory."""
pass
class CreateFileError(resumepyException):
"""Exception raised for error creating file."""
pass
class DirError(resumepyException):
"""Exception raised for error finding directory."""
pass
class FileError(resumepyException):
"""Exception raised for error finding file."""
pass
| mit | Python |
d151e632cbcdc033d0687922c95d70b69dcb7233 | test for searchUID for full record | tainstr/misura.canon,tainstr/misura.canon | misura/canon/indexer/tests/test_indexer.py | misura/canon/indexer/tests/test_indexer.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from misura import parameters as params
from misura.canon import indexer
import os
cur_dir = os.path.dirname(os.path.realpath(__file__))
paths = [cur_dir + '/files']
dbPath = cur_dir + '/files/test.sqlite'
class Indexer(unittest.TestCase):
def setUp(self):
self.indexer = indexer.Indexer(paths=paths)
self.indexer.open_db(dbPath)
self.indexer.close_db()
def test_rebuild(self):
self.indexer.rebuild()
self.assertEqual(2, self.indexer.get_len())
def test_header(self):
header = self.indexer.header()
self.assertEqual(['file', 'serial', 'uid', 'id', 'date', 'instrument',
'flavour', 'name', 'elapsed', 'nSamples', 'comment', 'verify'], header)
def test_query(self):
result = self.indexer.query()
instrument = result[0][5]
result = self.indexer.query({'instrument': instrument})
self.assertEqual(len(result), 1)
result = self.indexer.query({'instrument': 'pippo'})
self.assertEqual(len(result), 0)
def test_searchUID(self):
result = self.indexer.searchUID('eadd3abc68fa78ad64eb6df7174237a0')
self.assertEqual(result, cur_dir + '/files/dummy1.h5')
def test_searchUIDFull(self):
result = self.indexer.searchUID('eadd3abc68fa78ad64eb6df7174237a0', True)
self.assertEqual(result, (cur_dir + '/files/dummy1.h5',))
if __name__ == "__main__":
unittest.main()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from misura import parameters as params
from misura.canon import indexer
import os
cur_dir = os.path.dirname(os.path.realpath(__file__))
paths = [cur_dir + '/files']
dbPath = cur_dir + '/files/test.sqlite'
class Indexer(unittest.TestCase):
def setUp(self):
self.indexer = indexer.Indexer(paths=paths)
self.indexer.open_db(dbPath)
self.indexer.close_db()
def test_rebuild(self):
self.indexer.rebuild()
self.assertEqual(2, self.indexer.get_len())
def test_header(self):
header = self.indexer.header()
self.assertEqual(['file', 'serial', 'uid', 'id', 'date', 'instrument',
'flavour', 'name', 'elapsed', 'nSamples', 'comment', 'verify'], header)
def test_query(self):
result = self.indexer.query()
instrument = result[0][5]
result = self.indexer.query({'instrument': instrument})
self.assertEqual(len(result), 1)
result = self.indexer.query({'instrument': 'pippo'})
self.assertEqual(len(result), 0)
def test_searchUID(self):
result = self.indexer.searchUID('eadd3abc68fa78ad64eb6df7174237a0')
self.assertEqual(result, cur_dir + '/files/dummy1.h5')
if __name__ == "__main__":
unittest.main()
| mit | Python |
09ffeb2f53b853914056e165e4e9824e025d2d31 | Add missing dependency. | mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju | utility.py | utility.py | from contextlib import contextmanager
import errno
import os
import socket
import sys
from jujupy import until_timeout
@contextmanager
def scoped_environ():
old_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(old_environ)
def wait_for_port(host, port, closed=False, timeout=30):
for remaining in until_timeout(timeout):
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.settimeout(max(remaining, 5))
try:
conn.connect((host, port))
except socket.timeout:
if closed:
return
except socket.error as e:
if e.errno != errno.ECONNREFUSED:
raise
if closed:
return
except Exception as e:
print_now('Unexpected %r: %s' % (type(e), e))
raise
else:
conn.close()
if not closed:
return
sleep(1)
raise Exception('Timed out waiting for port.')
def print_now(string):
print(string)
sys.stdout.flush()
| from contextlib import contextmanager
import errno
import os
import socket
from jujupy import until_timeout
@contextmanager
def scoped_environ():
old_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(old_environ)
def wait_for_port(host, port, closed=False, timeout=30):
for remaining in until_timeout(timeout):
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.settimeout(max(remaining, 5))
try:
conn.connect((host, port))
except socket.timeout:
if closed:
return
except socket.error as e:
if e.errno != errno.ECONNREFUSED:
raise
if closed:
return
except Exception as e:
print_now('Unexpected %r: %s' % (type(e), e))
raise
else:
conn.close()
if not closed:
return
sleep(1)
raise Exception('Timed out waiting for port.')
def print_now(string):
print(string)
sys.stdout.flush()
| agpl-3.0 | Python |
24a101c333ddc58fd4d00f96e80acbe37d747a03 | Add genre properties | AudioCommons/ac-annotator,AudioCommons/ac-annotator,AudioCommons/ac-annotator | music_annotator/views.py | music_annotator/views.py | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
import json
def index(request):
freesound_ids = ['181425', '370934', '191630', '191630', '232014', '219056', '325407']
return render(request, 'choose_sound.html', {'freesound_ids': freesound_ids})
@csrf_exempt
def annotate(request, fsid):
if request.method == 'POST':
print(request.POST)
# create a fake schema for now
schema_dict = {'content_types': {'note': ['note', 'instrument'], 'chord': ['chord', 'instrument'],
'melody': ['instrument', 'mood', 'genre'], 'chord progression': ['mood', 'genre'],
'percussive hit': ['percussion'], 'rhythm pattern': ['mood', 'genre'],
'musical loop': ['mood', 'genre'], 'texture/drone': ['mood', 'genre']},
'proprieties': {'note': ['C', 'C#/D♭', 'D', 'D#/E♭', 'E', 'F', 'F#/G♭', 'G', 'G#/A♭', 'A', 'A#/B♭', 'B'],
'instrument': ['piano', 'guitar', 'violin', 'bass', 'accordion', 'saxophone', 'trumpet'],
'chord': ['C', 'C#/D♭', 'D', 'D#/E♭', 'E', 'F', 'F#/G♭', 'G', 'G#/A♭', 'A', 'A#/B♭', 'B'],
'mood': ['happy', 'funny', 'sad', 'tender', 'exciting', 'angry', 'scary'],
'percussion': ['kick', 'snare', 'hi-hat', 'tom', 'crash', 'ride'],
'genre': ['pop', 'hip hop', 'rock', 'blues', 'soul', 'reggae', 'country', 'funk',
'folk', 'jazz', 'classical', 'electronic'],
}}
json_string = json.dumps(schema_dict)
return render(request, 'annotate.html', {'schema': json_string, 'sound_id': fsid})
| from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
import json
def index(request):
freesound_ids = ['181425', '370934', '191630', '191630', '232014', '219056', '325407']
return render(request, 'choose_sound.html', {'freesound_ids': freesound_ids})
@csrf_exempt
def annotate(request, fsid):
if request.method == 'POST':
print(request.POST)
# create a fake schema for now
schema_dict = {'content_types': {'note': ['note', 'instrument'], 'chord': ['chord', 'instrument'],
'melody': ['instrument', 'mood'], 'chord progression': ['mood'],
'percussive hit': ['percussion'], 'rhythm pattern': ['mood'],
'musical loop': ['mood'], 'texture/drone': ['mood']},
'proprieties': {'note': ['C', 'C#/D♭', 'D', 'D#/E♭', 'E', 'F', 'F#/G♭', 'G', 'G#/A♭', 'A', 'A#/B♭', 'B'],
'instrument': ['piano', 'guitar', 'violin', 'bass', 'accordion', 'saxophone', 'trumpet'],
'chord': ['C', 'C#/D♭', 'D', 'D#/E♭', 'E', 'F', 'F#/G♭', 'G', 'G#/A♭', 'A', 'A#/B♭', 'B'],
'mood': ['happy', 'funny', 'sad', 'tender', 'exciting', 'angry', 'scary'],
'percussion': ['kick', 'snare', 'hi-hat', 'tom', 'crash', 'ride'],
}}
json_string = json.dumps(schema_dict)
return render(request, 'annotate.html', {'schema': json_string, 'sound_id': fsid})
| apache-2.0 | Python |
946aa536852ddd191aa4cb9550702b014c68dae4 | Check pep8 | Stark-Mountain/meetup-facebook-bot,Stark-Mountain/meetup-facebook-bot | tests/models/talk_test.py | tests/models/talk_test.py | from unittest import TestCase
from unittest.mock import patch, MagicMock
from meetup_facebook_bot.models.talk import Talk
class TalkTestCase(TestCase):
def setUp(self):
self.db_session = MagicMock()
self.like_mock = MagicMock()
self.user_id = 1
self.talk_id = 1
self.talk = Talk(id=self.talk_id)
@patch('meetup_facebook_bot.models.talk.Like')
def test_set_like(self, like_class_mock):
like_class_mock.return_value = self.like_mock
self.talk.is_liked_by = MagicMock(return_value=False)
self.talk.set_like(self.user_id, self.db_session)
like_class_mock.assert_called_once_with(user_facebook_id=self.user_id, talk_id=self.talk_id)
self.db_session.add.assert_called_once_with(self.like_mock)
def test_unset_like(self):
self.talk.is_liked_by = MagicMock(return_value=True)
scalar_mock = MagicMock(return_value=self.like_mock)
self.db_session.query().filter_by().scalar = scalar_mock
self.talk.unset_like(self.user_id, self.db_session)
self.db_session.delete.assert_called_once_with(self.like_mock)
| from unittest import TestCase
from unittest.mock import patch, MagicMock
from meetup_facebook_bot.models.talk import Talk
class TalkTestCase(TestCase):
def setUp(self):
self.db_session = MagicMock()
self.user_id = 1
self.talk_id = 1
@patch('meetup_facebook_bot.models.talk.Like')
def test_set_like(self, like_class_mock):
talk = Talk(id=self.talk_id)
mock_like = MagicMock()
like_class_mock.return_value = mock_like
talk.is_liked_by = MagicMock(return_value=False)
talk.set_like(self.user_id, self.db_session)
like_class_mock.assert_called_once_with(user_facebook_id=self.user_id, talk_id=self.talk_id)
self.db_session.add.assert_called_once_with(mock_like)
def test_unset_like(self):
talk = Talk(id=self.talk_id)
talk.is_liked_by = MagicMock(return_value=True)
like_mock = MagicMock()
scalar_mock = MagicMock(return_value=like_mock)
self.db_session.query().filter_by().scalar = scalar_mock
talk.unset_like(self.user_id, self.db_session)
self.db_session.delete.assert_called_once_with(like_mock)
def test_revert_like(self):
pass
| mit | Python |
51e2515f1d8fe595f14edc40d5ea34fcb4b6844c | Bump version post-release | praekeltfoundation/seaworthy | seaworthy/__init__.py | seaworthy/__init__.py | """
seaworthy
~~~~~~~~~
.. todo::
Write some API reference docs for :mod:`seaworthy`.
"""
from .helpers import DockerHelper
from .logs import output_lines, wait_for_logs_matching
__all__ = ['DockerHelper', 'output_lines', 'wait_for_logs_matching']
__version__ = '0.2.2.dev0'
| """
seaworthy
~~~~~~~~~
.. todo::
Write some API reference docs for :mod:`seaworthy`.
"""
from .helpers import DockerHelper
from .logs import output_lines, wait_for_logs_matching
__all__ = ['DockerHelper', 'output_lines', 'wait_for_logs_matching']
__version__ = '0.2.1'
| bsd-3-clause | Python |
1e6f4c9bb79c4709ea17005025aa976ab035fe30 | switch to spaces indentation | fcfort/nyc-parking-ticket-checker | ticket_checker.py | ticket_checker.py | import argparse
import mechanize
import re
from bs4 import BeautifulSoup
BEAUTIFUL_SOUP_PARSER = "html.parser"
parser = argparse.ArgumentParser()
parser.add_argument('--violation', help='Violation #')
args = parser.parse_args()
br = mechanize.Browser()
# Get first URL
br.open("http://www1.nyc.gov/assets/finance/jump/pay_parking_camera_violations.html")
# Follow redirect contained in iframe src
soup = BeautifulSoup(br.response().read(), BEAUTIFUL_SOUP_PARSER)
br.open(soup.body.iframe['src'])
# Set violation #
br.select_form(nr=0) # Form has no `name`
# Because there is both a non-mobile and mobile version on the page, we need
# to find the first one and set it.
br.find_control(name='args.VIOLATION_NUMBER_NOL', nr=0).value = args.violation
# Remove duplicate form controls, otherwise we get an error from the server.
form_names_set = set([])
for control in br.form.controls[:]:
if control.name in form_names_set:
br.form.controls.remove(control)
else:
form_names_set.add(control.name)
# Submit form
br.submit()
# Look for violation response text
soup = BeautifulSoup(br.response().read(), BEAUTIFUL_SOUP_PARSER)
# Errors are put into a class `global-violation-prompt` div tag.
error_tags = soup.find_all(class_='global-violation-prompt')
if error_tags:
for tag in error_tags:
print tag.string
else:
match = re.search(r'No matches found for your violation search', html)
if match:
print "No tickets found for violation # " + args.violation
else:
print "Found a ticket for violation # " + args.violation
| import argparse
import mechanize
import re
from bs4 import BeautifulSoup
BEAUTIFUL_SOUP_PARSER = "html.parser"
parser = argparse.ArgumentParser()
parser.add_argument('--violation', help='Violation #')
args = parser.parse_args()
br = mechanize.Browser()
# Get first URL
br.open("http://www1.nyc.gov/assets/finance/jump/pay_parking_camera_violations.html")
# Follow redirect contained in iframe src
soup = BeautifulSoup(br.response().read(), BEAUTIFUL_SOUP_PARSER)
br.open(soup.body.iframe['src'])
# Set violation #
br.select_form(nr=0) # Form has no `name`
# Because there is both a non-mobile and mobile version on the page, we need
# to find the first one and set it.
br.find_control(name='args.VIOLATION_NUMBER_NOL', nr=0).value = args.violation
# Remove duplicate form controls, otherwise we get an error from the server.
form_names_set = set([])
for control in br.form.controls[:]:
if control.name in form_names_set:
br.form.controls.remove(control)
else:
form_names_set.add(control.name)
# Submit form
br.submit()
# Look for violation response text
soup = BeautifulSoup(br.response().read(), BEAUTIFUL_SOUP_PARSER)
# Errors are put into a class `global-violation-prompt` div tag.
error_tags = soup.find_all(class_='global-violation-prompt')
if error_tags:
for tag in error_tags:
print tag.string
else:
match = re.search(r'No matches found for your violation search', html)
if match:
print "No tickets found for violation # " + args.violation
else:
print "Found a ticket for violation # " + args.violation
| apache-2.0 | Python |
37ad21112e41133450284f9d99d323cae901dd06 | Update version | fedelemantuano/tika-app-python | tikapp/version.py | tikapp/version.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2016 Fedele Mantuano (https://twitter.com/fedelemantuano)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__version__ = "1.5.0"
if __name__ == "__main__":
print(__version__)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2016 Fedele Mantuano (https://twitter.com/fedelemantuano)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__version__ = "1.4.0"
if __name__ == "__main__":
print(__version__)
| apache-2.0 | Python |
122a870812f3000639a439943bde980a6810a22e | Check output length before sending | corpnewt/CorpBot.py,corpnewt/CorpBot.py | Cogs/Ascii.py | Cogs/Ascii.py | from discord.ext import commands
from Cogs import Utils, DisplayName, PickList, FuzzySearch, Message
import pyfiglet
def setup(bot):
# Add the bot
bot.add_cog(Ascii(bot))
class Ascii(commands.Cog):
def __init__(self, bot):
self.bot = bot
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
self.font_list = pyfiglet.FigletFont.getFonts()
@commands.command(aliases=["font","fonts","fontlist"])
async def asciifonts(self, ctx, search_term = None):
"""Lists the available ascii fonts."""
if search_term is None:
return await PickList.PagePicker(
title="Available ASCII Fonts ({:,} total)".format(len(self.font_list)),
description="\n".join(["{}. {}".format(str(i).rjust(3),x) for i,x in enumerate(self.font_list,start=1)]),
d_header="```\n",
d_footer="\n```",
ctx=ctx
).pick()
# Let's see if it's a full match
if search_term.lower() in self.font_list:
return await Message.Embed(
title="Font Exists",
description="`{}` is in the font list.".format(search_term.lower()),
color=ctx.author
).send(ctx)
# Let's get 3 close matches
font_match = FuzzySearch.search(search_term.lower(), self.font_list)
font_mess = "\n".join(["`└─ {}`".format(x["Item"]) for x in font_match])
await Message.Embed(
title="Font \"{}\" Not Fount".format(search_term),
fields=[{"name":"Close Font Matches:","value":font_mess}],
color=ctx.author
).send(ctx)
@commands.command(pass_context=True, no_pm=True)
async def ascii(self, ctx, *, text : str = None):
"""Beautify some text."""
if text is None: return await ctx.channel.send('Usage: `{}ascii [font (optional)] [text]`'.format(ctx.prefix))
font = None
# Split text by space - and see if the first word is a font
parts = text.split()
if len(parts) > 1 and parts[0].lower() in self.font_list:
# We got a font!
font = parts[0]
text = " ".join(parts[1:])
output = pyfiglet.figlet_format(text,font=font if font else pyfiglet.DEFAULT_FONT)
if not output: return await ctx.send("I couldn't beautify that text :(")
await ctx.send("```\n{}```".format(output))
| from discord.ext import commands
from Cogs import Utils, DisplayName, PickList, FuzzySearch, Message
import pyfiglet
def setup(bot):
# Add the bot
bot.add_cog(Ascii(bot))
class Ascii(commands.Cog):
def __init__(self, bot):
self.bot = bot
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
self.font_list = pyfiglet.FigletFont.getFonts()
@commands.command(aliases=["font","fonts","fontlist"])
async def asciifonts(self, ctx, search_term = None):
"""Lists the available ascii fonts."""
if search_term is None:
return await PickList.PagePicker(
title="Available ASCII Fonts ({:,} total)".format(len(self.font_list)),
description="\n".join(["{}. {}".format(str(i).rjust(3),x) for i,x in enumerate(self.font_list,start=1)]),
d_header="```\n",
d_footer="\n```",
ctx=ctx
).pick()
# Let's see if it's a full match
if search_term.lower() in self.font_list:
return await Message.Embed(
title="Font Exists",
description="`{}` is in the font list.".format(search_term.lower()),
color=ctx.author
).send(ctx)
# Let's get 3 close matches
font_match = FuzzySearch.search(search_term.lower(), self.font_list)
font_mess = "\n".join(["`└─ {}`".format(x["Item"]) for x in font_match])
await Message.Embed(
title="Font \"{}\" Not Fount".format(search_term),
fields=[{"name":"Close Font Matches:","value":font_mess}],
color=ctx.author
).send(ctx)
@commands.command(pass_context=True, no_pm=True)
async def ascii(self, ctx, *, text : str = None):
"""Beautify some text."""
if text is None: return await ctx.channel.send('Usage: `{}ascii [font (optional)] [text]`'.format(ctx.prefix))
font = None
# Split text by space - and see if the first word is a font
parts = text.split()
if len(parts) > 1 and parts[0].lower() in self.font_list:
# We got a font!
font = parts[0]
text = " ".join(parts[1:])
await ctx.send("```\n{}```".format(pyfiglet.figlet_format(text,font=font if font else pyfiglet.DEFAULT_FONT)))
| mit | Python |
0ce8f07d670d5fb6d5e52669ea97ef10c7a784e2 | update number of neighbors for knn graph | usc-isi-i2/WEDC,usc-isi-i2/WEDC | wedc/domain/service/category_identification/graph/knn_graph.py | wedc/domain/service/category_identification/graph/knn_graph.py |
from sklearn.neighbors import NearestNeighbors
from sklearn import preprocessing
import numpy as np
# X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
# X = np.array([])
# X = np.append(X, [[1, 1, 0, 0]], axis=0)
# X = np.append(X, [[0, 0, 0, 0]], axis=0)
# print X
# X = np.array(np.mat('1 2; 3 4'))
# X = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
# nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(X)
# distances, indices = nbrs.kneighbors(X)
# print indices
def build_graph(input, output, n_neighbors=10, algorithm='ball_tree'):
n_neighbors += 1
input_fh = open(input, 'rb')
output_fh = open(output, 'wb')
lines = input_fh.readlines()[:100]
size = len(lines)
lines = ';'.join(lines)
X = np.array(np.mat(lines))
nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm=algorithm).fit(X)
# Because the query set matches the training set, the nearest neighbor of each point is the point itself, at a distance of zero.
distances, indices = nbrs.kneighbors(X)
distances = preprocessing.normalize(distances, norm='l2')
for post_id in range(0, size):
post_indices = indices[post_id]
post_k_distances = distances[post_id]
# change to start from 1 for lab propagation library input format
graph_item = [post_id+1, 0]
post_neighbors = []
for idx in range(n_neighbors):
if post_id == post_indices[idx]:
continue
post_neighbors.append([post_indices[idx]+1, 1-post_k_distances[idx]])
graph_item.append(post_neighbors)
output_fh.write(str(graph_item)+'\n')
# output.write(' '.join(vector) + '\n')
input_fh.close()
output_fh.close()
|
from sklearn.neighbors import NearestNeighbors
from sklearn import preprocessing
import numpy as np
# X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
# X = np.array([])
# X = np.append(X, [[1, 1, 0, 0]], axis=0)
# X = np.append(X, [[0, 0, 0, 0]], axis=0)
# print X
# X = np.array(np.mat('1 2; 3 4'))
# X = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
# nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(X)
# distances, indices = nbrs.kneighbors(X)
# print indices
def build_graph(input, output, n_neighbors=5, algorithm='ball_tree'):
input_fh = open(input, 'rb')
output_fh = open(output, 'wb')
lines = input_fh.readlines()[:100]
size = len(lines)
lines = ';'.join(lines)
X = np.array(np.mat(lines))
nbrs = NearestNeighbors(n_neighbors=n_neighbors+1, algorithm=algorithm).fit(X)
# Because the query set matches the training set, the nearest neighbor of each point is the point itself, at a distance of zero.
distances, indices = nbrs.kneighbors(X)
distances = preprocessing.normalize(distances, norm='l2')
for post_id in range(0, size):
post_indices = indices[post_id]
post_k_distances = distances[post_id]
# change to start from 1 for lab propagation library input format
graph_item = [post_id+1, 0]
post_neighbors = []
for idx in range(n_neighbors):
if post_id == post_indices[idx]:
continue
post_neighbors.append([post_indices[idx]+1, 1-post_k_distances[idx]])
graph_item.append(post_neighbors)
output_fh.write(str(graph_item)+'\n')
# output.write(' '.join(vector) + '\n')
input_fh.close()
output_fh.close()
| apache-2.0 | Python |
1f9964e0a39e405c06c274f50cc18d03a181cdec | remove what is hopefully unncessary code | opencord/xos,zdw/xos,opencord/xos,cboling/xos,zdw/xos,cboling/xos,cboling/xos,zdw/xos,cboling/xos,open-cloud/xos,open-cloud/xos,cboling/xos,zdw/xos,opencord/xos,open-cloud/xos | xos/observers/helloworldservice/steps/sync_helloworldtenant.py | xos/observers/helloworldservice/steps/sync_helloworldtenant.py | import os
import sys
from django.db.models import Q, F
from helloworldservice.models import HelloWorldService, HelloWorldTenant
from observers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
parentdir = os.path.join(os.path.dirname(__file__), "..")
sys.path.insert(0, parentdir)
# Class to define how we sync a tenant. Using SyncInstanceUsingAnsible we
# indicate where the find the YAML for ansible, where to find the SSH key,
# and the logic for determining what tenant needs updating, what additional
# attributes are needed, and how to delete an instance.
class SyncHelloWorldServiceTenant(SyncInstanceUsingAnsible):
# Indicates the position in the data model, this will run when XOS needs to
# enact a HelloWorldTenant
provides = [HelloWorldTenant]
# The actual model being enacted, usually the same as provides.
observes = HelloWorldTenant
# Number of miliseconds between interruptions of the observer
requested_interval = 0
# The ansible template to run
template_name = "sync_helloworldtenant.yaml"
# The location of the SSH private key to use when ansible connects to
# instances.
service_key_name = "/opt/xos/observers/helloworldservice/helloworldservice_private_key"
def __init__(self, *args, **kwargs):
super(SyncHelloWorldServiceTenant, self).__init__(*args, **kwargs)
# Defines the logic for determining what HelloWorldTenants need to be
# enacted.
def fetch_pending(self, deleted):
# If the update is not a deletion, then we get all of the instnaces that
# have been updated or have not been enacted.
if (not deleted):
objs = HelloWorldTenant.get_tenant_objects().filter(
Q(enacted__lt=F('updated')) | Q(enacted=None), Q(lazy_blocked=False))
else:
# If this is a deletion we get all of the deleted tenants..
objs = HelloWorldTenant.get_deleted_tenant_objects()
return objs
# Gets the attributes that are used by the Ansible template but are not
# part of the set of default attributes.
def get_extra_attributes(self, o):
return {"display_message": o.display_message}
# Defines how to delete a HelloWorldTenant, since we don't have anyhting
# special to cleanup or dependencies we do nothing.
# def delete_record(self, m):
# return
| import os
import sys
from django.db.models import Q, F
from helloworldservice.models import HelloWorldService, HelloWorldTenant
from observers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
parentdir = os.path.join(os.path.dirname(__file__), "..")
sys.path.insert(0, parentdir)
# Class to define how we sync a tenant. Using SyncInstanceUsingAnsible we
# indicate where the find the YAML for ansible, where to find the SSH key,
# and the logic for determining what tenant needs updating, what additional
# attributes are needed, and how to delete an instance.
class SyncHelloWorldServiceTenant(SyncInstanceUsingAnsible):
# Indicates the position in the data model, this will run when XOS needs to
# enact a HelloWorldTenant
provides = [HelloWorldTenant]
# The actual model being enacted, usually the same as provides.
observes = HelloWorldTenant
# Number of miliseconds between interruptions of the observer
requested_interval = 0
# The ansible template to run
template_name = "sync_helloworldtenant.yaml"
# The location of the SSH private key to use when ansible connects to
# instances.
service_key_name = "/opt/xos/observers/helloworldservice/helloworldservice_private_key"
def __init__(self, *args, **kwargs):
super(SyncHelloWorldServiceTenant, self).__init__(*args, **kwargs)
# Defines the logic for determining what HelloWorldTenants need to be
# enacted.
def fetch_pending(self, deleted):
# If the update is not a deletion, then we get all of the instnaces that
# have been updated or have not been enacted.
if (not deleted):
objs = HelloWorldTenant.get_tenant_objects().filter(
Q(enacted__lt=F('updated')) | Q(enacted=None), Q(lazy_blocked=False))
else:
# If this is a deletion we get all of the deleted tenants..
objs = HelloWorldTenant.get_deleted_tenant_objects()
return objs
# Gets the attributes that are used by the Ansible template but are not
# part of the set of default attributes.
def get_extra_attributes(self, o):
return {"display_message": o.display_message}
# Defines how to delete a HelloWorldTenant, since we don't have anyhting
# special to cleanup or dependencies we do nothing.
def delete_record(self, m):
return
| apache-2.0 | Python |
96e28bc426947d6e30000f7d867adebf6a1fe267 | Set version as 0.4.6 | Alignak-monitoring-contrib/alignak-notifications,Alignak-monitoring-contrib/alignak-notifications | version.py | version.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2017:
# Frederic Mohier, frederic.mohier@alignak.net
# David Durieux. david.durieux@alignak.net
#
"""
Alignak - Checks pack for mail, slack,... notifications
"""
# Package name
__pkg_name__ = u"alignak_notifications"
# Checks types for PyPI keywords
# Used for:
# - PyPI keywords
# - directory where to store files in the Alignak configuration (eg. arbiter/packs/checks_type)
__checks_type__ = u"notifications"
# Application manifest
__version__ = u"0.4.6"
__author__ = u"Frédéric MOHIER"
__author_email__ = u"frederic.mohier@alignak.net"
__copyright__ = u"(c) 2015-2017 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__git_url__ = u"https://github.com/Alignak-monitoring-contrib/alignak-checks-wmi"
__doc_url__ = u"http://alignak-doc.readthedocs.io/en/latest"
__description__ = u"Alignak notifications script pack"
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2017:
# Frederic Mohier, frederic.mohier@alignak.net
# David Durieux. david.durieux@alignak.net
#
"""
Alignak - Checks pack for mail, slack,... notifications
"""
# Package name
__pkg_name__ = u"alignak_notifications"
# Checks types for PyPI keywords
# Used for:
# - PyPI keywords
# - directory where to store files in the Alignak configuration (eg. arbiter/packs/checks_type)
__checks_type__ = u"notifications"
# Application manifest
__version__ = u"0.4.5"
__author__ = u"Frédéric MOHIER"
__author_email__ = u"frederic.mohier@alignak.net"
__copyright__ = u"(c) 2015-2017 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__git_url__ = u"https://github.com/Alignak-monitoring-contrib/alignak-checks-wmi"
__doc_url__ = u"http://alignak-doc.readthedocs.io/en/latest"
__description__ = u"Alignak notifications script pack"
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
| agpl-3.0 | Python |
adbcb045d89fcb20f5ed758e985ba0f432178f6b | Set version as 0.2.2 | Alignak-monitoring-contrib/alignak-module-ws,Alignak-monitoring-contrib/alignak-module-ws | version.py | version.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Alignak - Receiver module for the external commands
"""
# Package name
__pkg_name__ = u"alignak_module_ws"
# Module type for PyPI keywords
# Used for:
# - PyPI keywords
__module_types__ = u"web-services"
# Application manifest
__version__ = u"0.2.2"
__author__ = u"Alignak team"
__author_email__ = u"frederic.mohier@gmail.com"
__copyright__ = u"(c) 2015-2016 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__url__ = u"https://github.com/Alignak-monitoring-contrib/alignak-module-ws"
__description__ = u"Alignak - Receiver module for Alignak Web Services"
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Alignak - Receiver module for the external commands
"""
# Package name
__pkg_name__ = u"alignak_module_ws"
# Module type for PyPI keywords
# Used for:
# - PyPI keywords
__module_types__ = u"web-services"
# Application manifest
__version__ = u"0.2.1"
__author__ = u"Alignak team"
__author_email__ = u"frederic.mohier@gmail.com"
__copyright__ = u"(c) 2015-2016 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__url__ = u"https://github.com/Alignak-monitoring-contrib/alignak-module-ws"
__description__ = u"Alignak - Receiver module for Alignak Web Services"
| agpl-3.0 | Python |
13ec50a7e2187edb03174ed4a9dbf8767f4c6ad4 | Tag commit for v0.0.8-master generated by gitmake.py | ryansturmer/gitmake | version.py | version.py | major = 0
minor=0
patch=8
branch="master"
timestamp=1376412892.53 | major = 0
minor=0
patch=0
branch="dev"
timestamp=1376412824.91 | mit | Python |
5770754c7b451a9e175b4906a26dfae5ce4fe1e6 | Enable optimization. | ncareol/spatialdb,ncareol/spatialdb | tool_spatialdb.py | tool_spatialdb.py | import os
import sys
import eol_scons
tools = ['sqlitedb','doxygen','prefixoptions']
env = Environment(tools = ['default'] + tools)
thisdir = env.Dir('.').srcnode().abspath
libsources = Split("""
SpatiaLiteDB.cpp
""")
headers = Split("""
SpatiaLiteDB.h
""")
env.AppendUnique(CPPDEFINES=['SPATIALITE_AMALGAMATION',])
libspatialdb = env.Library('spatialdb', libsources)
env.Default(libspatialdb)
html = env.Apidocs(libsources + headers, DOXYFILE_DICT={'PROJECT_NAME':'SpatiaLiteDB', 'PROJECT_NUMBER':'1.0'})
env.Default(html)
def spatialdb(env):
env.AppendUnique(CPPPATH =[thisdir,])
env.AppendLibrary('spatialdb')
env.AppendLibrary('geos')
env.AppendLibrary('geos_c')
env.AppendLibrary('proj')
env.AppendLibrary('iconv')
env.AppendDoxref('SpatialDB')
env.AppendUnique(CPPDEFINES=['SPATIALITE_AMALGAMATION',])
env.Replace(CCFLAGS=['-g','-O2'])
env.Require(tools)
Export('spatialdb')
| import os
import sys
import eol_scons
tools = ['sqlitedb','doxygen','prefixoptions']
env = Environment(tools = ['default'] + tools)
thisdir = env.Dir('.').srcnode().abspath
libsources = Split("""
SpatiaLiteDB.cpp
""")
headers = Split("""
SpatiaLiteDB.h
""")
env.AppendUnique(CPPDEFINES=['SPATIALITE_AMALGAMATION',])
libspatialdb = env.Library('spatialdb', libsources)
env.Default(libspatialdb)
html = env.Apidocs(libsources + headers, DOXYFILE_DICT={'PROJECT_NAME':'SpatiaLiteDB', 'PROJECT_NUMBER':'1.0'})
env.Default(html)
def spatialdb(env):
env.AppendUnique(CPPPATH =[thisdir,])
env.AppendLibrary('spatialdb')
env.AppendLibrary('geos')
env.AppendLibrary('geos_c')
env.AppendLibrary('proj')
env.AppendLibrary('iconv')
env.AppendDoxref('SpatialDB')
env.AppendUnique(CPPDEFINES=['SPATIALITE_AMALGAMATION',])
env.Require(tools)
Export('spatialdb')
| bsd-3-clause | Python |
cf7aed705dbfeaa8b9e56a0649fd70f48d80d33f | bump protocol version number, to see which servers have get_header and get_merkle | protonn/electrum-dgb-server,repos-bitcoin/electrum-server,erasmospunk/electrum-server,cryptapus/electrum-server,mazaclub/electrum-nmc-server,thelazier/electrum-dash-server,Kefkius/encompass-mercury,electrumalt/electrum-ixc-server,electrumalt/electrum-doge-server,vialectrum/vialectrum-server,lbryio/lbryum-server,nmarley/electrum-dash-server,erasmospunk/electrum-nvc-server,argentumproject/electrum-arg-server,doged/encompass-mercury,okcashpro/electrum-ok-server,Kefkius/electrum-nmc-server,CryptoManiac/electrum-server,AsiaCoin/electrum-server,testalt/electrum-ppc-server,mazaclub/tate-server,cryptapus/electrum-server-myr,fsb4000/electrum-server,okcashpro/electrum-ok-server,testalt/electrum-drk-server,UberPay/electrum-nmc-server,GroestlCoin/electrum-grs-server,AsiaCoin/electrum-server,sorce/electrum-server,vcoin-project/electrum-server-vcn,visvirial/electrum-server,CryptoManiac/electrum-server,joshafest/electrum-fnx,OverlordQ/electrum-server,GroestlCoin/electrum-grs-server,TrainMAnB/electrum-vcn-server,erasmospunk/electrum-server,visvirial/electrum-server,cryptapus/electrum-server-uno,pooler/electrum-ltc-server,habibmasuro/electrum-nvc-server,lbryio/lbryum-server,Kefkius/scallop-server,bauerj/electrum-server,eXcomm/electrum-server,Kefkius/encompass-mercury,ALEXIUMCOIN/electrum-server,neoscoin/neos-electrum-server,reddink/reddcoin-electrum-server,dashpay/electrum-dash-server,reddink/reddcoin-electrum-server,fsb4000/electrum-server,electrumalt/electrum-ixc-server,CryptoManiac/electrum-server,mazaclub/encompass-mercury,fsb4000/electrum-server,erasmospunk/electrum-nvc-server,ALEXIUMCOIN/electrum-server,fireduck64/electrum-server,eXcomm/electrum-server,ALEXIUMCOIN/electrum-server,iswt/electrum-server,argentumproject/electrum-arg-server,TrainMAnB/electrum-vcn-server,mazaclub/encompass-mercury,nmarley/electrum-dash-server,OverlordQ/electrum-server,electr
umalt/electrum-doge-server,spesmilo/electrum-server,testalt/electrum-nmc-server,testalt/electrum-dvc-server,testalt/electrum-msc-server,testalt/electrum-dvc-server,testalt/electrum-nmc-server,dashpay/electrum-dash-server,thelazier/electrum-dash-server,protonn/electrum-dgb-server,doged/encompass-mercury,shsmith/electrum-server,AsiaCoin/electrum-server,fireduck64/electrum-server,nmarley/electrum-dash-server,cryptapus/electrum-server-myr,sorce/electrum-server,Verbalist/electrum-server,joshafest/electrum-fnx,spesmilo/electrum-server,vcoin-project/electrum-server-vcn,testalt/electrum-ppc-server,testalt/electrum-drk-server,Verbalist/electrum-server,habibmasuro/electrum-nvc-server,UberPay/electrum-nmc-server,habibmasuro/electrum-nvc-server,iswt/electrum-server,mazaclub/tate-server,AsiaCoin/electrum-server,testalt/electrum-msc-server,mazaclub/electrum-nmc-server,pooler/electrum-ltc-server,testalt/electrum-dgc-server,testalt/electrum-dgc-server,shsmith/electrum-server,ALEXIUMCOIN/electrum-server,habibmasuro/electrum-server,fsb4000/electrum-server,nmarley/electrum-dash-server,bauerj/electrum-server,vialectrum/vialectrum-server,Kefkius/electrum-nmc-server,erasmospunk/electrum-nvc-server,habibmasuro/electrum-nvc-server,repos-bitcoin/electrum-server,Kefkius/scallop-server,CryptoManiac/electrum-server,cryptapus/electrum-server-uno,neoscoin/neos-electrum-server,cryptapus/electrum-server,habibmasuro/electrum-server,erasmospunk/electrum-nvc-server | version.py | version.py | VERSION = "0.2"
| VERSION = "0.1"
| mit | Python |
0e6e66b4e4ea13e28f089e363d8f1409623566b6 | Implement set() method. | RedMoonStudios/hetzner | hetzner/rdns.py | hetzner/rdns.py | from urllib import urlencode
from hetzner import RobotError
class ReverseDNS(object):
def __init__(self, conn, ip=None, result=None):
self.conn = conn
self.ip = ip
self.update_info(result)
def update_info(self, result=None):
if result is None:
try:
result = self.conn.get('/rdns/{0}'.format(self.ip))
except RobotError as err:
if err.status == 404:
result = None
else:
raise
if result is not None:
data = result['rdns']
self.ip = data['ip']
self.ptr = data['ptr']
else:
self.ptr = None
def set(self, value):
self.conn.post('/rdns/{0}'.format(self.ip), {'ptr': value})
def __repr__(self):
return "<ReverseDNS PTR: {0}>".format(self.ptr)
class ReverseDNSManager(object):
def __init__(self, conn, main_ip=None):
self.conn = conn
self.main_ip = main_ip
def get(self, ip):
return ReverseDNS(self.conn, ip)
def __iter__(self):
if self.main_ip is None:
url = '/rdns'
else:
data = urlencode({'server_ip': self.main_ip})
url = '/rdns?{0}'.format(data)
try:
result = self.conn.get(url)
except RobotError as err:
if err.status == 404:
result = []
else:
raise
return iter([ReverseDNS(self.conn, result=rdns) for rdns in result])
| from urllib import urlencode
from hetzner import RobotError
class ReverseDNS(object):
def __init__(self, conn, ip=None, result=None):
self.conn = conn
self.ip = ip
self.update_info(result)
def update_info(self, result=None):
if result is None:
try:
result = self.conn.get('/rdns/{0}'.format(self.ip))
except RobotError as err:
if err.status == 404:
result = None
else:
raise
if result is not None:
data = result['rdns']
self.ip = data['ip']
self.ptr = data['ptr']
else:
self.ptr = None
def set(self, value):
pass
def __repr__(self):
return "<ReverseDNS PTR: {0}>".format(self.ptr)
class ReverseDNSManager(object):
def __init__(self, conn, main_ip=None):
self.conn = conn
self.main_ip = main_ip
def get(self, ip):
return ReverseDNS(self.conn, ip)
def __iter__(self):
if self.main_ip is None:
url = '/rdns'
else:
data = urlencode({'server_ip': self.main_ip})
url = '/rdns?{0}'.format(data)
try:
result = self.conn.get(url)
except RobotError as err:
if err.status == 404:
result = []
else:
raise
return iter([ReverseDNS(self.conn, result=rdns) for rdns in result])
| bsd-3-clause | Python |
f3fd5fb2fefa0cdf421c4a76154300c7395444f4 | bump version | Stvad/anki,jakesyl/ruby-card,Arthaey/anki,xuewenfei/anki,socialpercon/anki-1,go38/anki,kenjhim/anki,ospalh/libanki3,hssm/anki,holycrepe/anki,jakesyl/ruby-card,eduOS/anki,abeyer/anki,hssm/anki,LucasCabello/anki,florianjacob/anki,sunclx/anki,subfusc/anki,jkitching/anki | anki/__init__.py | anki/__init__.py | # -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
import sys
import os
import platform
if sys.version_info[0] > 2:
raise Exception("Anki should be run with Python 2")
elif sys.version_info[1] < 6:
raise Exception("Anki requires Python 2.6+")
elif sys.getfilesystemencoding().lower() in ("ascii", "ansi_x3.4-1968"):
raise Exception("Anki requires a UTF-8 locale.")
try:
import simplejson as json
except:
import json as json
if json.__version__ < "1.7.3":
raise Exception("SimpleJSON must be 1.7.3 or later.")
# add path to bundled third party libs
ext = os.path.realpath(os.path.join(
os.path.dirname(__file__), "../thirdparty"))
sys.path.insert(0, ext)
arch = platform.architecture()
if arch[1] == "ELF":
# add arch-dependent libs
sys.path.insert(0, os.path.join(ext, "py2.%d-%s" % (
sys.version_info[1], arch[0][0:2])))
version="2.0.29" # build scripts grep this line, so preserve formatting
from anki.storage import Collection
__all__ = ["Collection"]
| # -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
import sys
import os
import platform
if sys.version_info[0] > 2:
raise Exception("Anki should be run with Python 2")
elif sys.version_info[1] < 6:
raise Exception("Anki requires Python 2.6+")
elif sys.getfilesystemencoding().lower() in ("ascii", "ansi_x3.4-1968"):
raise Exception("Anki requires a UTF-8 locale.")
try:
import simplejson as json
except:
import json as json
if json.__version__ < "1.7.3":
raise Exception("SimpleJSON must be 1.7.3 or later.")
# add path to bundled third party libs
ext = os.path.realpath(os.path.join(
os.path.dirname(__file__), "../thirdparty"))
sys.path.insert(0, ext)
arch = platform.architecture()
if arch[1] == "ELF":
# add arch-dependent libs
sys.path.insert(0, os.path.join(ext, "py2.%d-%s" % (
sys.version_info[1], arch[0][0:2])))
version="2.0.28" # build scripts grep this line, so preserve formatting
from anki.storage import Collection
__all__ = ["Collection"]
| agpl-3.0 | Python |
9dc22371a6b79a455a137d313ca8643ac59afec2 | Update announcements.py | Boijangle/GroupMe-Message-Bot | announcements.py | announcements.py | import sys
import icalendar
import requests
import pytz
from datetime import datetime, timedelta
from libs import post_text
from icalendar import Calendar
from database import find_bot_nname
import re
r = requests.get(sys.argv[2])
icsData = r.text
cal = Calendar.from_ical(icsData)
for evt in cal.walk('vevent'):
print(evt)
start = evt.decoded('DTSTART')
now = datetime.now(tz=pytz.utc)
time_left = start - now
if timedelta(minutes=0) < time_left < timedelta(minutes=10):
raw_text = str(evt.get('SUMMARY'))
search = re.search(r"([^ ]+)\s(.+)", raw_text)
(nname, message) = search.groups('1')
nname = nname[2:]
message = message[:-1]
print(nname)
print(message)
bot_id = find_bot_nname(nname)
if not bot_id:
bot_id = sys.argv[1]
post_text("I was supposed to post '" + message + "' to " + nname, bot_id)
else:
bot_id = bot_id[0][0]
post_text(message, bot_id)
| import sys
import icalendar
import requests
import pytz
from datetime import datetime, timedelta
from libs import post_text
from icalendar import Calendar
from database import find_bot_nname
import re
r = requests.get(sys.argv[2])
icsData = r.text
cal = Calendar.from_ical(icsData)
for evt in cal.walk('vevent'):
print(evt)
start = evt.get('DTSTART').date()
now = datetime.now(tz=pytz.utc)
time_left = start - now
if timedelta(minutes=0) < time_left < timedelta(minutes=10):
raw_text = str(evt.get('SUMMARY'))
search = re.search(r"([^ ]+)\s(.+)", raw_text)
(nname, message) = search.groups('1')
nname = nname[2:]
message = message[:-1]
print(nname)
print(message)
bot_id = find_bot_nname(nname)
if not bot_id:
bot_id = sys.argv[1]
post_text("I was supposed to post '" + message + "' to " + nname, bot_id)
else:
bot_id = bot_id[0][0]
post_text(message, bot_id)
| mit | Python |
b832c794ad28d3dd9675a72389fc8f0a00a2c035 | Fix yapf | adamtheturtle/vws-python,adamtheturtle/vws-python | tests/mock_vws/test_target_summary.py | tests/mock_vws/test_target_summary.py | """
Tests for the mock of the target summary endpoint.
"""
import pytest
from urllib.parse import urljoin
import requests
from requests_mock import GET
from tests.utils import VuforiaServerCredentials
from vws._request_utils import authorization_header, rfc_1123_date
@pytest.mark.usefixtures('verify_mock_vuforia')
class TestTargetSummary:
"""
Tests for the target summary endpoint.
"""
def test_target_summary(
self,
vuforia_server_credentials: VuforiaServerCredentials,
target_id: str,
) -> None:
"""
A target summary is returned.
"""
# Build the signed request by hand: VWS authenticates with an
# HMAC-style Authorization header computed over the method, body,
# content type, RFC 1123 date, and request path.
content_type = 'application/json'
date = rfc_1123_date()
request_path = '/summary/' + target_id
authorization_string = authorization_header(
access_key=vuforia_server_credentials.access_key,
secret_key=vuforia_server_credentials.secret_key,
method=GET,
content=b'',
content_type=content_type,
date=date,
request_path=request_path,
)
headers = {
"Authorization": authorization_string,
"Date": date,
'Content-Type': content_type,
}
response = requests.request(
method=GET,
url=urljoin('https://vws.vuforia.com/', request_path),
headers=headers,
data='',
)
# The summary payload must expose exactly this key set -- comparing
# keys() checks both presence and absence of fields.
expected_keys = {
'status',
'result_code',
'transaction_id',
'database_name',
'target_name',
'upload_date',
'active_flag',
'tracking_rating',
'total_recos',
'current_month_recos',
'previous_month_recos',
}
assert response.json().keys() == expected_keys
| """
Tests for the mock of the target summary endpoint.
"""
import pytest
from urllib.parse import urljoin
import requests
from requests_mock import GET
from tests.utils import VuforiaServerCredentials
from vws._request_utils import authorization_header, rfc_1123_date
@pytest.mark.usefixtures('verify_mock_vuforia')
class TestTargetSummary:
"""
Tests for the target summary endpoint.
"""
def test_target_summary(
self,
vuforia_server_credentials: VuforiaServerCredentials,
target_id: str,
) -> None:
"""
A target summary is returned.
"""
content_type = 'application/json'
date = rfc_1123_date()
request_path = '/summary/' + target_id
authorization_string = authorization_header(
access_key=vuforia_server_credentials.access_key,
secret_key=vuforia_server_credentials.secret_key,
method=GET,
content=b'',
content_type=content_type,
date=date,
request_path=request_path,
)
headers = {
"Authorization": authorization_string,
"Date": date,
'Content-Type': content_type,
}
response = requests.request(
method=GET,
url=urljoin('https://vws.vuforia.com/', request_path),
headers=headers,
data='',
)
expected_keys = {
'result_code',
}
expected_keys = {
'status',
'result_code',
'transaction_id',
'database_name',
'target_name',
'upload_date',
'active_flag',
'tracking_rating',
'total_recos',
'current_month_recos',
'previous_month_recos',
}
assert response.json().keys() == expected_keys
| mit | Python |
98540ab8e936ccf605d66dac66aa731a9ee83b42 | remove trailing line flake8 | marcelometal/pyvows,heynemann/pyvows | tests/no_subcontext_extension_vows.py | tests/no_subcontext_extension_vows.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pyvows testing engine
# https://github.com/heynemann/pyvows
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 Bernardo Heynemann heynemann@gmail.com
from pyvows import Vows, expect
# Vows-style test batch: each nested context provides a `topic` and
# assertions against it; `teardown` runs after the context's vows.
@Vows.batch
class ContextClass(Vows.Context):
# Flag flipped by the child context so teardown can verify it ran.
entered = False
def topic(self):
return 1
def should_be_working_fine(self, topic):
expect(topic).to_equal(1)
def teardown(self):
# note to readers: 'expect's are not recommended on teardown methods
expect(self.entered).to_equal(True)
# NOTE(review): two sibling-looking classes below share the same name; in
# the original (pre-flattening) layout the second is presumably nested
# inside the first -- otherwise the later definition would shadow the
# earlier one. Confirm against upstream formatting.
class SubcontextThatDoesntNeedToExtendAgainFromContext:
entered = False
def topic(self):
return 2
def should_be_working_fine_too(self, topic):
# Signal the parent context that this subcontext executed.
self.parent.entered = True
expect(topic).to_equal(2)
def teardown(self):
# note to readers: 'expect's are not recommended on teardown methods
expect(self.entered).to_equal(True)
class SubcontextThatDoesntNeedToExtendAgainFromContext:
def topic(self):
return 3
def should_be_working_fine_too(self, topic):
self.parent.entered = True
expect(topic).to_equal(3)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pyvows testing engine
# https://github.com/heynemann/pyvows
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 Bernardo Heynemann heynemann@gmail.com
from pyvows import Vows, expect
@Vows.batch
class ContextClass(Vows.Context):
entered = False
def topic(self):
return 1
def should_be_working_fine(self, topic):
expect(topic).to_equal(1)
def teardown(self):
# note to readers: 'expect's are not recommended on teardown methods
expect(self.entered).to_equal(True)
class SubcontextThatDoesntNeedToExtendAgainFromContext:
entered = False
def topic(self):
return 2
def should_be_working_fine_too(self, topic):
self.parent.entered = True
expect(topic).to_equal(2)
def teardown(self):
# note to readers: 'expect's are not recommended on teardown methods
expect(self.entered).to_equal(True)
class SubcontextThatDoesntNeedToExtendAgainFromContext:
def topic(self):
return 3
def should_be_working_fine_too(self, topic):
self.parent.entered = True
expect(topic).to_equal(3)
| mit | Python |
81dfd09b8ad7a3e070227040e3fa6714578e8773 | Add parameter documentation to forking module | VEVO/hidi | hidi/forking.py | hidi/forking.py | from hidi.transform import Transform
from functools import partial
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
# Module-level helper (rather than a lambda/closure) so that
# ProcessPoolExecutor can pickle it and ship it to worker processes:
# runs a single pipeline against the shared input `io`.
def map_fn(pipeline, io, progress, **kwargs):
return pipeline.run(io, progress=progress, **kwargs)
class ExecutorFork(Transform):
    """
    Base class for fork transforms backed by a
    :code:`concurrent.futures` executor.

    :param pipelines: An array of pipelines to fork execution to.
    :type pipelines: list[hidi.pipeline.Pipeline]
    :param progress: When True, progress of the forked pipelines
        will be logged.
    :type progress: bool
    """
    def __init__(self, pipelines, progress=False):
        self.pipelines = pipelines
        self.progress = progress

    def executor_fork(self, Executor, io, **kwargs):
        """
        Run every pipeline against ``io`` on an ``Executor`` instance,
        returning ``(outputs, kwargs)`` where ``outputs`` preserves the
        order in which the pipelines were given.
        """
        # Executors require max_workers > 0; with no pipelines there is
        # nothing to run, so short-circuit instead of letting
        # Executor(0) raise ValueError.
        if not self.pipelines:
            return [], kwargs
        with Executor(len(self.pipelines)) as ex:
            run = partial(map_fn, io=io, progress=self.progress, **kwargs)
            return list(ex.map(run, self.pipelines)), kwargs
class ThreadForkTransform(ExecutorFork):
"""
Fork a pipeline using :code:`concurrent.futures.ThreadPoolExecutor`
as a backend for execution.
This is useful if you have several transforms that perform well
when running in concurrent threads such as IO heavy or CPU heavy
tasks that execute outside the Python runtime.
The forked transform will return a list of Pipeline outputs,
in the same order as the forked pipelines were given.
:param pipelines: An array of pipelines to fork execution to.
:type pipelines: list[hidi.pipeline.Pipeline]
:param progress: When True, progress of the forked pipelines
will be logged.
:type progress: bool
"""
def transform(self, io, **kwargs):
# Delegate to the shared executor_fork with a thread-pool backend.
return self.executor_fork(ThreadPoolExecutor, io, **kwargs)
class ProcessForkTransform(ExecutorFork):
"""
Fork a pipeline using :code:`concurrent.futures.ProcessesPoolExecutor`
as a backend for execution.
This method is useful if you have several transforms that
can be executed concurrently and are CPU intensive.
The forked pipeline will now return a list of pipeline ouputs,
in the same order as the forked pipelines were given.
Special care must be taken as each transform must be pickled
to a new process.
:param pipelines: An array of pipelines to fork execution to.
:type pipelines: list[hidi.pipeline.Pipeline]
:param progress: When True, progress of the forked pipelines
will be logged.
:type progress: bool
"""
def transform(self, io, **kwargs):
# Delegate to the shared executor_fork with a process-pool backend;
# the pipelines (and io/kwargs) must all be picklable.
return self.executor_fork(ProcessPoolExecutor, io, **kwargs)
class TrivialForkTransform(Transform):
    """
    Trivial Fork Transform using an ordinary loop.

    Runs each pipeline sequentially in the calling thread and returns
    the list of their outputs in the order the pipelines were given.

    :param pipelines: An array of pipelines to fork execution to.
    :type pipelines: list[hidi.pipeline.Pipeline]
    :param progress: When True, progress of the forked pipelines
        will be logged.
    :type progress: bool
    """
    def __init__(self, pipelines, progress=False):
        self.pipelines = pipelines
        self.progress = progress

    def transform(self, io, **kwargs):
        """Run every pipeline against ``io`` and collect the results."""
        results = [
            pipeline.run(io, progress=self.progress, **kwargs)
            for pipeline in self.pipelines
        ]
        return results, kwargs
| from hidi.transform import Transform
from functools import partial
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
def map_fn(pipeline, io, progress, **kwargs):
return pipeline.run(io, progress=progress, **kwargs)
class ExecutorFork(Transform):
def __init__(self, pipelines, progress=False):
self.pipelines = pipelines
self.progress = progress
def executor_fork(self, Executor, io, **kwargs):
with Executor(len(self.pipelines)) as ex:
run = partial(map_fn, io=io, progress=self.progress, **kwargs)
return list(ex.map(run, self.pipelines)), kwargs
class ThreadForkTransform(ExecutorFork):
"""
Fork a pipeline using :code:`concurrent.futures.ThreadPoolExecutor`
as a backend for execution.
This transform takes a list of Pipeline instances upon
initialization.
This is useful if you have several transforms that perform well
when running in concurrent threads such as IO heavy or CPU heavy
tasks that execute outside the Python runtime.
The forked transform will return a list of Pipeline outputs,
in the same order as the forked pipelines were given.
"""
def transform(self, io, **kwargs):
return self.executor_fork(ThreadPoolExecutor, io, **kwargs)
class ProcessForkTransform(ExecutorFork):
"""
Fork a pipeline using :code:`concurrent.futures.ProcessesPoolExecutor`
as a backend for execution.
This transform takes a list of Pipeline instances upon initialization.
This method is useful if you have several transforms that
can be executed concurrently and are CPU intensive.
The forked pipeline will now return a list of pipeline ouputs,
in the same order as the forked pipelines were given.
Special care must be taken as each transform must be pickled
to a new process.
"""
def transform(self, io, **kwargs):
return self.executor_fork(ProcessPoolExecutor, io, **kwargs)
class TrivialForkTransform(Transform):
"""
Trivial Fork Transform using an ordinary loop.
"""
def __init__(self, pipelines, progress=False):
self.pipelines = pipelines
self.progress = progress
def transform(self, io, **kwargs):
output = []
for pipeline in self.pipelines:
output.append(pipeline.run(io, progress=self.progress, **kwargs))
return output, kwargs
| apache-2.0 | Python |
5045181241e9a6e4a2cd05a93da7ee0b63b4a568 | Allow the foreground reindex process to be interrupted. | kostko/itsy | itsy/management/commands/itsy_reindex.py | itsy/management/commands/itsy_reindex.py | import optparse
import traceback
from django.core.management import base as management_base
from django.utils import importlib
from ... import document as itsy_document
from ... import tasks as itsy_tasks
class Command(management_base.BaseCommand):
    """
    Management command that reindexes all documents of a given class,
    either in the foreground or via background workers.
    """
    args = "class_path"
    help = "Performs a reindex of the given document class."
    requires_model_validation = True
    option_list = management_base.BaseCommand.option_list + (
        optparse.make_option('--background', action = 'store_true', dest = 'background', default = False,
            help = "Should the reindexing be performed by background workers."),
        optparse.make_option('--recreate-index', action = 'store_true', dest = 'recreate-index', default = False,
            help = "Should the index be dropped and recreated. THIS WILL ERASE ALL DATA!")
    )

    def handle(self, *args, **options):
        """
        Performs a reindex of the given document class.
        """
        if len(args) != 1:
            raise management_base.CommandError("Reindex command takes exactly one argument!")

        # Load the specified document class from its dotted path.
        class_path = args[0]
        module_name = class_path[:class_path.rfind(".")]
        class_name = class_path[class_path.rfind(".") + 1:]
        module = importlib.import_module(module_name)
        document_class = getattr(module, class_name)

        if not issubclass(document_class, itsy_document.Document):
            raise management_base.CommandError("Specified class is not a valid Document!")
        if not document_class._meta.searchable or document_class._meta.abstract or document_class._meta.embedded:
            raise management_base.CommandError("Specified document is not searchable!")

        if options.get("recreate-index"):
            # Drop the index and recreate it
            self.stdout.write("Recreating index...\n")
            document_class._meta.search_engine.drop()
            document_class._meta.emit_search_mappings()

        if options.get("background"):
            # Spawn the reindex task
            itsy_tasks.search_index_reindex.delay(document_class)
            # Notify the user that the reindex has started in the background
            self.stdout.write("Reindex of %s has been initiated in the background.\n" % class_path)
        else:
            self.stdout.write("Performing foreground reindex of %s...\n" % class_path)
            for no, document in enumerate(document_class.find().order_by("pk")):
                try:
                    document.save(target = itsy_document.DocumentSource.Search)
                except Exception:
                    # Catch Exception -- not a bare except -- so that
                    # KeyboardInterrupt/SystemExit can still abort the
                    # reindex, while per-document failures are logged
                    # and skipped.
                    traceback.print_exc()
                if (no + 1) % 1000 == 0:
                    self.stdout.write("Indexed %d documents.\n" % (no + 1))

            self.stdout.write("Reindex done.\n")
| import optparse
import traceback
from django.core.management import base as management_base
from django.utils import importlib
from ... import document as itsy_document
from ... import tasks as itsy_tasks
class Command(management_base.BaseCommand):
args = "class_path"
help = "Performs a reindex of the given document class."
requires_model_validation = True
option_list = management_base.BaseCommand.option_list + (
optparse.make_option('--background', action = 'store_true', dest = 'background', default = False,
help = "Should the reindexing be performed by background workers."),
optparse.make_option('--recreate-index', action = 'store_true', dest = 'recreate-index', default = False,
help = "Should the index be dropped and recreated. THIS WILL ERASE ALL DATA!")
)
def handle(self, *args, **options):
"""
Performs a reindex of the given document class.
"""
if len(args) != 1:
raise management_base.CommandError("Reindex command takes exactly one argument!")
# Load the specified document class
class_path = args[0]
module_name = class_path[:class_path.rfind(".")]
class_name = class_path[class_path.rfind(".") + 1:]
module = importlib.import_module(module_name)
document_class = getattr(module, class_name)
if not issubclass(document_class, itsy_document.Document):
raise management_base.CommandError("Specified class is not a valid Document!")
if not document_class._meta.searchable or document_class._meta.abstract or document_class._meta.embedded:
raise management_base.CommandError("Specified document is not searchable!")
if options.get("recreate-index"):
# Drop the index and recreate it
self.stdout.write("Recreating index...\n")
document_class._meta.search_engine.drop()
document_class._meta.emit_search_mappings()
if options.get("background"):
# Spawn the reindex task
itsy_tasks.search_index_reindex.delay(document_class)
# Notify the user that the reindex has started in the background
self.stdout.write("Reindex of %s has been initiated in the background.\n" % class_path)
else:
self.stdout.write("Performing foreground reindex of %s...\n" % class_path)
for no, document in enumerate(document_class.find().order_by("pk")):
try:
document.save(target = itsy_document.DocumentSource.Search)
except:
# Print the exception and continue reindexing
traceback.print_exc()
if (no + 1) % 1000 == 0:
self.stdout.write("Indexed %d documents.\n" % (no + 1))
self.stdout.write("Reindex done.\n")
| bsd-3-clause | Python |
181ffad1eaebc14532d9a3172df1805f7bc066d7 | Fix crash with Printer.warning | nuagenetworks/monolithe,little-dude/monolithe,little-dude/monolithe,nuagenetworks/monolithe,little-dude/monolithe,nuagenetworks/monolithe | monolithe/generators/vspk/vspkgenerator.py | monolithe/generators/vspk/vspkgenerator.py | # -*- coding: utf-8 -*-
import os
import shutil
from monolithe.lib.utils.vsdk import VSDKUtils
from monolithe.lib.utils.printer import Printer
class VSPKGenerator(object):
    """Build a VSPK package bundling one generated VSDK per requested version."""

    def __init__(self, versions):
        """Remember the versions to bundle and compute the working paths."""
        self.versions = versions
        here = os.path.dirname(os.path.realpath(__file__))
        self._path_vanilla_vspk = '%s/vanilla/vspk' % here
        self._path_codegen = "./codegen"
        self._path_generated_vspk = "%s/vspk" % self._path_codegen

    def run(self):
        """Assemble the VSPK package from the generated VSDK versions."""
        self._prepare_vspk_destination(self._path_vanilla_vspk,
                                       self._path_generated_vspk)
        for version in self.versions:
            if version == 'master':
                Printer.warning('master branch should be used for development purpose only.')
            self._include_vsdk(version, self._path_codegen,
                               self._path_generated_vspk)

    def _prepare_vspk_destination(self, template_path, target_path):
        """Reset the destination directory from the vanilla template."""
        if os.path.exists(target_path):
            shutil.rmtree(target_path)
        shutil.copytree(template_path, target_path)

    def _include_vsdk(self, vsdk_version, vsdk_base_path, vspk_path):
        """Copy one generated VSDK version into the VSPK tree."""
        Printer.success("Adding VSDK version %s to VSPK" % vsdk_version)
        parsed_version = VSDKUtils.get_string_version(vsdk_version)
        source_sdk_path = "%s/%s/vsdk/" % (vsdk_base_path, vsdk_version)
        dest_sdk_path = "%s/vspk/vsdk/%s" % (vspk_path, parsed_version)
        shutil.copytree(source_sdk_path, dest_sdk_path)
| # -*- coding: utf-8 -*-
import os
import shutil
from monolithe.lib.utils.vsdk import VSDKUtils
from monolithe.lib.utils.printer import Printer
class VSPKGenerator(object):
""" Create a VSPK Package containing SDK versions
"""
def __init__(self, versions):
""" Initialize a VSPKGenerator
"""
self.versions = versions
self._path_vanilla_vspk = '%s/vanilla/vspk' % os.path.dirname(os.path.realpath(__file__))
self._path_codegen = "./codegen"
self._path_generated_vspk = "%s/vspk" % self._path_codegen
def run(self):
""" Create the VSPK package
"""
self._prepare_vspk_destination(self._path_vanilla_vspk, self._path_generated_vspk)
for version in self.versions:
if version == 'master':
Printer.warning('master branch should be used for development purpose only.')
self._include_vsdk(version, self._path_codegen, self._path_generated_vspk)
def _prepare_vspk_destination(self, source_path, destination_path):
""" Clean up detination environement
"""
if os.path.exists(destination_path):
shutil.rmtree(destination_path)
shutil.copytree(source_path, destination_path)
def _include_vsdk(self, vsdk_version, vsdk_base_path, vspk_path):
""" Install Generated version of vsdk to vspk"
"""
parsed_version = VSDKUtils.get_string_version(vsdk_version)
source_sdk_path = "%s/%s/vsdk/" % (vsdk_base_path, vsdk_version)
dest_sdk_path = "%s/vspk/vsdk/%s" % (vspk_path, parsed_version)
Printer.success("Adding VSDK version %s to VSPK" % vsdk_version)
shutil.copytree(source_sdk_path, dest_sdk_path)
| bsd-3-clause | Python |
__version__ = (0, 16, 2)


def get_version():
    """Return the version tuple as a dotted string, e.g. ``'0.16.2'``."""
    return '.'.join(str(part) for part in __version__)
| __version__ = (0, 16, 1)
def get_version():
return '.'.join(map(str, __version__))
| bsd-2-clause | Python |
3cc4f50162db35de5a9cb17b2abb002f3152abb4 | Fix arg parsing tests | ionrock/withenv | tests/test_arg_parsing.py | tests/test_arg_parsing.py | import pytest
from withenv import cli
# Unit tests for withenv's CLI argument parser: parse_args returns a dict
# with the command to exec and the ordered environment-building actions.
class TestArgParsing(object):
def setup(self):
# Baseline expected result; individual tests mutate it before asserting.
self.result = {
'cmd': [],
'actions': []
}
def test_defaults(self):
# No arguments: empty command, no actions.
assert cli.parse_args([]) == self.result
def test_mixed_long_with_cmd(self):
# Actions are (handler, argument) pairs, preserved in CLI order;
# -e/--environment map to file updates, -d to directory updates.
self.result['actions'] = [
(cli.update_env_from_file, 'foo.yml'),
(cli.update_env_from_file, 'bar.yml'),
(cli.update_env_from_dir, 'baz'),
]
# Everything after the options is the command to run.
self.result['cmd'] = ['ls', '-la']
args = [
'-e', 'foo.yml',
'--environment', 'bar.yml',
'-d', 'baz',
'ls', '-la'
]
assert cli.parse_args(args) == self.result
| import pytest
from withenv.cli import parse_args
class TestArgParsing(object):
def setup(self):
self.result = {
'cmd': [],
'env_files': [],
'env_dirs': [],
}
def test_defaults(self):
assert parse_args([]) == self.result
def test_single_short(self):
self.result['env_files'].append('foo.yml')
assert parse_args(['-e', 'foo.yml']) == self.result
def test_single_long(self):
self.result['env_files'].append('foo.yml')
assert parse_args(['--environment', 'foo.yml']) == self.result
def test_multi_short(self):
self.result['env_files'] = ['foo.yml', 'bar.yml']
args = [
'-e', 'foo.yml',
'-e', 'bar.yml',
]
assert parse_args(args) == self.result
def test_multi_long(self):
self.result['env_files'] = ['foo.yml', 'bar.yml']
args = [
'--environment', 'foo.yml',
'--environment', 'bar.yml',
]
assert parse_args(args) == self.result
def test_mixed_long(self):
self.result['env_files'] = ['foo.yml', 'bar.yml', 'baz.yml']
args = [
'-e', 'foo.yml',
'--environment', 'bar.yml',
'-e', 'baz.yml',
]
assert parse_args(args) == self.result
def test_mixed_long_with_cmd(self):
self.result['env_files'] = ['foo.yml', 'bar.yml', 'baz.yml']
self.result['cmd'] = ['ls', '-la']
args = [
'-e', 'foo.yml',
'--environment', 'bar.yml',
'-e', 'baz.yml',
'ls', '-la'
]
assert parse_args(args) == self.result
| bsd-3-clause | Python |
8d515e0329d9305fe6a1aa1f917c4ba3730e8a88 | use safe_eval instead of eval | acsone/stock-logistics-warehouse,kmee/stock-logistics-warehouse,open-synergy/stock-logistics-warehouse | stock_available/models/product_product.py | stock_available/models/product_product.py | # -*- coding: utf-8 -*-
# © 2014 Numérigraphe SARL
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api
from openerp.addons import decimal_precision as dp
from openerp.tools.safe_eval import safe_eval
class ProductProduct(models.Model):
    """Add a field for the stock available to promise.

    Useful implementations need to be installed through the Settings menu or by
    installing one of the modules stock_available_*
    """
    _inherit = 'product.product'

    # Map the supported domain operators to plain comparison callables so the
    # search method can compare quantities directly, instead of building a
    # "str(qty) <op> str(value)" expression and running it through safe_eval
    # (slower, and the str() round-trip of floats can lose precision).
    _SEARCH_OPERATORS = {
        '<': lambda a, b: a < b,
        '>': lambda a, b: a > b,
        '=': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '<=': lambda a, b: a <= b,
        '>=': lambda a, b: a >= b,
    }

    @api.multi
    @api.depends('virtual_available')
    def _immediately_usable_qty(self):
        """No-op implementation of the stock available to promise.

        By default, available to promise = forecasted quantity.

        **Each** sub-module **must** override this method in **both**
        `product.product` **and** `product.template`, because we can't
        decide in advance how to compute the template's quantity from the
        variants.
        """
        for prod in self:
            prod.immediately_usable_qty = prod.virtual_available

    def _search_immediately_usable_quantity(self, operator, value):
        """Search support for the computed field.

        :param operator: one of ``< > = != <= >=``
        :param value: numeric threshold to compare against
        :return: a domain matching the products whose available-to-promise
                 quantity satisfies ``operator value``
        """
        assert operator in self._SEARCH_OPERATORS, 'Invalid domain operator'
        assert isinstance(
            value, (float, int)
        ), 'Invalid domain value'
        compare = self._SEARCH_OPERATORS[operator]
        # The field is computed, so we must evaluate it product by product.
        ids = [prod.id
               for prod in self.search([])
               if compare(prod.immediately_usable_qty, value)]
        return [('id', 'in', ids)]

    immediately_usable_qty = fields.Float(
        digits=dp.get_precision('Product Unit of Measure'),
        compute='_immediately_usable_qty',
        search='_search_immediately_usable_quantity',
        string='Available to promise',
        help="Stock for this Product that can be safely proposed "
             "for sale to Customers.\n"
             "The definition of this value can be configured to suit "
             "your needs")
| # -*- coding: utf-8 -*-
# © 2014 Numérigraphe SARL
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api
from openerp.addons import decimal_precision as dp
class ProductProduct(models.Model):
"""Add a field for the stock available to promise.
Useful implementations need to be installed through the Settings menu or by
installing one of the modules stock_available_*
"""
_inherit = 'product.product'
@api.multi
@api.depends('virtual_available')
def _immediately_usable_qty(self):
"""No-op implementation of the stock available to promise.
By default, available to promise = forecasted quantity.
**Each** sub-module **must** override this method in **both**
`product.product` **and** `product.template`, because we can't
decide in advance how to compute the template's quantity from the
variants.
"""
for prod in self:
prod.immediately_usable_qty = prod.virtual_available
def _search_immediately_usable_quantity(self, operator, value):
res = []
assert operator in (
'<', '>', '=', '!=', '<=', '>='
), 'Invalid domain operator'
assert isinstance(
value, (float, int)
), 'Invalid domain value'
if operator == '=':
operator = '=='
ids = []
products = self.search([])
for prod in products:
if eval(str(prod.immediately_usable_qty) + operator + str(value)):
ids.append(prod.id)
res.append(('id', 'in', ids))
return res
immediately_usable_qty = fields.Float(
digits=dp.get_precision('Product Unit of Measure'),
compute='_immediately_usable_qty',
search='_search_immediately_usable_quantity',
string='Available to promise',
help="Stock for this Product that can be safely proposed "
"for sale to Customers.\n"
"The definition of this value can be configured to suit "
"your needs")
| agpl-3.0 | Python |
466a29cb543c097c3976f096b1b0b349fc1bd645 | Remove exception handling in main | salvor7/MarkovChainBibleBot | ReligiousPhraseMC/holy_twitter.py | ReligiousPhraseMC/holy_twitter.py | """Coordinates the twitter api with the markov chain models"""
import json
from pprint import pprint
from tweepy import Stream, OAuthHandler, API
from tweepy.streaming import StreamListener
from holy_markov import OldTestaPassagesMarkov
from twitter_secrets import api_tokens as at
# Tweepy stream listener that replies to mentions and direct messages with a
# randomly generated passage from the Markov model.
class HolyListener(StreamListener):
# Shared Markov model (built once at class definition time).
old_testa = OldTestaPassagesMarkov()
bot_name = 'HolyStupidArt'
def send_passage(self, screen_name):
# Guard against replying to ourselves (infinite reply loop).
if screen_name != self.bot_name:
print('Passage sent to @' + screen_name)
# Budget: 140 chars minus "@name " prefix (the '@' and the space).
passage = self.old_testa.twitter_message(line_length=(140 - len(screen_name) - 2))
tweet = ''.join(['@', screen_name, ' ', passage])
self.api.update_status(tweet)
def on_connect(self):
print("Connection established!!")
def on_disconnect(self, notice):
print("Connection lost!! : ", notice)
def on_direct_message(self, status):
try:
# NOTE(review): reaches into tweepy's private `_json` payload --
# assumes the DM schema has 'direct_message'/'sender_screen_name';
# confirm against the tweepy/Twitter API version in use.
dm = status._json['direct_message']
self.send_passage(screen_name=dm['sender_screen_name'])
except BaseException as e:
# NOTE(review): BaseException also swallows KeyboardInterrupt/
# SystemExit; presumably deliberate to keep the stream alive.
print("Failed on_direct_message()", str(e))
pprint(status._json)
# Returning True keeps the stream running after an event.
return True
def on_event(self, status):
print('Entered on_event()')
print(status)
def on_status(self, status):
print('Entered on_status()')
try:
self.send_passage(screen_name=status._json['user']['screen_name'])
except BaseException as e:
print("Failed on_status()", str(e))
pprint(status._json)
return True
def on_error(self, status):
print('Entered on_error()')
print(status)
def main():
    """The main event loop for the holy twitter bot

    It watches for twitter events, and posts randomly generated holy text to twitter.
    """
    handler = OAuthHandler(at['CONSUMER_KEY'], at['CONSUMER_SECRET'])
    handler.secure = True
    handler.set_access_token(at['ACCESS_KEY'], at['ACCESS_SECRET'])
    api = API(handler)
    # Echo the authenticated account name as a sanity check.
    print(api.me().name)
    listener = HolyListener(api=api)
    Stream(handler, listener).userstream()


if __name__ == '__main__':
    main()
| """Coordinates the twitter api with the markov chain models"""
import json
from pprint import pprint
from tweepy import Stream, OAuthHandler, API
from tweepy.streaming import StreamListener
from holy_markov import OldTestaPassagesMarkov
from twitter_secrets import api_tokens as at
class HolyListener(StreamListener):
old_testa = OldTestaPassagesMarkov()
bot_name = 'HolyStupidArt'
def send_passage(self, screen_name):
if screen_name != self.bot_name:
print('Passage sent to @' + screen_name)
passage = self.old_testa.twitter_message(line_length=(140 - len(screen_name) - 2))
tweet = ''.join(['@', screen_name, ' ', passage])
self.api.update_status(tweet)
def on_connect(self):
print("Connection established!!")
def on_disconnect(self, notice):
print("Connection lost!! : ", notice)
def on_direct_message(self, status):
try:
dm = status._json['direct_message']
self.send_passage(screen_name=dm['sender_screen_name'])
except BaseException as e:
print("Failed on_direct_message()", str(e))
pprint(status._json)
return True
def on_event(self, status):
print('Entered on_event()')
print(status)
def on_status(self, status):
print('Entered on_status()')
try:
self.send_passage(screen_name=status._json['user']['screen_name'])
except BaseException as e:
print("Failed on_status()", str(e))
pprint(status._json)
return True
def on_error(self, status):
print('Entered on_error()')
print(status)
def main():
"""The main event loop for the holy twitter bot
It watches for twitter events, and posts randomly generated holy text to twitter.
"""
try:
auth = OAuthHandler(at['CONSUMER_KEY'], at['CONSUMER_SECRET'])
auth.secure = True
auth.set_access_token(at['ACCESS_KEY'], at['ACCESS_SECRET'])
api = API(auth)
# If the authentication was successful, you should
# see the name of the account print out
print(api.me().name)
stream = Stream(auth, HolyListener(api=api))
stream.userstream()
except BaseException as e:
print("Error in main()", e)
if __name__ == '__main__':
main()
| mit | Python |
fde3d8d078d7ca45bd7db50b5c89c8f5f923d786 | Rewrite harmonic_centrality to better use iterator | tmilicic/networkx,Sixshaman/networkx,andnovar/networkx,OrkoHunter/networkx,goulu/networkx,michaelpacer/networkx,JamesClough/networkx,cmtm/networkx,jfinkels/networkx,NvanAdrichem/networkx,SanketDG/networkx | networkx/algorithms/centrality/harmonic.py | networkx/algorithms/centrality/harmonic.py | """
Harmonic centrality measure.
"""
# Copyright (C) 2015 by
# Alessandro Luongo
# BSD license.
from __future__ import division
import functools
import networkx as nx
__author__ = "\n".join(['Alessandro Luongo (alessandro.luongo@studenti.unimi.it'])
__all__ = ['harmonic_centrality']
def harmonic_centrality(G, distance=None):
    """Compute harmonic centrality for nodes.

    Harmonic centrality [1]_ of a node `u` is the sum of the reciprocal
    of the shortest path distances from all other nodes to `u`.

    .. math::

        C(u) = \sum_{v \neq u \epsilon G} \frac{1}{d(v, u)},

    where `d(v, u)` is the shortest-path distance between `v` and `u`.

    Notice that higher values indicate higher centrality.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    distance : edge attribute key, optional (default=None)
      Use the specified edge attribute as the edge distance in shortest
      path calculations. If `None`, then each edge will have distance equal to 1.

    Returns
    -------
    nodes : dictionary
      Dictionary of nodes with harmonic centrality as the value.

    See Also
    --------
    betweenness_centrality, load_centrality, eigenvector_centrality,
    degree_centrality, closeness_centrality

    Notes
    -----
    If the 'distance' keyword is set to an edge attribute key then the
    shortest-path length will be computed using Dijkstra's algorithm with
    that edge attribute as the edge weight.

    References
    ----------
    .. [1] Boldi, Paolo, and Sebastiano Vigna. "Axioms for centrality."
           Internet Mathematics 10.3-4 (2014): 222-262.
    """
    # Trivial graphs have no other nodes to reach.
    if len(G) <= 1:
        return {node: 0.0 for node in G.nodes()}

    # Distances must be measured *to* each node, so reverse directed graphs.
    graph = G.reverse() if G.is_directed() else G

    if distance is None:
        lengths = nx.all_pairs_shortest_path_length(graph)
    else:
        # Weighted case: Dijkstra with the given attribute as edge weight.
        lengths = nx.all_pairs_dijkstra_path_length(graph, weight=distance)

    # Zero-distance entries (the node itself) contribute nothing to the sum.
    return {node: sum(1 / d for d in dist.values() if d > 0)
            for node, dist in lengths}
| """
Harmonic centrality measure.
"""
# Copyright (C) 2015 by
# Alessandro Luongo
# BSD license.
from __future__ import division
import functools
import networkx as nx
__author__ = "\n".join(['Alessandro Luongo (alessandro.luongo@studenti.unimi.it'])
__all__ = ['harmonic_centrality']
def harmonic_centrality(G, distance=None):
"""Compute harmonic centrality for nodes.
Harmonic centrality [1]_ of a node `u` is the sum of the reciprocal
of the shortest path distances from all other nodes to `u`.
.. math::
C(u) = \sum_{v \neq u \epsilon G} \frac{1}{d(v, u)},
where `d(v, u)` is the shortest-path distance between `v` and `u`.
Notice that higher values indicate higher centrality.
Parameters
----------
G : graph
A NetworkX graph
distance : edge attribute key, optional (default=None)
Use the specified edge attribute as the edge distance in shortest
path calculations. If `None`, then each edge will have distance equal to 1.
Returns
-------
nodes : dictionary
Dictionary of nodes with harmonic centrality as the value.
See Also
--------
betweenness_centrality, load_centrality, eigenvector_centrality,
degree_centrality, closeness_centrality
Notes
-----
If the 'distance' keyword is set to an edge attribute key then the
shortest-path length will be computed using Dijkstra's algorithm with
that edge attribute as the edge weight.
References
----------
.. [1] Boldi, Paolo, and Sebastiano Vigna. "Axioms for centrality." Internet Mathematics 10.3-4 (2014): 222-262.
"""
if distance is not None:
# use Dijkstra's algorithm with specified attribute as edge weight
path_length = functools.partial(nx.all_pairs_dijkstra_path_length,
weight=distance)
else:
path_length = nx.all_pairs_shortest_path_length
nodes = G.nodes()
harmonic_centrality = {}
if len(G) <= 1:
for singleton in nodes:
harmonic_centrality[singleton] = 0.0
return harmonic_centrality
sp = path_length(G.reverse() if G.is_directed() else G)
for node, dist_dict in sp:
harmonic_centrality[node] = sum(
[1/dist if dist > 0 else 0 for dist in dist_dict.values()])
return harmonic_centrality
| bsd-3-clause | Python |
c18884b10f345a8a094a3c4bf589888027d43bd5 | Remove url inlude for Django 2.0 | gunthercox/ChatterBot,vkosuri/ChatterBot | examples/django_app/example_app/urls.py | examples/django_app/example_app/urls.py | from django.conf.urls import url
from django.contrib import admin
from example_app.views import ChatterBotAppView, ChatterBotApiView
# URL routes for the example app: landing page, Django admin, and the
# ChatterBot JSON API endpoint.
urlpatterns = [
    url(r'^$', ChatterBotAppView.as_view(), name='main'),
    # admin.site.urls is passed directly (no include() wrapper), as required
    # from Django 2.0 onward.
    url(r'^admin/', admin.site.urls, name='admin'),
    url(r'^api/chatterbot/', ChatterBotApiView.as_view(), name='chatterbot'),
]
| from django.conf.urls import include, url
from django.contrib import admin
from example_app.views import ChatterBotAppView, ChatterBotApiView
urlpatterns = [
url(r'^$', ChatterBotAppView.as_view(), name='main'),
url(r'^admin/', include(admin.site.urls), name='admin'),
url(r'^api/chatterbot/', ChatterBotApiView.as_view(), name='chatterbot'),
]
| bsd-3-clause | Python |
68f24127a3f2ff31176301b624352ecffcd4fa15 | Add created and updated fields when a post is created, also extends update from basemodel | oldani/nanodegree-blog,oldani/nanodegree-blog,oldani/nanodegree-blog | app/models/post.py | app/models/post.py | from datetime import datetime
from flask_user import current_user
from .base import BaseModel
class Post(BaseModel):
    """A blog post entity.

    Tracks creation/last-update timestamps and the ids of its comments.
    Deleting a post also deletes its comments and removes the post id from
    the current user's post list.
    """
    def __init__(self, **kwargs):
        # Stamp both timestamps at creation time; `updated` is refreshed on
        # every update() call. Start with an empty comment-id list.
        self.created = self.updated = datetime.now()
        self.comment_list = []
        super().__init__(**kwargs)
    def add_comment(self, comment_id):
        """ Append a comment id to a comment_list field or
        create it if it is the first one. """
        # Entities created before comment_list existed may lack the attribute.
        if not hasattr(self, 'comment_list'):
            self.comment_list = []
        self.comment_list.append(comment_id)
        self.update()
    def delete_comment(self, comment_id):
        """ Delete a given comment id from the comment list. """
        self.comment_list.remove(int(comment_id))
        self.update()
    def update(self, **kwargs):
        """ Extend BaseModel.update() to refresh the `updated` timestamp. """
        self.updated = datetime.now()
        super().update(**kwargs)
    @classmethod
    def delete(cls, entity_id):
        """ Delete the post, its comments, and the owner's reference to it.

        Returns True on success, or None when no post with `entity_id` exists.
        """
        post = cls.get(entity_id)
        if post:
            if post.comment_list:
                # Batch-delete all attached comment entities.
                cls.delete_multi(post.comment_list, kind='Comment')
            super().delete(entity_id)
            # Detach the post from the logged-in user's list.
            current_user.posts_list.remove(int(entity_id))
            current_user.put()
            return True
        return
| from .base import BaseModel
from flask_user import current_user
class Post(BaseModel):
def __init__(self, **kwargs):
# Add a comment list field
self.comment_list = []
super().__init__(**kwargs)
def add_comment(self, comment_id):
""" Append a comment id to a comment_list field or
create it if it is the firt one. """
if not hasattr(self, 'comment_list'):
self.comment_list = []
self.comment_list.append(comment_id)
self.update()
def delete_comment(self, comment_id):
""" Delete a given comment id from commnet list. """
self.comment_list.remove(int(comment_id))
self.update()
@classmethod
def delete(cls, entity_id):
post = cls.get(entity_id)
if post:
if post.comment_list:
cls.delete_multi(post.comment_list, kind='Comment')
super().delete(entity_id)
current_user.posts_list.remove(int(entity_id))
current_user.put()
return True
return
| mit | Python |
d7e02bf6a36a19dad268deae94757a21447b8220 | remove the executable bit and the hashbang in the python script | OpenFAST/r-test,OpenFAST/r-test | runCertTestsLocally.py | runCertTestsLocally.py | """
This script runs all of the CertTest cases to create a local 'gold standard'
set of solutions.
"""
import os
import sys
import shutil
import subprocess
# if the local output directory already exists, bail for two reasons
# 1. don't silenty overwrite previous outputs
# 2. the python filesystem methods arent robust enough to do something like 'cp * .'
localDirectory = "outputs-local"
if os.path.exists(localDirectory):
    print "The local output directory, {}, already exists.".format(localDirectory)
    sys.exit(1)
# get the input files from /inputs
shutil.copytree("inputs", "{}".format(localDirectory))
# run through each case
os.chdir("{}".format(localDirectory))
# Zero-padded case numbers "01", "02" (range end is exclusive).
num = [str(i).zfill(2) for i in range(1,3)]
for n in num:
    caseName = "Test{}".format(n)
    # shell=True so the `>` redirection captures openfast output per case.
    command = "openfast {}.fst > {}.log".format(caseName, caseName)
    print "'{}' - running".format(command)
    return_code = subprocess.call(command, shell=True)
    print "'{}' - finished with exit code {}".format(command, return_code)
| #!/usr/bin/python
"""
This script runs all of the CertTest cases to create a local 'gold standard'
set of solutions.
"""
import os
import sys
import shutil
import subprocess
# if the local output directory already exists, bail for two reasons
# 1. don't silenty overwrite previous outputs
# 2. the python filesystem methods arent robust enough to do something like 'cp * .'
localDirectory = "outputs-local"
if os.path.exists(localDirectory):
print "The local output directory, {}, already exists.".format(localDirectory)
sys.exit(1)
# get the input files from /inputs
shutil.copytree("inputs", "{}".format(localDirectory))
# run through each case
os.chdir("{}".format(localDirectory))
num = [str(i).zfill(2) for i in range(1,3)]
for n in num:
caseName = "Test{}".format(n)
command = "openfast {}.fst > {}.log".format(caseName, caseName)
print "'{}' - running".format(command)
return_code = subprocess.call(command, shell=True)
print "'{}' - finished with exit code {}".format(command, return_code)
| apache-2.0 | Python |
780a7a3880bcfc1b54da5b06273ffc8a05abd94c | Replace `ExampleSerializer` by `features.serialize_example` | google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research | tf3d/datasets/utils/example_parser.py | tf3d/datasets/utils/example_parser.py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parsing library for tf.train.Example protos."""
import tensorflow_datasets as tfds
def decode_serialized_example(serialized_example, features):
  """Decodes a serialized Example proto as dictionary of tensorflow tensors.

  Args:
    serialized_example: serialized tf.train.Example proto.
    features: tfds.features.FeatureConnector which provides the decoding
      specification.

  Returns:
    Decoded tf.Tensor or dictionary of tf.Tensor, stored in the Example proto.
  """
  # Delegate both parsing and decoding to the feature connector.
  decoded = features.deserialize_example(serialized_example)
  return decoded
def decode_serialized_example_as_numpy(serialized_example, features):
  """Decodes a serialized Example proto as dictionary of numpy tensors.

  Note: This function is eager only.

  Args:
    serialized_example: serialized tf.train.Example proto.
    features: tfds.features.FeatureConnector which provides the decoding
      specification.

  Returns:
    Decoded numpy array or dictionary of numpy arrays, stored in the
    Example proto.
  """
  def _as_numpy(tensor):
    # Materialize each (eager) tensor as a numpy array.
    return tensor.numpy()

  tensor_dict = decode_serialized_example(serialized_example, features)
  return tfds.core.utils.map_nested(_as_numpy, tensor_dict)
def encode_serialized_example(example_data, features):
  """Encode the feature dict into a serialized tf.train.Example proto string.

  The input example_data can be anything that the provided feature
  specification can consume.

  Example:

    example_data = {
        'image': 'path/to/img.png',
        'rotation': np.eye(3, dtype=np.float32)
    }
    features={
        'image': tfds.features.Image(),
        'rotation': tfds.features.Tensor(shape=(3,3), dtype=tf.float64),
    }
    example_proto_string = encode_serialized_example(example_data, features)

  Args:
    example_data: Value or dictionary of feature values to convert into
      Example proto.
    features: tfds.features.FeatureConnector which provides the encoding
      specification.

  Returns:
    Serialized Example proto storing encoded example_data as per the
    specification provided by features.
  """
  # The feature connector performs both encoding and serialization.
  serialized = features.serialize_example(example_data)
  return serialized
| # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parsing library for tf.train.Example protos."""
import tensorflow_datasets as tfds
def decode_serialized_example(serialized_example, features):
"""Decodes a serialized Example proto as dictionary of tensorflow tensors.
Args:
serialized_example: serialized tf.train.Example proto.
features: tfds.features.FeatureConnector which provides the decoding
specification.
Returns:
Decoded tf.Tensor or dictionary of tf.Tensor, stored in the Example proto.
"""
example_specs = features.get_serialized_info()
parser = tfds.core.example_parser.ExampleParser(example_specs)
tfexample_data = parser.parse_example(serialized_example)
return features.decode_example(tfexample_data)
def decode_serialized_example_as_numpy(serialized_example, features):
"""Decodes a serialized Example proto as dictionary of numpy tensors.
Note: This function is eager only.
Args:
serialized_example: serialized tf.train.Example proto.
features: tfds.features.FeatureConnector which provides the decoding
specification.
Returns:
Decoded tf.Tensor or dictionary of tf.Tensor, stored in the Example proto.
"""
tensor_dict = decode_serialized_example(serialized_example, features)
return tfds.core.utils.map_nested(lambda x: x.numpy(), tensor_dict)
def encode_serialized_example(example_data, features):
"""Encode the feature dict into a tf.train.Eexample proto string.
The input example_data can be anything that the provided feature specification
can consume.
Example:
example_data = {
'image': 'path/to/img.png',
'rotation': np.eye(3, dtype=np.float32)
}
features={
'image': tfds.features.Image(),
'rotation': tfds.features.Tensor(shape=(3,3), dtype=tf.float64),
}
example_proto_string = encode_serialized_example(example_data, features)
Args:
example_data: Value or dictionary of feature values to convert into Example
proto.
features: tfds.features.FeatureConnector which provides the encoding
specification.
Returns:
Serialized Example proto storing encoded example_data as per specification
provided by features.
"""
encoded = features.encode_example(example_data)
example_specs = features.get_serialized_info()
serializer = tfds.core.example_serializer.ExampleSerializer(example_specs)
return serializer.serialize_example(encoded)
| apache-2.0 | Python |
607cc13eb791f763c4d34256568ef5f45177ca3b | Revise typo for fibonacci_memo() and use generator | bowen0701/algorithms_data_structures | alg_fibonacci.py | alg_fibonacci.py | """Fibonacci series:
0, 1, 1, 2, 3, 5, 8,...
- Fib(0) = 0
- Fib(1) = 1
- Fib(n) = Fib(n - 1) + Fib(n - 2)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def fibonacci_recur(n):
    """Get the nth number of Fibonacci series, Fn, by naive recursion.
    - Time complexity: 2Fn - 1 = O(Fn); grows too fast.
    - Space complexity: O(n) recursion depth.
    """
    # Base cases F(0)=0 and F(1)=1; otherwise sum the two predecessors.
    return n if n <= 1 else fibonacci_recur(n - 1) + fibonacci_recur(n - 2)
def fibonacci_memo(n):
    """Get the nth number of Fibonacci series, Fn, by memoization.
    - Time complexity: O(n).
    - Space complexity: O(n).

    Builds the table bottom-up from F(0)=0 and F(1)=1.
    """
    fn_d = {}
    fn_d[0] = 0
    fn_d[1] = 1
    # `range` (not Py2-only `xrange`) keeps the file runnable on Python 3,
    # matching the __future__ imports at the top of the file. Loop over a
    # separate index so the parameter `n` is not shadowed.
    for i in range(2, n + 1):
        fn_d[i] = fn_d[i - 1] + fn_d[i - 2]
    return fn_d[n]
def fibonacci_dp(n):
    """Get the nth number of Fibonacci series by dynamic programming.
    - Time complexity is still O(n), like fibonacci_memo().
    - Space complexity is O(1), improving a lot.
    """
    a, b = 0, 1
    # `range` (not Py2-only `xrange`) keeps the file runnable on Python 3,
    # matching the __future__ imports at the top of the file.
    for _ in range(n):
        a, b = a + b, a
    return a
def main():
    """Time the three Fibonacci implementations on the same input."""
    import time
    n = 30
    print('{}th number of Fibonacci series:'.format(n))
    # (label, implementation) pairs, reported in the original order.
    implementations = (
        ('By recursion', fibonacci_recur),
        ('By memorization', fibonacci_memo),
        ('By dynamic programming', fibonacci_dp),
    )
    for label, fib in implementations:
        start_time = time.time()
        print('{}: {}'.format(label, fib(n)))
        print('Time: {}'.format(time.time() - start_time))
if __name__ == '__main__':
main()
| """Fibonacci series:
0, 1, 1, 2, 3, 5, 8,...
- Fib(0) = 0
- Fib(1) = 1
- Fib(n) = Fib(n - 1) + Fib(n - 2)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def fibonacci_recur(n):
"""Get the nth number of Fibonacci series, Fn, by recursion.
- Time complexity: 2Fn - 1 = O(Fn); too fast.
- Space complexity: O(n).
"""
if n <= 1:
return n
else:
return fibonacci_recur(n - 1) + fibonacci_recur(n - 2)
def fibonacci_memo(n):
"""Get the nth number of Fibonacci series, Fn, by memorization.
- Time complexity: O(n).
- Space complexity: O(n).
"""
fn_d = {}
fn_d[0] = 0
fn_d[1] = 1
for n in range(2, n):
fn_d[n] = fn_d[n - 1] + fn_d[n - 2]
return fn_d[n]
def fibonacci_dp(n):
"""Get the nth number of Fibonacci series by dynamic programming.
- Time complexity is still O(n), like fibonacci_memo().
- Space complexity is O(1), improving a lot.
"""
a, b = 0, 1
for _ in xrange(n):
a, b = a + b, a
return a
def main():
import time
n = 30
print('{}th number of Fibonacci series:'.format(n))
start_time = time.time()
print('By recursion: {}'.format(fibonacci_recur(n)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('By memorization: {}'.format(fibonacci_memo(n)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('By dynamic programming: {}'.format(fibonacci_dp(n)))
print('Time: {}'.format(time.time() - start_time))
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
369d0e1a0040f7f4331cb7607dbaaaa8ab9d6f96 | Fix response with 504 code when SF is off-line | ZachMassia/platformio,bkudria/platformio,jrobeson/platformio,jrobeson/platformio,mseroczynski/platformio,dkuku/platformio,eiginn/platformio,platformio/platformio-core,atyenoria/platformio,mplewis/platformio,jrobeson/platformio,platformio/platformio-core,jrobeson/platformio,valeros/platformio,bkudria/platformio,platformio/platformio,bkudria/platformio,bkudria/platformio,mcanthony/platformio | tests/test_pkgmanifest.py | tests/test_pkgmanifest.py | # Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
import pytest
import requests
from os.path import basename
from platformio.util import get_api_result
@pytest.fixture(scope="session")
def sfpkglist():
    """Fetch the SourceForge package list (name -> metadata dict).

    Best-effort: returns None when the list cannot be fetched/parsed so that
    dependent tests can skip instead of erroring.
    """
    result = None
    r = None
    try:
        r = requests.get("http://sourceforge.net/projects"
                         "/platformio-storage/files/packages/list")
        result = r.json()
        r.raise_for_status()
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
    # propagate; any request or JSON error leaves `result` as None.
    # NOTE(review): raise_for_status() runs *after* json(), so an error
    # status whose body parses as JSON still yields data -- confirm intended.
    except Exception:
        pass
    finally:
        if r:
            r.close()
    return result
def pytest_generate_tests(metafunc):
    """Parametrize tests taking `package_data` with every package variant
    listed in the remote manifest."""
    if "package_data" not in metafunc.fixturenames:
        return
    pkgs_manifest = get_api_result("/packages/manifest")
    assert isinstance(pkgs_manifest, dict)
    # Flatten {name: [variant, ...]} into a single list of variants.
    packages = []
    for _, variants in pkgs_manifest.iteritems():
        packages.extend(variants)
    metafunc.parametrize("package_data", packages)
def validate_response(req):
    """Assert the HTTP response is a 200 with a non-empty payload."""
    assert req.status_code == 200
    content_length = int(req.headers['Content-Length'])
    assert content_length > 0
def validate_package(url, sfpkglist):
    # Check that the package URL resolves to a gzip/binary download.
    # NOTE(review): `sfpkglist` is unused here and this helper has no visible
    # callers in this file -- candidate for removal; confirm before deleting.
    r = requests.head(url, allow_redirects=True)
    validate_response(r)
    assert r.headers['Content-Type'] in ("application/x-gzip",
                                         "application/octet-stream")
def test_package(package_data, sfpkglist):
    """Validate one manifest entry: URL naming, availability, and SHA1."""
    assert package_data['url'].endswith("%d.tar.gz" % package_data['version'])
    # check content type and that file exists
    try:
        r = requests.head(package_data['url'], allow_redirects=True)
        # SourceForge answers 504 when it is down; treat it like a connection
        # failure so the test is skipped rather than failed.
        if r.status_code == 504:
            raise requests.exceptions.ConnectionError()
    except requests.exceptions.ConnectionError:
        return pytest.skip("SF is off-line")
    validate_response(r)
    assert r.headers['Content-Type'] in ("application/x-gzip",
                                         "application/octet-stream")
    # check sha1 sum
    if sfpkglist is None:
        return pytest.skip("SF is off-line")
    pkgname = basename(package_data['url'])
    assert pkgname in sfpkglist
    assert package_data['sha1'] == sfpkglist.get(pkgname, {}).get("sha1")
| # Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
import pytest
import requests
from os.path import basename
from platformio.util import get_api_result
@pytest.fixture(scope="session")
def sfpkglist():
result = None
r = None
try:
r = requests.get("http://sourceforge.net/projects"
"/platformio-storage/files/packages/list")
result = r.json()
r.raise_for_status()
except:
pass
finally:
if r:
r.close()
return result
def pytest_generate_tests(metafunc):
if "package_data" not in metafunc.fixturenames:
return
pkgs_manifest = get_api_result("/packages/manifest")
assert isinstance(pkgs_manifest, dict)
packages = []
for _, variants in pkgs_manifest.iteritems():
for item in variants:
packages.append(item)
metafunc.parametrize("package_data", packages)
def validate_response(req):
assert req.status_code == 200
assert int(req.headers['Content-Length']) > 0
def validate_package(url, sfpkglist):
r = requests.head(url, allow_redirects=True)
validate_response(r)
assert r.headers['Content-Type'] in ("application/x-gzip",
"application/octet-stream")
def test_package(package_data, sfpkglist):
assert package_data['url'].endswith("%d.tar.gz" % package_data['version'])
# check content type and that file exists
try:
r = requests.head(package_data['url'], allow_redirects=True)
except requests.exceptions.ConnectionError:
return pytest.skip("SF is off-line")
validate_response(r)
assert r.headers['Content-Type'] in ("application/x-gzip",
"application/octet-stream")
# check sha1 sum
if sfpkglist is None:
return pytest.skip("SF is off-line")
pkgname = basename(package_data['url'])
assert pkgname in sfpkglist
assert package_data['sha1'] == sfpkglist.get(pkgname, {}).get("sha1")
| apache-2.0 | Python |
06ed4fe212b0b35056c931aab0b084d2276ee79f | Use allOnlyFields instead of roundabout way, to remain atomic | kata198/indexedredis,kata198/indexedredis | IndexedRedis/compat_convert.py | IndexedRedis/compat_convert.py | # Copyright (c) 2017 Timothy Savannah under LGPL version 2.1. See LICENSE for more information.
#
# Some conversion functions
import copy
# vim:set ts=8 shiftwidth=8 softtabstop=8 noexpandtab :
def compat_convertPickleFields(mdlClass):
	'''
		compat_convertPickleFields - Convert pickle fields on given model from the old format to the new format.

		This is not threadsafe, should be run while things are not in motion. Will only affect the pickle-type fields.

		This function expects that all objects are either old format or new format, does not handle mixed.

		@param mdlClass - <IndexedRedis.IndexedRedisModel> - The Model
	'''
	# Local imports avoid a circular import at module load time -- TODO confirm.
	from IndexedRedis.fields import IRPickleField
	from IndexedRedis.fields.compat_pickle import IRCompatPickleField
	pickleFields = [field for field in mdlClass.FIELDS if issubclass(field.__class__, IRPickleField)]
	# Nothing to convert if the model has no pickle-type fields.
	if not pickleFields:
		return
	pickleFieldsStr = [str(field) for field in pickleFields]
	oldFields = copy.copy(mdlClass.FIELDS)
	# Switch to old compat type for fetch, so the stored (old-format) data
	# decodes correctly. Mutates the class FIELDS in place -- not threadsafe.
	for i in range(len(oldFields)):
		if issubclass(oldFields[i].__class__, IRPickleField):
			mdlClass.FIELDS[i] = IRCompatPickleField(str(oldFields[i]))
	# allOnlyFields fetches every object with just the pickle fields in one
	# atomic pass (no separate primary-key round trip).
	partialObjs = mdlClass.objects.allOnlyFields(pickleFieldsStr)
	# Restore fields so the save below re-encodes in the new format.
	mdlClass.FIELDS = copy.copy(oldFields)
	for partialObj in partialObjs:
		for fieldName in pickleFieldsStr:
			# Sentinel forces the field to be seen as "changed" so save()
			# rewrites it even though the value is logically the same.
			partialObj._origData[fieldName] = '________UNSET#%$@#$%rfwesdv'
	# Save using new class
	mdlClass.saver.save(partialObjs)
# vim:set ts=8 shiftwidth=8 softtabstop=8 noexpandtab :
| # Copyright (c) 2017 Timothy Savannah under LGPL version 2.1. See LICENSE for more information.
#
# Some conversion functions
import copy
# vim:set ts=8 shiftwidth=8 softtabstop=8 noexpandtab :
# TODO: Add a "filterFetchOnlyFields" helper method
def compat_convertPickleFields(mdlClass):
'''
compat_convertPickleFields - Convert pickle fields on given model from the old format to the new format.
This is not threadsafe, should be run while things are not in motion. Will only affect the pickle-type fields.
This function expects that all objects are either old format or new format, does not handle mixed.
@param mdlClass - <IndexedRedis.IndexedRedisModel> - The Model
'''
from IndexedRedis.fields import IRPickleField
from IndexedRedis.fields.compat_pickle import IRCompatPickleField
pickleFields = [field for field in mdlClass.FIELDS if issubclass(field.__class__, IRPickleField)]
if not pickleFields:
return
pickleFieldsStr = [str(field) for field in pickleFields]
allPks = mdlClass.objects.getPrimaryKeys()
oldFields = copy.copy(mdlClass.FIELDS)
# Switch to old compat type for fetch
for i in range(len(oldFields)):
if issubclass(oldFields[i].__class__, IRPickleField):
mdlClass.FIELDS[i] = IRCompatPickleField(str(oldFields[i]))
partialObjs = mdlClass.objects.getMultipleOnlyFields(allPks, pickleFieldsStr)
# Restore fields
mdlClass.FIELDS = copy.copy(oldFields)
for partialObj in partialObjs:
for fieldName in pickleFieldsStr:
partialObj._origData[fieldName] = '________UNSET#%$@#$%rfwesdv'
# Save using new class
mdlClass.saver.save(partialObjs)
# vim:set ts=8 shiftwidth=8 softtabstop=8 noexpandtab :
| lgpl-2.1 | Python |
c709cba9f84fa15b50819b5e77ab7cc66db8d647 | simplify output | buganini/bsdconv,buganini/bsdconv,buganini/bsdconv,buganini/bsdconv | tools/findAinB.py | tools/findAinB.py | import sys
import re
# Whitespace splitter for table rows; `stp` strips a leading 0x/0X prefix.
sep = re.compile(r"\s+")
stp = re.compile(r"^0[xX]")
# Two mapping-table files given on the command line: A (argv[1]), B (argv[2]).
fa = open(sys.argv[1])
fb = open(sys.argv[2])
# Sets of normalized first-column codepoints, one per file (dict used as set).
la = {}
lb = {}
for f,l in ((fa, la), (fb, lb)):
    for ln in f:
        # Normalize: trim, uppercase hex digits.
        ln = ln.strip().upper()
        if ln == "":
            continue
        # Skip comment lines.
        if ln.startswith("#"):
            continue
        a = sep.split(ln)
        # First column without any 0x prefix is the key.
        p = stp.sub("", a[0])
        l[p]=1
# Report every key of A found in B, then summarize the overlap.
allnotin = True
allin = True
for k in la:
    if k in lb:
        print(k)
        allnotin = False
    else:
        allin = False
if allin:
    print("All In")
elif allnotin:
    print("All Not In")
else:
    print("Not All In")
| import sys
import re
sep = re.compile(r"\s+")
stp = re.compile(r"^0[xX]")
fa = open(sys.argv[1])
fb = open(sys.argv[2])
la = {}
lb = {}
for f,l in ((fa, la), (fb, lb)):
for ln in f:
ln = ln.strip().upper()
if ln == "":
continue
if ln.startswith("#"):
continue
a = sep.split(ln)
p = stp.sub("", a[0])
l[p]=1
allnotin = True
allin = True
for k in la:
if k in lb:
print(k)
allnotin = False
else:
allin = False
if allin:
print("All In")
else:
print("Not All In")
if allnotin:
print("All Not In")
| bsd-2-clause | Python |
78659ab199fef51cf8dbe15cbc2e7ed08b78eba9 | Add video search | moscowpython/moscowpython,moscowpython/moscowpython,VladimirFilonov/moscowdjango,VladimirFilonov/moscowdjango,moscowpython/moscowpython,moscowdjango/moscowdjango,moscowdjango/moscowdjango,VladimirFilonov/moscowdjango,moscowdjango/moscowdjango | apps/meetup/admin.py | apps/meetup/admin.py | # coding: utf-8
from django.contrib import admin
from .forms import EventAdminForm
from .models import Photo, Venue, MediaCoverage, Talk, Sponsor, Speaker, \
Event, Tutorial, Vote
def oembed_presentation(obj):
    """True when the talk has oEmbed slide data attached."""
    has_slides = obj.presentation_data
    return bool(has_slides)
oembed_presentation.short_description = u'Слайды'
oembed_presentation.boolean = True
def oembed_video(obj):
    """True when the talk has oEmbed video data attached."""
    has_video = obj.video_data
    return bool(has_video)
oembed_video.short_description = u'Видео'
oembed_video.boolean = True
def preview(obj):
    """Thumbnail markup for the admin change list (height capped at 100px)."""
    url = obj.get_absolute_url()
    return '<img src=%s style="height:100px">' % url
preview.allow_tags = True
class TalkAdmin(admin.ModelAdmin):
    # Columns include boolean oEmbed indicators for slides and video.
    list_display = ['__str__', 'position', 'speaker', 'status', oembed_presentation, oembed_video, 'event']
    list_editable = ['position']
    list_filter = ['event']
    # oEmbed payloads are derived data; keep them read-only in the form.
    readonly_fields = ['presentation_data', 'video_data']
    search_fields = ['name']
    # Newest event first, then by the talk's position within the event.
    ordering = ['-event__pk', 'position']
class PhotoInline(admin.TabularInline):
    # Edit event photos inline on the Event admin page.
    model = Photo
class MediaCoverageInline(admin.TabularInline):
    # Edit media-coverage links inline on the Event admin page.
    model = MediaCoverage
class EventAdmin(admin.ModelAdmin):
    # Custom form adds event-specific validation/widgets.
    form = EventAdminForm
    list_display = ['__str__', 'date', 'venue', 'status']
    list_editable = ['status']
    # status_changed is hidden from the form -- presumably maintained
    # automatically when `status` changes; confirm against the model.
    exclude = ['status_changed']
    inlines = [PhotoInline, MediaCoverageInline]
class VenueAdmin(admin.ModelAdmin):
    # Show the venue's address alongside its name in the change list.
    list_display = ['__str__', 'address']
class PhotoAdmin(admin.ModelAdmin):
    # Includes an inline thumbnail column (see `preview` above).
    list_display = ['__str__', preview, 'event', 'caption']
    list_editable = ['caption']
    # Small page size since every row renders an image.
    list_per_page = 10
    ordering = ['-id']
def photo_preview(obj):
    """Render a 50px-high thumbnail of the speaker's avatar."""
    avatar = obj.avatar_url
    return '<img src=%s style="height:50px">' % avatar
photo_preview.allow_tags = True
class SpeakerAdmin(admin.ModelAdmin):
    # Includes an avatar thumbnail column (see `photo_preview` above).
    list_display = ['__str__', photo_preview, 'slug',]
    list_editable = ['slug']
def logo_preview(obj):
    """Render the sponsor's logo at a fixed 150px width."""
    logo_url = obj.logo.url
    return '<img src=%s width=150>' % logo_url
logo_preview.allow_tags = True
class SponsorAdmin(admin.ModelAdmin):
    # Includes a logo preview column (see `logo_preview` above).
    list_display = ['__str__', logo_preview, 'url',]
    list_editable = ['url']
class TutorialAdmin(admin.ModelAdmin):
    # Default admin behavior is sufficient for tutorials.
    pass
class MediaCoverageAdmin(admin.ModelAdmin):
    list_display = ['__str__', 'event']
    list_filter = ['event']
    # Newest event first, stable by insertion order within an event.
    ordering = ['-event__pk', 'id']
# Hook all meetup models into the Django admin site, pairing each model
# with its customized ModelAdmin where one exists.
admin.site.register(Talk, TalkAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(Venue, VenueAdmin)
admin.site.register(Speaker, SpeakerAdmin)
admin.site.register(Photo, PhotoAdmin)
admin.site.register(Sponsor, SponsorAdmin)
admin.site.register(MediaCoverage, MediaCoverageAdmin)
admin.site.register(Tutorial, TutorialAdmin)
# Vote uses the default ModelAdmin.
admin.site.register(Vote)
| # coding: utf-8
from django.contrib import admin
from .forms import EventAdminForm
from .models import Photo, Venue, MediaCoverage, Talk, Sponsor, Speaker, \
Event, Tutorial, Vote
def oembed_presentation(obj):
return bool(obj.presentation_data)
oembed_presentation.short_description = u'Слайды'
oembed_presentation.boolean = True
def oembed_video(obj):
return bool(obj.video_data)
oembed_video.short_description = u'Видео'
oembed_video.boolean = True
def preview(obj):
return '<img src=%s style="height:100px">' % obj.get_absolute_url()
preview.allow_tags = True
class TalkAdmin(admin.ModelAdmin):
list_display = ['__str__', 'position', 'speaker', 'status', oembed_presentation, oembed_video, 'event']
list_editable = ['position']
list_filter = ['event']
readonly_fields = ['presentation_data', 'video_data']
ordering = ['-event__pk', 'position']
class PhotoInline(admin.TabularInline):
model = Photo
class MediaCoverageInline(admin.TabularInline):
model = MediaCoverage
class EventAdmin(admin.ModelAdmin):
form = EventAdminForm
list_display = ['__str__', 'date', 'venue', 'status']
list_editable = ['status']
exclude = ['status_changed']
inlines = [PhotoInline, MediaCoverageInline]
class VenueAdmin(admin.ModelAdmin):
list_display = ['__str__', 'address']
class PhotoAdmin(admin.ModelAdmin):
list_display = ['__str__', preview, 'event', 'caption']
list_editable = ['caption']
list_per_page = 10
ordering = ['-id']
def photo_preview(obj):
return '<img src=%s style="height:50px">' % obj.avatar_url
photo_preview.allow_tags = True
class SpeakerAdmin(admin.ModelAdmin):
list_display = ['__str__', photo_preview, 'slug',]
list_editable = ['slug']
def logo_preview(obj):
return '<img src=%s width=150>' % obj.logo.url
logo_preview.allow_tags = True
class SponsorAdmin(admin.ModelAdmin):
list_display = ['__str__', logo_preview, 'url',]
list_editable = ['url']
class TutorialAdmin(admin.ModelAdmin):
pass
class MediaCoverageAdmin(admin.ModelAdmin):
list_display = ['__str__', 'event']
list_filter = ['event']
ordering = ['-event__pk', 'id']
admin.site.register(Talk, TalkAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(Venue, VenueAdmin)
admin.site.register(Speaker, SpeakerAdmin)
admin.site.register(Photo, PhotoAdmin)
admin.site.register(Sponsor, SponsorAdmin)
admin.site.register(MediaCoverage, MediaCoverageAdmin)
admin.site.register(Tutorial, TutorialAdmin)
admin.site.register(Vote)
| bsd-3-clause | Python |
2a646f248e3a9e3d6ca1ffe1c9594af3b597dcb1 | Add edge cases for undo-push | zhangela/git-undo | wrapper.py | wrapper.py | import subprocess
import os
import sys
import sqlite3
import hashlib
import time
# Prompt string for the interactive questions further below.
prompt = '> '
# strip new line
repo_path = subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).strip()
# folder to store all settings and backups
common_path = os.path.expanduser("~/Library/Application Support/git-undo/")
# make sure the settings and backups folder exists
if not os.path.isdir(common_path):
    os.mkdir(common_path)
# One shared SQLite database records every wrapped git invocation.
conn = sqlite3.connect(common_path + 'gitundo.db')
cursor = conn.cursor()
# Create table
cursor.execute('''CREATE TABLE IF NOT EXISTS backups
             (backupid integer primary key autoincrement, repo_path text, created_at timestamp, git_command text)''')
# Millisecond timestamp plus the full reconstructed command line.
created_at = int(time.time() * 1000)
git_command = "git " + " ".join(sys.argv[1:])
cursor.execute('''INSERT INTO backups (repo_path, created_at, git_command) VALUES (?, ?, ?)''',
    (repo_path, created_at, git_command))
backupid = cursor.lastrowid
print "backed up with id: " + str(backupid)
sys.stdout.flush()
conn.commit()
conn.close()
# returns commit id of the previous commit
def getLastCommit():
    """Return the hash of the second-most-recent commit, or False if the
    log has fewer than two commits."""
    remaining = 2
    log_output = subprocess.check_output(["git", "log"])
    for line in log_output.split('\n'):
        tokens = line.split()
        # Skip blank lines; count down on each "commit <hash>" header.
        if tokens and tokens[0] == "commit":
            remaining -= 1
            if remaining == 0:
                return tokens[1]
    return False
# returns commit id of the latest commit
def getCurrentCommit():
    """Return the hash of the most recent commit, or False if none found."""
    x = subprocess.check_output(["git"]+["log"])
    y = x.split('\n')
    for i in y:
        temp = i.split()
        # Guard against blank lines: indexing temp[0] on an empty list
        # raised IndexError here (getLastCommit already had this guard).
        if temp==[]:
            continue
        elif (temp[0]=="commit"):
            return temp[1]
    return False
# returns current branch
def getBranch():
    """Return the name of the checked-out branch, or False if not found."""
    branch_output = subprocess.check_output(["git", "branch"])
    for line in branch_output.split('\n'):
        # `git branch` marks the current branch with "* "; strip that prefix.
        if line.startswith("*"):
            return line[2:]
    return False
# undos push, as noted by http://stackoverflow.com/questions/1270514/undoing-a-git-push
def undoPush():
    """Undo the most recent `git push` by pushing the previous commit back
    to the remote branch.

    The first three branches are placeholders (`if False`) for server
    configurations (denyNonFastForwards / denyDeletes) that are not yet
    detected -- TODO implement the detection; only the final force-push
    path currently runs.
    """
    # if system.denyNonFastForwards and denyDeletes:
    if False:
        subprocess.call(["git","update-ref","refs/heads/"+getBranch(),getLastCommit(),getCurrentCommit()])
    # elif system.denyNonFastForwards and master is not the only branch
    elif False:
        print("")
    # elif system.denyNonFastForwards
    elif False:
        subprocess.call(["git","push","origin",":"+getBranch()])
        subprocess.call(["git","push","origin",getLastCommit()+":refs/heads/"+getBranch()])
    # else: force-push the previous commit over the remote branch head
    else:
        subprocess.call(["git","push","-f","origin",getLastCommit()+":"+getBranch()])
## Main Code
# Dispatch on the first CLI argument: "push" triggers the undo logic,
# "test" is a smoke-test hook, anything else is forwarded to real git.
if (sys.argv[1] == "push"):
    undoPush()
elif (sys.argv[1] == "test"):
    print("tester")
else:
    subprocess.call(["git"] + sys.argv[1:])
## Code for prompts
# print("Your repository has a denyNonFastForward flag on, so the history \
# cannot be overwritten. The undo-push will be written into the git history.\
# Is that alright? (Y/N)")
# ans = raw_input(prompt)
# if (ans.lower()=="y" or ans.lower()=="yes"):
# elif (ans.lower()=="n" or ans.lower()=="no"):
# else:
# raise ValueError("Sorry bro I have no idea what you're saying. Bye.")
| import subprocess
import os
import sys
import sqlite3
import hashlib
import time
prompt = '> '
# strip new line
repo_path = subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).strip()
# folder to store all settings and backups
common_path = os.path.expanduser("~/Library/Application Support/git-undo/")
# make sure the settings and backups folder exists
if not os.path.isdir(common_path):
os.mkdir(common_path)
conn = sqlite3.connect(common_path + 'gitundo.db')
cursor = conn.cursor()
# Create table
cursor.execute('''CREATE TABLE IF NOT EXISTS backups
(backupid integer primary key autoincrement, repo_path text, created_at timestamp, git_command text)''')
created_at = int(time.time() * 1000)
git_command = "git " + " ".join(sys.argv[1:])
cursor.execute('''INSERT INTO backups (repo_path, created_at, git_command) VALUES (?, ?, ?)''',
(repo_path, created_at, git_command))
backupid = cursor.lastrowid
print "backed up with id: " + str(backupid)
sys.stdout.flush()
conn.commit()
conn.close()
# returns commit id of the previous commit
def getLastCommit():
counter = 2
x = subprocess.check_output(["git"]+["log"])
y = x.split('\n')
for i in y:
temp = i.split()
if temp==[]:
continue
elif (i.split()[0]=="commit"):
counter-=1
if counter==0:
return i.split()[1]
return False
# returns curent branch
def getBranch():
x = subprocess.check_output(["git"]+["branch"])
y = x.split('\n')
for i in y:
if (i[:1]=="*"):
return i[2:]
return False
# undos push
def undoPush():
# if system.denyNonFastForwards and denyDeletes:
if False:
print("")
# elif system.denyNonFastForwards and master is not the only branch
elif False:
print("")
# elif system.denyNonFastForwards
elif False:
print("sorry we don't support you yet.")
# else
else:
subprocess.call(["git","push","-f","origin",getLastCommit()+":"+getBranch()])
## Main Code
if (sys.argv[1] == "push"):
undoPush()
elif (sys.argv[1] == "test"):
print("tester")
else:
subprocess.call(["git"] + sys.argv[1:])
## Code for prompts
# print("Your repository has a denyNonFastForward flag on, so the history \
# cannot be overwritten. The undo-push will be written into the git history.\
# Is that alright? (Y/N)")
# ans = raw_input(prompt)
# if (ans.lower()=="y" or ans.lower()=="yes"):
# elif (ans.lower()=="n" or ans.lower()=="no"):
# else:
# raise ValueError("Sorry bro I have no idea what you're saying. Bye.")
| mit | Python |
7df8fc65fe7130a27aa85d9f88c32d0d50e93aff | Fix the mobilenet_ssd_quant_test expected result | iree-org/iree-samples,iree-org/iree-samples,iree-org/iree-samples,iree-org/iree-samples | tflitehub/mobilenet_ssd_quant_test.py | tflitehub/mobilenet_ssd_quant_test.py | # RUN: %PYTHON %s
import absl.testing
import numpy
import os
import test_util
import urllib.request
from PIL import Image
# Model from https://github.com/google-coral/test_data/raw/master/ssd_mobilenet_v2_face_quant_postprocess.tflite
# but trimmed the final TFLite_PostProcess op.
model_path = "https://storage.googleapis.com/iree-shared-files/models/ssd_mobilenet_v2_face_quant.tflite"
class MobilenetSsdQuantTest(test_util.TFLiteModelTest):
    """Compile-and-run test for the quantized SSD MobileNet v2 face model."""
    def __init__(self, *args, **kwargs):
        super(MobilenetSsdQuantTest, self).__init__(model_path, *args, **kwargs)
    def compare_results(self, iree_results, tflite_results, details):
        """Compare IREE output against TFLite; quantized model, so loose atol=1.0."""
        super(MobilenetSsdQuantTest, self).compare_results(iree_results, tflite_results, details)
        self.assertTrue(numpy.isclose(iree_results[0], tflite_results[0], atol=1.0).all())
    def generate_inputs(self, input_details):
        """Download a sample image and resize/reshape it to the model input shape."""
        img_path = "https://github.com/google-coral/test_data/raw/master/grace_hopper.bmp"
        local_path = "/".join([self.workdir, "grace_hopper.bmp"])
        urllib.request.urlretrieve(img_path, local_path)
        shape = input_details[0]["shape"]
        im = numpy.array(Image.open(local_path).resize((shape[1], shape[2])))
        args = [im.reshape(shape)]
        return args
    def test_compile_tflite(self):
        self.compile_and_execute()
if __name__ == '__main__':
absl.testing.absltest.main()
| # RUN: %PYTHON %s
# XFAIL: *
import absl.testing
import numpy
import os
import test_util
import urllib.request
from PIL import Image
# Model from https://github.com/google-coral/test_data/raw/master/ssd_mobilenet_v2_face_quant_postprocess.tflite
# but trimmed the final TFLite_PostProcess op.
model_path = "https://storage.googleapis.com/iree-shared-files/models/ssd_mobilenet_v2_face_quant.tflite"
class MobilenetSsdQuantTest(test_util.TFLiteModelTest):
def __init__(self, *args, **kwargs):
super(MobilenetSsdQuantTest, self).__init__(model_path, *args, **kwargs)
def compare_results(self, iree_results, tflite_results, details):
super(MobilenetSsdQuantTest, self).compare_results(iree_results, tflite_results, details)
self.assertTrue(numpy.isclose(iree_results[0], tflite_results[0], atol=1.0).all())
def generate_inputs(self, input_details):
img_path = "https://github.com/google-coral/test_data/raw/master/grace_hopper.bmp"
local_path = "/".join([self.workdir, "grace_hopper.bmp"])
urllib.request.urlretrieve(img_path, local_path)
shape = input_details[0]["shape"]
im = numpy.array(Image.open(local_path).resize((shape[1], shape[2])))
args = [im.reshape(shape)]
return args
def test_compile_tflite(self):
self.compile_and_execute()
if __name__ == '__main__':
absl.testing.absltest.main()
| apache-2.0 | Python |
e7f63f1efd482eb42d8941a6748ed217c69b4c3c | Add polls.models.Poll and polls.models.Choice | datphan/teracy-tutorial,teracyhq/django-tutorial | apps/polls/models.py | apps/polls/models.py | from django.db import models
class Poll(models.Model):
    """A poll question presented to users."""
    question = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
class Choice(models.Model):
    """One selectable answer for a Poll, with a running vote tally."""
    poll = models.ForeignKey(Poll)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
| from django.db import models
# Create your models here.
| bsd-3-clause | Python |
8a750370c70c53822da4420caf3d3090544ff7d0 | Fix static analysis issue | lpenz/anac-civ-csv-upload | anac/__init__.py | anac/__init__.py | # encoding: utf-8
'''Interface com a ANAC'''
import logging
import re
from netrc import netrc
from mechanize import Browser
def _log():
if not _log.logger:
_log.logger = logging.getLogger()
return _log.logger
_log.logger = None
class Anac(object):
    '''Scraping client for ANAC's SACI web system.

    Logs in on construction using credentials from ~/.netrc and resolves
    the URL of the CIV inclusion form.
    '''
    def __init__(self, dryrun):
        # dryrun: when True, add() only logs the form instead of submitting it
        self.dryrun = dryrun
        self.br = Browser()
        self.br.set_handle_robots(False)
        self.br.open('https://sistemas.anac.gov.br/SACI/')
        self.br.select_form(nr=0)
        self.host = 'sistemas.anac.gov.br'
        a = netrc().authenticators(self.host)
        if a is None:
            # Fail fast: without credentials the login form cannot be filled
            # (the original fell through and crashed on a[0] with TypeError).
            # Also adds the missing space between the concatenated literals.
            msg = ('Usuario e senha para %s '
                   'não encontrado no ~/.netrc' % self.host)
            _log().error(msg)
            raise RuntimeError(msg)
        self.br.form['txtLogin'] = a[0]
        self.br.form['txtSenha'] = a[2]
        r = self.br.submit()
        m = re.search("(\/SACI\/CIV\/Digital\/incluir\.asp[^']+)'", r.read())
        if m is None:
            # Login probably failed or the page layout changed; m.group(1)
            # would otherwise raise an opaque AttributeError.
            raise RuntimeError('URL do formulario de inclusao nao encontrada')
        self.url = 'https://' + self.host + m.group(1)
    def add(self, dat):
        '''Fill in and submit the CIV inclusion form with the values in dat.'''
        self.br.open(self.url)
        self.br.select_form(nr=0)
        self.br.form.set_all_readonly(False)
        for k, v in dat.items():
            self.br.form[k] = v
        _log().debug('Dados do formulário: ' + str(self.br.form))
        if self.dryrun:
            _log().warn('dryrun')
            return
        r = self.br.submit().read()
        if 'sucesso' in r:
            return
        _log().warn(r)
| # encoding: utf-8
'''Interface com a ANAC'''
import logging
import re
from netrc import netrc
from mechanize import Browser
def _log():
if not _log.logger:
_log.logger = logging.getLogger()
return _log.logger
_log.logger = None
class Anac(object):
def __init__(self, dryrun):
self.dryrun = dryrun
self.br = Browser()
self.br.set_handle_robots(False)
self.br.open('https://sistemas.anac.gov.br/SACI/')
self.br.select_form(nr=0)
self.host = 'sistemas.anac.gov.br'
a = netrc().authenticators(self.host)
if a is None:
_log().error('Usuario e senha para %s'
'não encontrado no ~/.netrc' % self.host)
self.br.form['txtLogin'] = a[0]
self.br.form['txtSenha'] = a[2]
r = self.br.submit()
m = re.search("(\/SACI\/CIV\/Digital\/incluir\.asp[^']+)'", r.read())
self.url = 'https://' + self.host + m.group(1)
def add(self, dat):
self.br.open(self.url)
self.br.select_form(nr=0)
self.br.form.set_all_readonly(False)
for k, v in dat.items():
self.br.form[k] = v
_log().debug('Dados do formulário: ' + str(self.br.form))
if self.dryrun:
_log().warn('dryrun')
return
r = self.br.submit().read()
if 'sucesso' in r:
return
_log().warn(r)
| apache-2.0 | Python |
3243f199fb46d2d6f95ae9afd18b1570f9b5f529 | Fix up bad last commit | SingingTree/AStatsScraper,SingingTree/AStatsScraper | astatsscraper/parsing.py | astatsscraper/parsing.py | def parse_app_page(response):
    # Should always be able to grab a title
    title = response.xpath('//div[@class = "panel panel-default panel-gameinfo"]/div[@class = "panel-heading"]/text()').extract()[0].strip()
    # Parse times into floats
    # astats uses a comma as the decimal separator, so normalise it first.
    time_to_hundo = response.xpath('//table[@class = "Default1000"]/tr/td[span = "Hours to 100%"]/text()[last()]').extract()[0].strip()
    time_to_hundo = time_to_hundo.replace(',', '.')
    time_to_hundo = float(time_to_hundo)
    # Points may or may not be present, default to 0 if absent
    points = response.xpath('//table[@class = "Default1000"]/tr/td[span = "Points"]/text()[last()]').extract()
    if not points:
        points = 0
    else:
        points = int(points[0].strip())
    # Generator: yields a single summary dict for the scraped app page.
    yield {
        'title': title,
        'time to 100%': time_to_hundo,
        'points': points,
    }
def parse_search_result_for_apps(response):
    """Yield {'app_id': ...} for every game-info link in a search result page."""
    prefix = 'Steam_Game_Info.php?AppID='
    for href in response.xpath('//table//table//a/@href'):
        link = href.extract()
        if link.startswith(prefix):
            # Everything after the prefix is the Steam app id.
            yield {'app_id': link[len(prefix):]}
| def parse_app_page(response):
# Should always be able to grab a title
title = response.xpath('//div[@class = "panel panel-default panel-gameinfo"]/div[@class = "panel-heading"]/text()').extract()[0].strip()
# Parse times into floats
time_to_hundo = response.xpath('//table[@class = "Default1000"]/tr/td[span = "Hours to 100%"]/text()[last()]').extract()[0].strip()
time_to_hundo = time_to_hundo.replace(',', '.')
time_to_hundo = float(time_to_hundo)
# Points may or may not be present, default to 0 if absent
points = response.xpath('//table[@class = "Default1000"]/tr/td[span = "Points"]/text()[last()]').extract()
if not points:
points = 0
else:
points = int(points[0].strip())
yield {
'title': title,
'time to 100%': time_to_hundo,
'points': points,
}
def parse_search_result_for_apps(response):
    """Yield the Steam app id (as a string) for each game-info link found."""
    prefix = 'Steam_Game_Info.php?AppID='
    for href in response.xpath('//table//table//a/@href'):
        relative_url = href.extract()
        if relative_url.startswith(prefix):
            # Bug fix: the slice must drop the prefix, not keep it --
            # relative_url[:len(prefix)] returned the prefix itself
            # instead of the app id that follows it.
            yield relative_url[len(prefix):]
| mit | Python |
6b9e636f24ca12a33b9e9a761e6c26ec1b36dec8 | update label in form | liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin | meinberlin/apps/maptopicprio/forms.py | meinberlin/apps/maptopicprio/forms.py | from ckeditor_uploader import fields
from django import forms
from django.utils.translation import ugettext_lazy as _
from adhocracy4.categories.forms import CategorizableFieldMixin
from adhocracy4.labels.mixins import LabelsAddableFieldMixin
from adhocracy4.maps import widgets as maps_widgets
from . import models
class MapTopicForm(CategorizableFieldMixin,
                   LabelsAddableFieldMixin,
                   forms.ModelForm):
    """Create/edit form for a MapTopic that must be located on a map."""
    # Rich-text body with image-upload support.
    description = fields.RichTextUploadingFormField(
        config_name='image-editor', required=True)
    def __init__(self, *args, **kwargs):
        # The module settings carry the polygon the chosen point must lie in.
        self.settings = kwargs.pop('settings_instance')
        super().__init__(*args, **kwargs)
        self.fields['point'].widget = maps_widgets.MapChoosePointWidget(
            polygon=self.settings.polygon)
        self.fields['point'].error_messages['required'] = _(
            'Please locate your proposal on the map.')
        # Override the label derived from the model field's verbose name.
        self.fields['description'].label = _('Description')
    class Meta:
        model = models.MapTopic
        fields = ['name', 'description', 'category',
                  'labels', 'point', 'point_label']
        labels = {
            'point': _('Locate the place on a map'),
            'point_label': _('Place label')
        }
| from ckeditor_uploader import fields
from django import forms
from django.utils.translation import ugettext_lazy as _
from adhocracy4.categories.forms import CategorizableFieldMixin
from adhocracy4.labels.mixins import LabelsAddableFieldMixin
from adhocracy4.maps import widgets as maps_widgets
from . import models
class MapTopicForm(CategorizableFieldMixin,
LabelsAddableFieldMixin,
forms.ModelForm):
description = fields.RichTextUploadingFormField(
config_name='image-editor', required=True)
def __init__(self, *args, **kwargs):
self.settings = kwargs.pop('settings_instance')
super().__init__(*args, **kwargs)
self.fields['point'].widget = maps_widgets.MapChoosePointWidget(
polygon=self.settings.polygon)
self.fields['point'].error_messages['required'] = _(
'Please locate your proposal on the map.')
class Meta:
model = models.MapTopic
fields = ['name', 'description', 'category',
'labels', 'point', 'point_label']
labels = {
'point': _('Locate the place on a map'),
'point_label': _('Place label'),
}
| agpl-3.0 | Python |
ddbefe2684cfc375e47c2ef1763bb1214a822f02 | Add import of RetentionContestOption | opencivicdata/python-opencivicdata,opencivicdata/python-opencivicdata-django,opencivicdata/python-opencivicdata-django,opencivicdata/python-opencivicdata-django,opencivicdata/python-opencivicdata | opencivicdata/elections/models/__init__.py | opencivicdata/elections/models/__init__.py | # flake8: NOQA
from .election import Election, ElectionIdentifier, ElectionSource
from .candidacy import Candidacy, CandidacySource
from .contests.base import ContestBase
from .contests.ballot_measure import (BallotMeasureContest, BallotMeasureContestOption,
BallotMeasureContestIdentifier,
BallotMeasureContestSource,
RetentionContest, RetentionContestOption,
RetentionContestIdentifier,
RetentionContestSource)
from .contests.candidate import (CandidateContest, CandidateContestPost,
CandidateContestIdentifier, CandidateContestSource)
from .contests.party import (PartyContest, PartyContestOption,
PartyContestIdentifier, PartyContestSource)
| # flake8: NOQA
from .election import Election, ElectionIdentifier, ElectionSource
from .candidacy import Candidacy, CandidacySource
from .contests.base import ContestBase
from .contests.ballot_measure import (BallotMeasureContest, BallotMeasureContestOption,
BallotMeasureContestIdentifier,
BallotMeasureContestSource,
RetentionContest, RetentionContestIdentifier,
RetentionContestSource)
from .contests.candidate import (CandidateContest, CandidateContestPost,
CandidateContestIdentifier, CandidateContestSource)
from .contests.party import (PartyContest, PartyContestOption,
PartyContestIdentifier, PartyContestSource)
| bsd-3-clause | Python |
9c7231be8440b575ff0758ea4f1cb0d7c1fe7f96 | disable fixture tests on CI due to wonky GDAL version on travis. 'pyb -v django_test' will still execute these. | hsr-ba-fs15-dat/opendatahub,hsr-ba-fs15-dat/opendatahub,hsr-ba-fs15-dat/opendatahub,hsr-ba-fs15-dat/opendatahub,hsr-ba-fs15-dat/opendatahub | src/main/python/hub/tests/tests_fixtures.py | src/main/python/hub/tests/tests_fixtures.py | # -*- coding: utf-8 -*-
'''
This test tries to format all fixtures with all available formats.
'''
from __future__ import unicode_literals
from rest_framework.test import APITestCase
import os
class FixtureTest(APITestCase):
pass
EXCLUDED_DOCUMENTS = [
'Dummy', # those are for paging tests and just repeat
'employee', # excessive amounts of data, actually segfaults for interlis1
'children' # same
]
def find_fixtures(client):
    """Collect (id, data-url) pairs for every document file group and every
    transformation exposed by the API, skipping excluded documents."""
    documents = client.get('/api/v1/document/?count=50&page=1')
    transformations = client.get('/api/v1/transformation/?count=50&page=1')
    fixtures = []
    for doc in documents.data['results']:
        # find() < 0 means none of the excluded substrings occurs in the name.
        if all(doc['name'].find(excluded) < 0 for excluded in EXCLUDED_DOCUMENTS):
            file_groups = client.get(doc['file_groups'])
            for fg in file_groups.data:
                fixtures.append(('ODH{}'.format(fg['id']), fg['data']))
    for trf in transformations.data['results']:
        fixtures.append(('TRF{}'.format(trf['id']), '/api/v1/transformation/{}/data/'.format(trf['id'])))
    return fixtures
def get_fixture_test(id, url, fmt):
    """Build a test method asserting that *url* renders with HTTP 200 in *fmt*."""
    def fixture_test(self):
        response = self.client.get('{}?fmt={}'.format(url, fmt))
        self.assertEqual(200, response.status_code)
    return fixture_test
IS_CI = 'CI' in os.environ
# Skip dynamic test generation on CI (per the commit: the GDAL build there
# is too old); 'pyb -v django_test' still executes these locally.
if not IS_CI:
    from rest_framework.test import APIClient
    print 'Creating test cases...'
    client = APIClient()
    fixtures = find_fixtures(client)
    format_list = [fmt['name'].lower() for fmt in client.get('/api/v1/format/').data]
    if 'geopackage' in format_list: # GDAL >= 2.0.0 - support in 1.11.x is basically unusable
        format_list.remove('geopackage')
    # Attach one test method per (fixture, format) combination to FixtureTest.
    for (id, url) in fixtures:
        for fmt in format_list:
            test = get_fixture_test(id, url, fmt)
            test_name = 'test_{}_{}'.format(id.lower(), fmt.lower())
            setattr(FixtureTest, test_name, test)
    print 'Preparations done.'
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework.test import APITestCase
'''
This test tries to format all fixtures with all available formats.
'''
class FixtureTest(APITestCase):
pass
EXCLUDED_DOCUMENTS = [
'Dummy', # those are for paging tests and just repeat
'employee', # excessive amounts of data, actually segfaults for interlis1
'children' # same
]
def find_fixtures(client):
documents = client.get('/api/v1/document/?count=50&page=1')
transformations = client.get('/api/v1/transformation/?count=50&page=1')
fixtures = []
for doc in documents.data['results']:
if all(doc['name'].find(excluded) < 0 for excluded in EXCLUDED_DOCUMENTS):
file_groups = client.get(doc['file_groups'])
for fg in file_groups.data:
fixtures.append(('ODH{}'.format(fg['id']), fg['data']))
for trf in transformations.data['results']:
fixtures.append(('TRF{}'.format(trf['id']), '/api/v1/transformation/{}/data/'.format(trf['id'])))
return fixtures
def get_fixture_test(id, url, fmt):
def fixture_test(self):
data_url = '{}?fmt={}'.format(url, fmt)
response = self.client.get(data_url)
self.assertEqual(200, response.status_code)
return fixture_test
from rest_framework.test import APIClient
print 'Creating test cases...'
client = APIClient()
fixtures = find_fixtures(client)
format_list = [fmt['name'].lower() for fmt in client.get('/api/v1/format/').data]
if 'geopackage' in format_list: # GDAL >= 2.0.0 - support in 1.11.x is basically unusable
format_list.remove('geopackage')
# if 'interlis1' in format_list: # GDAL < 1.11.0 - results in segfault from 1.11.0 onwards (not fixed in 2.0.0beta1)
# format_list.remove('interlis1')
for (id, url) in fixtures:
for fmt in format_list:
test = get_fixture_test(id, url, fmt)
test_name = 'test_{}_{}'.format(id.lower(), fmt.lower())
setattr(FixtureTest, test_name, test)
print 'Preparations done.'
| mit | Python |
b3a0ef94e4d892ff1cd11e2caa138f4388732aed | Add architecture to RuntimeSerializer | Turupawn/website,Turupawn/website,Turupawn/website,lutris/website,lutris/website,lutris/website,lutris/website,Turupawn/website | runners/serializers.py | runners/serializers.py | # pylint: disable=R0903
from rest_framework import serializers
from .models import Runner, RunnerVersion, Runtime
class RunnerVersionSerializer(serializers.ModelSerializer):
class Meta(object):
model = RunnerVersion
fields = ('version', 'architecture', 'url', 'default')
class RunnerSerializer(serializers.ModelSerializer):
versions = RunnerVersionSerializer(many=True)
class Meta(object):
model = Runner
fields = ('name', 'slug', 'icon', 'website', 'versions')
class RuntimeSerializer(serializers.ModelSerializer):
    """Serializer for Runtime objects; exposes the architecture of each build."""
    class Meta(object):
        model = Runtime
        fields = ('name', 'created_at', 'architecture', 'url')
| # pylint: disable=R0903
from rest_framework import serializers
from .models import Runner, RunnerVersion, Runtime
class RunnerVersionSerializer(serializers.ModelSerializer):
class Meta(object):
model = RunnerVersion
fields = ('version', 'architecture', 'url', 'default')
class RunnerSerializer(serializers.ModelSerializer):
versions = RunnerVersionSerializer(many=True)
class Meta(object):
model = Runner
fields = ('name', 'slug', 'icon', 'website', 'versions')
class RuntimeSerializer(serializers.ModelSerializer):
class Meta(object):
model = Runtime
fields = ('name', 'created_at', 'url')
| agpl-3.0 | Python |
c5fcd5e333491ffd5a1c802cc6332c8214b02021 | Fix datetime/timezone issue | hackerspace-ntnu/website,hackerspace-ntnu/website,hackerspace-ntnu/website | authentication/models.py | authentication/models.py | import uuid
from datetime import timedelta
from django.utils import timezone
from django.contrib.auth.admin import User
from django.db import models
# Time the activation is valid in hours
VALID_TIME = 2
class UserAuthentication(models.Model):
    """Single-use token used to activate an account or reset its password."""
    user = models.ForeignKey(User)
    key = models.UUIDField(default=uuid.uuid4, editable=False)
    created = models.DateTimeField(auto_now=False, auto_now_add=True)
    def activate(self):
        """Mark the user active and consume this token."""
        self.user.is_active = True
        self.user.save()
        self.delete()
    def set_password(self, password):
        """Store a new password for the user and consume this token."""
        self.user.set_password(password)
        self.user.save()
        self.delete()
    def expired(self):
        """True once more than VALID_TIME hours have passed since creation."""
        return timezone.now() >= self.created + timedelta(hours=VALID_TIME)
| import uuid
from datetime import datetime, timedelta
from django.contrib.auth.admin import User
from django.db import models
# Time the activation is valid in hours
VALID_TIME = 2
class UserAuthentication(models.Model):
    """One-time activation / password-set token tied to a user."""
    user = models.ForeignKey(User)
    key = models.UUIDField(default=uuid.uuid4, editable=False)
    created = models.DateTimeField(auto_now=False, auto_now_add=True)
    # Activates the user and deletes the authentication object
    def activate(self):
        self.user.is_active = True
        self.user.save()
        self.delete()
    # Set the password and deletes the authentication object
    def set_password(self, password):
        self.user.set_password(password)
        self.user.save()
        self.delete()
    # Checks if the authentication object is expired
    def expired(self):
        # `created` is timezone-aware when Django's USE_TZ is on; comparing
        # it with the naive datetime.now() raises TypeError. Use the
        # timezone-aware now() instead (local import keeps module imports
        # unchanged).
        from django.utils import timezone
        return not timezone.now() < timedelta(hours=VALID_TIME) + self.created
| mit | Python |
c24adb58c9a64fdaf2c0ad598b9431636dba31de | fix ConfigParser import | funginstitute/patentprocessor,nikken1/patentprocessor,yngcan/patentprocessor,funginstitute/patentprocessor,nikken1/patentprocessor,yngcan/patentprocessor,yngcan/patentprocessor,nikken1/patentprocessor,funginstitute/patentprocessor | lib/config_parser.py | lib/config_parser.py | from ConfigParser import ConfigParser
defaults = {'parse': 'defaultparse',
'clean': True,
'consolidate': True,
'datadir': '/data/patentdata/patents/2013',
'dataregex': 'ipg\d{6}.xml',
'years': None,
'downloaddir' : None}
def extract_process_options(handler):
    """
    Extracts the high level options from the [process] section
    of the configuration file. Returns a dictionary of the options
    """
    section = 'process'
    return {
        'parse': handler.get(section, 'parse'),
        # boolean options are stored as the literal string 'True'
        'clean': handler.get(section, 'clean') == 'True',
        'consolidate': handler.get(section, 'consolidate') == 'True',
        'outputdir': handler.get(section, 'outputdir'),
    }
def extract_parse_options(handler, section):
    """Return the parser-specific options read from *section* as a dict."""
    option_names = ('datadir', 'dataregex', 'years', 'downloaddir')
    return dict((name, handler.get(section, name)) for name in option_names)
def get_config_options(configfile):
    """Read *configfile* and return the (process, parse) option dicts.

    The parse section to read is named by the process section's 'parse'
    option; missing options fall back to the module-level `defaults`.
    """
    handler = ConfigParser(defaults)
    handler.read(configfile)
    process_config = extract_process_options(handler)
    parse_config = extract_parse_options(handler, process_config['parse'])
    return process_config, parse_config
| import ConfigParser
defaults = {'parse': 'defaultparse',
'clean': True,
'consolidate': True,
'datadir': '/data/patentdata/patents/2013',
'dataregex': 'ipg\d{6}.xml',
'years': None,
'downloaddir' : None}
def extract_process_options(handler):
"""
Extracts the high level options from the [process] section
of the configuration file. Returns a dictionary of the options
"""
result = {}
result['parse'] = handler.get('process','parse')
result['clean'] = handler.get('process','clean') == 'True'
result['consolidate'] = handler.get('process','consolidate') == 'True'
result['outputdir'] = handler.get('process','outputdir')
return result
def extract_parse_options(handler, section):
options = {}
options['datadir'] = handler.get(section,'datadir')
options['dataregex'] = handler.get(section,'dataregex')
options['years'] = handler.get(section,'years')
options['downloaddir'] = handler.get(section,'downloaddir')
return options
def get_config_options(configfile):
    """Read *configfile* and return the (process, parse) option dicts."""
    # Bug fix: `import ConfigParser` binds the *module*, so calling
    # ConfigParser(defaults) raised TypeError; the class must be
    # referenced as an attribute of the module.
    handler = ConfigParser.ConfigParser(defaults)
    handler.read(configfile)
    process_config = extract_process_options(handler)
    parse_config = extract_parse_options(handler, process_config['parse'])
    return process_config, parse_config
| bsd-2-clause | Python |
3c0c9778252510ac9c4088f20d4bc0dd5ea1f63c | Update point.py | aelbouchti/Implot | implib/point.py | implib/point.py | from math import sqrt
class point():
    """A simple point padded to at least three components.

    The coordinate list ``L`` is the source of truth; ``X``/``Y``/``Z``
    mirror its first three entries. The mutators now keep the mirrors in
    sync (previously ``_avx``/``_avy``/``_avz``/``_avi`` updated ``L``
    but left ``X``/``Y``/``Z`` stale).
    """
    def __init__(self, *args):
        self.L = []
        self.dim = 0
        for index, value in enumerate(args):
            self.L.append(value)
            self.dim = index + 1
        # Pad with zeros so X/Y/Z always exist (dim ends up >= 3).
        while self.dim < 3:
            self.L.append(0)
            self.dim += 1
        self._sync()
    def _sync(self):
        # Refresh the X/Y/Z mirrors from the coordinate list.
        self.X = self.L[0]
        self.Y = self.L[1]
        self.Z = self.L[2]
    def _avx(self, value):
        """Add *value* to the X component."""
        self._avi(0, value)
    def _avy(self, value):
        """Add *value* to the Y component."""
        self._avi(1, value)
    def _avz(self, value):
        """Add *value* to the Z component."""
        self._avi(2, value)
    def _xyz(self):
        """Return the first three components as a tuple."""
        return (self.L[0], self.L[1], self.L[2])
    def _dim(self):
        """Return the (padded) number of stored components."""
        return self.dim
    def _avi(self, i, value):
        """Add *value* to component *i*, keeping the mirrors consistent."""
        self.L[i] += value
        self._sync()
| from math import sqrt
class point():
#declaration point xyz 3D
def __init__(self, *args):
self.X=0
self.Y=0
self.Z=0
self.L=[]
self.dim=0
for i, j in enumerate(args):
self.L.append(j)
self.dim=i+1
while self.dim<3:
self.L.append(0)
self.dim+=1
self.X=self.L[0]
self.Y=self.L[1]
self.Z=self.L[2]
def _avx(self, value):
self.L[0]+=value
def _avy(self, value):
self.L[1]+=value
def _avz(self, value):
self.L[2]+=value
def _xyz(self): return self.L[0], self.L[1], self.L[2]
def _dim(self): return self.dim
def _avi(self, i, value):
self.L[i]+=value
| mit | Python |
37ab91ad50bb8f473012d8bf678445707c2da7b1 | Delete print | maferelo/saleor,UITools/saleor,UITools/saleor,mociepka/saleor,UITools/saleor,UITools/saleor,maferelo/saleor,maferelo/saleor,mociepka/saleor,UITools/saleor,mociepka/saleor | saleor/core/filters.py | saleor/core/filters.py | from __future__ import unicode_literals
from django_filters import FilterSet
class SortedFilterSet(FilterSet):
    '''
    Base class for filtersets used in dashboard views. Adds flag
    is_bound_unsorted to indicate if FilterSet has data from filters other
    than sort_by.
    '''
    def __init__(self, data, *args, **kwargs):
        # Work on a copy so the caller's query data is left untouched.
        data_copy = data.copy() if data else None
        if data:
            # sort_by alone does not count as "filtered".
            if data_copy.get('sort_by', None):
                del data_copy['sort_by']
            if data_copy:
                self.is_bound_unsorted = True
            else:
                self.is_bound_unsorted = False
        # NOTE(review): when `data` is falsy the flag is never set at all --
        # confirm that callers treat the missing attribute as "unfiltered".
        super(SortedFilterSet, self).__init__(data, *args, **kwargs)
| from __future__ import unicode_literals
from django_filters import FilterSet
class SortedFilterSet(FilterSet):
'''
Base class for filtersets used in dashboard views. Adds flag
is_bound_unsorted to indicate if FilterSet has data from filters other
than sort_by.
'''
def __init__(self, data, *args, **kwargs):
data_copy = data.copy() if data else None
print data
if data:
if data_copy.get('sort_by', None):
del data_copy['sort_by']
if data_copy:
self.is_bound_unsorted = True
else:
self.is_bound_unsorted = False
super(SortedFilterSet, self).__init__(data, *args, **kwargs)
| bsd-3-clause | Python |
cd41fdbdb53008c9701213d4f223bb8df0514ecb | Remove unused custom functions `local_tz_to_utc`, `utc_to_local_tz` | homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps | byceps/util/datetime/timezone.py | byceps/util/datetime/timezone.py | """
byceps.util.datetime.timezone
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Timezone helpers
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import current_app
def get_timezone_string() -> str:
    """Return the configured default timezone as a string."""
    # Read from the active Flask app's config; raises KeyError if unset.
    return current_app.config['TIMEZONE']
| """
byceps.util.datetime.timezone
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Timezone helpers
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from datetime import datetime
from flask import current_app
import pendulum
def local_tz_to_utc(dt: datetime):
"""Convert date/time object from configured default local time to UTC."""
tz_str = get_timezone_string()
return (pendulum.instance(dt)
.set(tz=tz_str)
.in_tz(pendulum.UTC)
# Keep SQLAlchemy from converting it to another zone.
.replace(tzinfo=None))
def utc_to_local_tz(dt: datetime) -> datetime:
"""Convert naive date/time object from UTC to configured time zone."""
tz_str = get_timezone_string()
return pendulum.instance(dt).in_tz(tz_str)
def get_timezone_string() -> str:
"""Return the configured default timezone as a string."""
return current_app.config['TIMEZONE']
| bsd-3-clause | Python |
90015e2477131c7169ac0742b266b67f3f2ac78c | fix title canonicalization | karissa/papertalk,karissa/papertalk,karissa/papertalk | papertalk/utils/utils.py | papertalk/utils/utils.py | import urllib, urllib2
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
raise ImportError
import datetime
from bson.objectid import ObjectId
from werkzeug import Response
UA = 'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.9.2.9) Gecko/20100913 Firefox/3.6.9'
def scrape(url):
    """Fetch *url* with a browser User-Agent and return the raw HTML body."""
    # NOTE(review): urllib.quote returns a new string, so its result is
    # discarded here and this line is a no-op -- confirm whether the
    # request was meant to use the quoted form of the URL.
    urllib.quote(url.encode('utf-8'))
    req = urllib2.Request(url=url,
                          headers={'User-Agent': UA})
    hdl = urllib2.urlopen(req)
    html = hdl.read()
    return html
class MongoJsonEncoder(json.JSONEncoder):
    """JSON encoder that also handles datetimes and MongoDB ObjectIds."""
    def default(self, obj):
        if isinstance(obj, (datetime.datetime, datetime.date)):
            # Dates serialize as ISO-8601 strings.
            return obj.isoformat()
        elif isinstance(obj, ObjectId):
            # ObjectIds serialize as their unicode string form.
            return unicode(obj)
        return json.JSONEncoder.default(self, obj)
def jsonify(*args, **kwargs):
    """ jsonify with support for MongoDB ObjectId
    """
    # Builds the payload like dict(*args, **kwargs), serializes it with the
    # Mongo-aware encoder, and wraps it in an application/json Response.
    return Response(json.dumps(dict(*args, **kwargs), cls=MongoJsonEncoder), mimetype='application/json')
def canonicalize(title, year):
    """Build the canonical '<title>-<year>' slug (lower-cased and trimmed)."""
    slug_title = title.lower().strip()
    slug_year = str(year).lower().strip()
    return '{0}-{1}'.format(slug_title, slug_year)
| import urllib, urllib2
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
raise ImportError
import datetime
from bson.objectid import ObjectId
from werkzeug import Response
UA = 'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.9.2.9) Gecko/20100913 Firefox/3.6.9'
def scrape(url):
urllib.quote(url.encode('utf-8'))
req = urllib2.Request(url=url,
headers={'User-Agent': UA})
hdl = urllib2.urlopen(req)
html = hdl.read()
return html
class MongoJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
elif isinstance(obj, ObjectId):
return unicode(obj)
return json.JSONEncoder.default(self, obj)
def jsonify(*args, **kwargs):
""" jsonify with support for MongoDB ObjectId
"""
return Response(json.dumps(dict(*args, **kwargs), cls=MongoJsonEncoder), mimetype='application/json')
def canonicalize(title, year):
return title.lower().strip() + '-' + year.lower().strip()
| mit | Python |
5415f0b66b08ef964c1c3e3f94ea37c4a6542dec | update Sevenoaks import script | DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_sevenoaks.py | polling_stations/apps/data_collection/management/commands/import_sevenoaks.py | from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
class Command(BaseShpStationsShpDistrictsImporter):
srid = 27700
council_id = 'E07000111'
districts_name = 'New May 2017/SDC_PollingDistricts_2017'
stations_name = 'New May 2017/SDC_CouncilElections2017.shp'
elections = ['local.kent.2017-05-04']
def district_record_to_dict(self, record):
code = str(record[3]).strip()
return {
'internal_council_id': code,
'name': str(record[0]).strip(),
'polling_station_id': code,
}
def station_record_to_dict(self, record):
return {
'internal_council_id': str(record[2]).strip(),
'postcode': '',
'address': str(record[10]).strip(),
}
| from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
class Command(BaseShpStationsShpDistrictsImporter):
srid = 27700
council_id = 'E07000111'
districts_name = 'May 2017/SDC_PollingDistricts_2017'
stations_name = 'May 2017/SDC_CouncilElecPollingStn2017.shp'
elections = ['local.kent.2017-05-04']
def district_record_to_dict(self, record):
code = str(record[3]).strip()
return {
'internal_council_id': code,
'name': str(record[0]).strip(),
'polling_station_id': code,
}
def station_record_to_dict(self, record):
return {
'internal_council_id': str(record[1]).strip(),
'postcode': '',
'address': str(record[9]).strip(),
}
| bsd-3-clause | Python |
5f5471fd4c8a4b5563c846cfcf271e00c4246160 | add loader processor | cappatar/knesset-data-pipelines,cappatar/knesset-data-pipelines | datapackage_pipelines_knesset/members/processors/load_members.py | datapackage_pipelines_knesset/members/processors/load_members.py | from datapackage_pipelines_knesset.common.base_processors.add_resource import AddResourceBaseProcessor
# only loads members with the following positionId:
SUPPORTED_POSITION_IDS = [43, 61]
class Processor(AddResourceBaseProcessor):
def _get_schema(self, resource_descriptor):
return resource_descriptor.get("schema", {
"fields": [
{"name": "url", "type": "string", "description": "url to download protocol from"},
{
"name": "kns_person_id", "type": "integer",
"description": "primary key from kns_person table"}
],
"primaryKey": ["kns_person_id"]
})
def _get_new_resource(self):
person_table = self.db_meta.tables.get("kns_person")
persontoposition_table = self.db_meta.tables.get("kns_persontoposition")
if person_table is None or persontoposition_table is None:
raise Exception("processor requires kns person tables to exist")
for db_row in self.db_session\
.query(person_table, persontoposition_table)\
.filter(persontoposition_table.p.PersonID==person_table.p.PersonID)\
.filter(persontoposition_table.p.PositionID.in_(SUPPORTED_POSITION_IDS))\
.all():
row = db_row._asdict()
yield {"url": row["FilePath"],
"kns_person_id": row["PersonID"]}
if __name__ == "__main__":
Processor.main()
| from datapackage_pipelines_knesset.common.base_processors.add_resource import AddResourceBaseProcessor
from sqlalchemy import or_
import os
import logging
# only loads members with the following positionId:
SUPPORTED_POSITION_IDS = [43, 61]
class Processor(AddResourceBaseProcessor):
def _get_schema(self, resource_descriptor):
return resource_descriptor.get("schema", {
"fields": [
{"name": "url", "type": "string", "description": "url to download protocol from"},
{
"name": "kns_person_id", "type": "integer",
"description": "primary key from kns_person table"}
],
"primaryKey": ["kns_person_id"]
})
def _get_new_resource(self):
person_table = self.db_meta.tables.get("kns_person")
persontoposition_table = self.db_meta.tables.get("kns_persontoposition")
if person_table is None or persontoposition_table is None:
raise Exception("processor requires kns person tables to exist")
for db_row in self.db_session\
.query(person_table, persontoposition_table)\
.filter(persontoposition_table.p.PersonID==person_table.p.PersonID)\
.filter(persontoposition_table.p.PositionID.in_(SUPPORTED_POSITION_IDS))\
.all():
row = db_row._asdict()
yield {"url": row["FilePath"],
"kns_person_id": row["PersonID"]}
if __name__ == "__main__":
Processor.main()
| mit | Python |
8b562e43d003f5ced034cb7e27e630dcd33168ad | Update at 2017-07-14 21-45-02 | amoshyc/tthl-code | train_timeconv.py | train_timeconv.py | import json
from pathlib import Path
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
from keras.models import Sequential, Model
from keras.preprocessing import image
from keras.layers import *
from keras.optimizers import *
from data import window_generator
from utils import get_callbacks
def main():
n_train = 25000
n_val = 5000
batch_size = 40
timesteps = 10
model = Sequential()
model.add(TimeDistributed(BatchNormalization(), input_shape=(timesteps, 224, 224, 3)))
model.add(TimeDistributed(Conv2D(4, kernel_size=5, strides=3, activation='relu')))
model.add(TimeDistributed(Conv2D(8, kernel_size=5, strides=2, activation='relu')))
model.add(TimeDistributed(Conv2D(12, kernel_size=3, strides=1, activation='relu')))
model.add(TimeDistributed(MaxPooling2D(pool_size=3)))
model.add(Conv3D(4, kernel_size=3, strides=1, activation='relu'))
model.add(Flatten())
model.add(Dense(30))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
model_arg = {
'loss': 'binary_crossentropy',
'optimizer': 'sgd',
'metrics': ['binary_accuracy']
}
model.compile(**model_arg)
model.summary()
dataset = Path('~/dataset/').expanduser().resolve()
video_dirs = sorted([x for x in dataset.iterdir() if x.is_dir()])
train_dirs = [(dataset / 'video01/')]
val_dirs = [(dataset / 'video00')]
train_gen = window_generator(train_dirs, n_train, batch_size, timesteps)
val_gen = window_generator(val_dirs, n_val, batch_size, timesteps)
fit_arg = {
'generator': train_gen,
'steps_per_epoch': n_train // batch_size,
'epochs': 30,
'validation_data': val_gen,
'validation_steps': n_val // batch_size,
'callbacks': get_callbacks('timeconv')
}
# model.fit_generator(**fit_arg)
if __name__ == '__main__':
main() | import json
from pathlib import Path
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
from keras.models import Sequential, Model
from keras.preprocessing import image
from keras.layers import *
from keras.optimizers import *
from data import window_generator
from utils import get_callbacks
def main():
n_train = 25000
n_val = 5000
batch_size = 40
timesteps = 10
model = Sequential()
model.add(TimeDistributed(BatchNormalization(), input_shape=(timesteps, 224, 224, 3)))
model.add(TimeDistributed(Conv2D(4, kernel_size=5, strides=3, activation='relu')))
model.add(TimeDistributed(Conv2D(8, kernel_size=5, strides=2, activation='relu')))
model.add(TimeDistributed(Conv2D(12, kernel_size=3, strides=1, activation='relu')))
model.add(TimeDistributed(MaxPooling2D(pool_size=3)))
model.add(Flatten())
model.add(Dense(30))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
model_arg = {
'loss': 'binary_crossentropy',
'optimizer': 'sgd',
'metrics': ['binary_accuracy']
}
model.compile(**model_arg)
model.summary()
dataset = Path('~/dataset/').expanduser().resolve()
video_dirs = sorted([x for x in dataset.iterdir() if x.is_dir()])
train_dirs = [(dataset / 'video01/')]
val_dirs = [(dataset / 'video00')]
train_gen = window_generator(train_dirs, n_train, batch_size, timesteps)
val_gen = window_generator(val_dirs, n_val, batch_size, timesteps)
fit_arg = {
'generator': train_gen,
'steps_per_epoch': n_train // batch_size,
'epochs': 30,
'validation_data': val_gen,
'validation_steps': n_val // batch_size,
'callbacks': get_callbacks('timeconv')
}
model.fit_generator(**fit_arg)
if __name__ == '__main__':
main() | apache-2.0 | Python |
d5ea69a6b065be5fbba3f4b990373d537318d00c | bump version | slash-testing/backslash-python,vmalloc/backslash-python | backslash/__version__.py | backslash/__version__.py | __version__ = "2.19.2"
| __version__ = "2.19.1"
| bsd-3-clause | Python |
0fb3f5840532200ce21042f47d770e0d2ceb068b | fix up comments in staging settings file | texastribune/txlege84,texastribune/txlege84,texastribune/txlege84,texastribune/txlege84 | txlege84/txlege84/settings/staging.py | txlege84/txlege84/settings/staging.py | ####################
# STAGING SETTINGS #
####################
import dj_database_url
from .base import *
LOGGING = {
'version': 1,
'handlers': {
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers':['console'],
'propagate': True,
'level':'DEBUG',
}
},
}
######################
# HOST CONFIGURATION #
######################
# https://docs.djangoproject.com/en/1.7/ref/settings/#allowed-hosts
# https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
ALLOWED_HOSTS = ['.texastribune.org']
##########################
# DATABASE CONFIGURATION #
##########################
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# https://github.com/kennethreitz/dj-database-url
DATABASES = {
'default': dj_database_url.config()
}
#######################
# CACHE CONFIGURATION #
#######################
# See: https://docs.djangoproject.com/en/1.7/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
############################
# SECRET KEY CONFIGURATION #
############################
# https://docs.djangoproject.com/en/1.7/ref/settings/#secret-key
SECRET_KEY = get_env_setting('SECRET_KEY')
################################
# DJANGO STORAGE CONFIGURATION #
################################
# http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html
# DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
| #######################
# PRODUCTION SETTINGS #
#######################
import dj_database_url
from .base import *
LOGGING = {
'version': 1,
'handlers': {
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers':['console'],
'propagate': True,
'level':'DEBUG',
}
},
}
######################
# HOST CONFIGURATION #
######################
# https://docs.djangoproject.com/en/1.7/ref/settings/#allowed-hosts
# https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
ALLOWED_HOSTS = ['.texastribune.org'] #FIXME
##########################
# DATABASE CONFIGURATION #
##########################
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# https://github.com/kennethreitz/dj-database-url
DATABASES = {
'default': dj_database_url.config()
}
#######################
# CACHE CONFIGURATION #
#######################
# See: https://docs.djangoproject.com/en/1.7/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
############################
# SECRET KEY CONFIGURATION #
############################
# https://docs.djangoproject.com/en/1.7/ref/settings/#secret-key
SECRET_KEY = get_env_setting('SECRET_KEY')
################################
# DJANGO STORAGE CONFIGURATION #
################################
# http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html
# DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
| mit | Python |
d5ab2226cab8d695854961774835cea25576e39c | remove redirect | theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs | bulbs/videos/views.py | bulbs/videos/views.py | from django.conf import settings
from django.http import Http404
from django.views.generic import DetailView
from django.views.decorators.cache import cache_control
import requests
class SeriesDetailView(DetailView):
def get_template_names(self):
template_names = ["videos/series-detail.html"]
return template_names
def get(self, request, *args, **kwargs):
slug = kwargs.get("slug", None)
if not slug:
raise Http404
response = requests.get(settings.VIDEOHUB_BASE_URL+"/series/"+slug+".json")
if not response.ok:
raise Http404
self.object = response.json()
context = self.get_context_data()
response = self.render_to_response(context)
return response
def get_context_data(self, *args, **kwargs):
context = super(SeriesDetailView, self).get_context_data()
context["channel_name"] = self.object['channel_name']
context["series_name"] = self.object['series_name']
context["series_description"] = self.object['series_description']
context["total_seasons"] = self.object['total_seasons']
context["total_episodes"] = self.object['total_episodes']
return context
series_detail = cache_control(max_age=600)(SeriesDetailView.as_view())
| from django.conf import settings
from django.http import Http404
from django.views.generic import DetailView
from django.views.decorators.cache import cache_control
import requests
class SeriesDetailView(DetailView):
redirect_correct_path = False
def get_template_names(self):
template_names = ["videos/series-detail.html"]
return template_names
def get(self, request, *args, **kwargs):
slug = kwargs.get("slug", None)
if not slug:
raise Http404
response = requests.get(settings.VIDEOHUB_BASE_URL+"/series/"+slug+".json")
if not response.ok:
raise Http404
self.object = response.json()
context = self.get_context_data()
response = self.render_to_response(context)
return response
def get_context_data(self, *args, **kwargs):
context = super(SeriesDetailView, self).get_context_data()
context["channel_name"] = self.object['channel_name']
context["series_name"] = self.object['series_name']
context["series_description"] = self.object['series_description']
context["total_seasons"] = self.object['total_seasons']
context["total_episodes"] = self.object['total_episodes']
return context
series_detail = cache_control(max_age=600)(SeriesDetailView.as_view())
| mit | Python |
1af389a1e92b531d1a78db51b45169536c48ef25 | bump to 0.8.16. | tsuru/tsuru-circus | tsuru/__init__.py | tsuru/__init__.py | # Copyright 2015 tsuru-circus authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
__version__ = "0.8.16"
| # Copyright 2015 tsuru-circus authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
__version__ = "0.8.15"
| bsd-3-clause | Python |
61fdbe0dba79dc19cda5320a0ad1352facf12d3d | Rework imports and ignore known mypy issues | pypa/twine | twine/__init__.py | twine/__init__.py | # Copyright 2018 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = (
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
)
__copyright__ = "Copyright 2019 Donald Stufft and individual contributors"
try:
# https://github.com/python/mypy/issues/1393
from importlib.metadata import metadata # type: ignore
except ImportError:
# https://github.com/python/mypy/issues/1153
from importlib_metadata import metadata # type: ignore
twine_metadata = metadata('twine')
__title__ = twine_metadata['name']
__summary__ = twine_metadata['summary']
__uri__ = twine_metadata['home-page']
__version__ = twine_metadata['version']
__author__ = twine_metadata['author']
__email__ = twine_metadata['author-email']
__license__ = twine_metadata['license']
| # Copyright 2018 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = (
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
)
__copyright__ = "Copyright 2019 Donald Stufft and individual contributors"
try:
import importlib.metadata as importlib_metadata
except ImportError:
import importlib_metadata
metadata = importlib_metadata.metadata('twine')
__title__ = metadata['name']
__summary__ = metadata['summary']
__uri__ = metadata['home-page']
__version__ = metadata['version']
__author__ = metadata['author']
__email__ = metadata['author-email']
__license__ = metadata['license']
| apache-2.0 | Python |
7fbbaf258d6c77b72b76d3a324ed7ea06d8f923e | Rename post data variable to be more inline with requests default | incuna/django-txtlocal | txtlocal/utils.py | txtlocal/utils.py | import requests
from django.conf import settings
from django.contrib.sites.models import Site
from django.template.loader import render_to_string
from django.utils.http import urlencode
def send_sms(text, recipient_list, sender=None,
username=None, password=None, **kwargs):
"""
Render and send an SMS template.
The current site will be added to any context dict passed.
recipient_list: A list of number strings. Each number must be purely
numeric, so no '+' symbols, hyphens or spaces. The
numbers must be prefixed with their international
dialling codes.
eg. UK numbers would look like 447123456789.
text: The message text. (Gets URI encoded.)
sender: Must be word up to 11 characters or a number up to 14 digits.
Defaults to settings.TXTLOCAL_FROM. (Must be a string.)
username: Defaults to settings.TXTLOCAL_USERNAME.
password: Defaults to settings.TXTLOCAL_PASSWORD.
**kwargs: Will be passed through to textlocal in the POST data.
Any unrecognised kwargs will be passed to txtlocal in the POST data.
"""
payload = {
'selectednums': ','.join(recipient_list),
'message': urlencode(text),
'uname': username or settings.TXTLOCAL_USERNAME,
'pword': password or password.TXTLOCAL_PASSWORD,
'from': sender or settings.TXTLOCAL_FROM,
'json': 1, # This makes textlocal send us back info about the request.
}
url = getattr(settings, 'TXTLOCAL_ENDPOINT', 'https://www.txtlocal.com/sendsmspost.php')
response = requests.post(url, data=payload).json()
available = response.get('CreditsAvailable')
required = response.get('CreditsRequired')
remaining = response.get('CreditsRemaining')
if (
available is None or
required is None or
remaining is None or
required <= 0 or
available - required != remaining
):
err = 'Message may not have been sent. Response was: "%s"' % response
raise RuntimeError(err)
def render_to_send_sms(template, context=None, **kwargs):
"""
Render and send an SMS template.
The current site will be added to any context dict passed.
Any unrecognised kwargs will be passed to send_sms.
"""
if context is None:
context = {}
context['site'] = Site.objects.get_current()
text = render_to_string(template, context)
send_sms(text, **kwargs)
| import requests
from django.conf import settings
from django.contrib.sites.models import Site
from django.template.loader import render_to_string
from django.utils.http import urlencode
def send_sms(text, recipient_list, sender=None,
username=None, password=None, **kwargs):
"""
Render and send an SMS template.
The current site will be added to any context dict passed.
recipient_list: A list of number strings. Each number must be purely
numeric, so no '+' symbols, hyphens or spaces. The
numbers must be prefixed with their international
dialling codes.
eg. UK numbers would look like 447123456789.
text: The message text. (Gets URI encoded.)
sender: Must be word up to 11 characters or a number up to 14 digits.
Defaults to settings.TXTLOCAL_FROM. (Must be a string.)
username: Defaults to settings.TXTLOCAL_USERNAME.
password: Defaults to settings.TXTLOCAL_PASSWORD.
**kwargs: Will be passed through to textlocal in the POST data.
Any unrecognised kwargs will be passed to txtlocal in the POST data.
"""
post_data = {
'selectednums': ','.join(recipient_list),
'message': urlencode(text),
'uname': username or settings.TXTLOCAL_USERNAME,
'pword': password or password.TXTLOCAL_PASSWORD,
'from': sender or settings.TXTLOCAL_FROM,
'json': 1, # This makes textlocal send us back info about the request.
}
url = getattr(settings, 'TXTLOCAL_ENDPOINT', 'https://www.txtlocal.com/sendsmspost.php')
response = requests.post(url, data=post_data).json()
available = response.get('CreditsAvailable')
required = response.get('CreditsRequired')
remaining = response.get('CreditsRemaining')
if (
available is None or
required is None or
remaining is None or
required <= 0 or
available - required != remaining
):
err = 'Message may not have been sent. Response was: "%s"' % response
raise RuntimeError(err)
def render_to_send_sms(template, context=None, **kwargs):
"""
Render and send an SMS template.
The current site will be added to any context dict passed.
Any unrecognised kwargs will be passed to send_sms.
"""
if context is None:
context = {}
context['site'] = Site.objects.get_current()
text = render_to_string(template, context)
send_sms(text, **kwargs)
| bsd-2-clause | Python |
a13244776b9017e8a339a94e0fe06d32fa61596d | Hide output from some tests | pfmoore/invoke,mkusz/invoke,pfmoore/invoke,pyinvoke/invoke,mkusz/invoke,pyinvoke/invoke | integration/runners.py | integration/runners.py | import os
from spec import Spec
from invoke import run
from _util import assert_cpu_usage
class Runner_(Spec):
def setup(self):
os.chdir(os.path.join(os.path.dirname(__file__), '_support'))
class responding:
# TODO: update respond_*.py so they timeout instead of hanging forever
# when not being responded to
def base_case(self):
# Basic "doesn't explode" test: respond.py will exit nonzero unless
# this works, causing a Failure.
responses = {r"What's the password\?": "Rosebud\n"}
# Gotta give -u or Python will line-buffer its stdout, so we'll
# never actually see the prompt.
run("python -u respond_base.py", responses=responses, hide=True)
def both_streams(self):
responses = {
"standard out": "with it\n",
"standard error": "between chair and keyboard\n",
}
run("python -u respond_both.py", responses=responses, hide=True)
def stdin_mirroring_isnt_cpu_heavy(self):
"stdin mirroring isn't CPU-heavy"
with assert_cpu_usage(lt=5.0):
run("python -u busywork.py 10", pty=True, hide=True)
class stdin_mirroring:
def piped_stdin_is_not_conflated_with_mocked_stdin(self):
# Re: GH issue #308
# Will die on broken-pipe OSError if bug is present.
run("echo 'lollerskates' | inv -c nested_or_piped foo", hide=True)
def nested_invoke_sessions_not_conflated_with_mocked_stdin(self):
# Also re: GH issue #308. This one will just hang forever. Woo!
run("inv -c nested_or_piped calls_foo", hide=True)
| import os
from spec import Spec
from invoke import run
from _util import assert_cpu_usage
class Runner_(Spec):
def setup(self):
os.chdir(os.path.join(os.path.dirname(__file__), '_support'))
class responding:
# TODO: update respond_*.py so they timeout instead of hanging forever
# when not being responded to
def base_case(self):
# Basic "doesn't explode" test: respond.py will exit nonzero unless
# this works, causing a Failure.
responses = {r"What's the password\?": "Rosebud\n"}
# Gotta give -u or Python will line-buffer its stdout, so we'll
# never actually see the prompt.
run("python -u respond_base.py", responses=responses, hide=True)
def both_streams(self):
responses = {
"standard out": "with it\n",
"standard error": "between chair and keyboard\n",
}
run("python -u respond_both.py", responses=responses, hide=True)
def stdin_mirroring_isnt_cpu_heavy(self):
"stdin mirroring isn't CPU-heavy"
with assert_cpu_usage(lt=5.0):
run("python -u busywork.py 10", pty=True, hide=True)
class stdin_mirroring:
def piped_stdin_is_not_conflated_with_mocked_stdin(self):
# Re: GH issue #308
# Will die on broken-pipe OSError if bug is present.
run("echo 'lollerskates' | inv -c nested_or_piped foo")
def nested_invoke_sessions_not_conflated_with_mocked_stdin(self):
# Also re: GH issue #308. This one will just hang forever. Woo!
run("inv -c nested_or_piped calls_foo")
| bsd-2-clause | Python |
b8d3002eb7bdad0ba0fef2820bb887b126c2d15f | Create socket if not created by the Node | dushyant88/lymph,alazaro/lymph,lyudmildrx/lymph,mouadino/lymph,deliveryhero/lymph,emulbreh/lymph,mouadino/lymph,itakouna/lymph,torte/lymph,emulbreh/lymph,alazaro/lymph,alazaro/lymph,mamachanko/lymph,itakouna/lymph,itakouna/lymph,mouadino/lymph,mamachanko/lymph,mamachanko/lymph,lyudmildrx/lymph,kstrempel/lymph,Drahflow/lymph,lyudmildrx/lymph,vpikulik/lymph | iris/web/interfaces.py | iris/web/interfaces.py | from gevent.pywsgi import WSGIServer
from werkzeug.wrappers import Request
from werkzeug.exceptions import HTTPException
from iris.core.interfaces import Interface
from iris.core import trace
from iris.utils.sockets import create_socket
class WebServiceInterface(Interface):
http_port = 80
def __init__(self, *args, **kwargs):
super(WebServiceInterface, self).__init__(*args, **kwargs)
self.wsgi_server = None
def on_start(self):
super(WebServiceInterface, self).on_start()
try:
socket_fd = self.container.get_shared_socket_fd(self.http_port)
except KeyError:
print self.container.ip, self.http_port
socket = create_socket('%s:%s' % (self.config.get('ip') or
self.container.ip,
self.http_port),
inheritable=True)
socket_fd = socket.fileno()
self.http_socket = create_socket('fd://%s' % socket_fd)
self.wsgi_server = WSGIServer(self.http_socket, Request.application(self.dispatch_request))
self.wsgi_server.start()
def on_stop(self):
self.wsgi_server.stop()
super(WebServiceInterface, self).on_stop()
def dispatch_request(self, request):
trace.set_id()
urls = self.url_map.bind_to_environ(request.environ)
request.urls = urls
try:
endpoint, args = urls.match()
if callable(endpoint):
handler = endpoint(self, request)
response = handler.dispatch(args)
else:
try:
handler = getattr(self, endpoint)
except AttributeError:
raise # FIXME
response = handler(request, **args)
except HTTPException as e:
response = e.get_response(request.environ)
return response
| from gevent.pywsgi import WSGIServer
from werkzeug.wrappers import Request
from werkzeug.exceptions import HTTPException
from iris.core.interfaces import Interface
from iris.core import trace
from iris.utils.sockets import create_socket
class WebServiceInterface(Interface):
http_port = 80
def __init__(self, *args, **kwargs):
super(WebServiceInterface, self).__init__(*args, **kwargs)
self.wsgi_server = None
def on_start(self):
super(WebServiceInterface, self).on_start()
self.http_socket = create_socket('fd://%s' % self.container.get_shared_socket_fd(self.http_port))
self.wsgi_server = WSGIServer(self.http_socket, Request.application(self.dispatch_request))
self.wsgi_server.start()
def on_stop(self):
self.wsgi_server.stop()
super(WebServiceInterface, self).on_stop()
def dispatch_request(self, request):
trace.set_id()
urls = self.url_map.bind_to_environ(request.environ)
request.urls = urls
try:
endpoint, args = urls.match()
if callable(endpoint):
handler = endpoint(self, request)
response = handler.dispatch(args)
else:
try:
handler = getattr(self, endpoint)
except AttributeError:
raise # FIXME
response = handler(request, **args)
except HTTPException as e:
response = e.get_response(request.environ)
return response
| apache-2.0 | Python |
ec9588025890d86747667c00aaae6b58a548997c | Bump version | simplefin/siloscript,simplefin/siloscript,simplefin/siloscript | siloscript/version.py | siloscript/version.py | # Copyright (c) The SimpleFIN Team
# See LICENSE for details.
__version__ = "0.2.1"
| # Copyright (c) The SimpleFIN Team
# See LICENSE for details.
__version__ = "0.1.0-dev"
| apache-2.0 | Python |
b1ade8f0705a692cf09aaefe05fcb5469f08170b | Add parenthesis for python3 support | pdedumast/ShapeVariationAnalyzer,DCBIA-OrthoLab/ShapeVariationAnalyzer,DCBIA-OrthoLab/ShapeVariationAnalyzer,DCBIA-OrthoLab/ShapeVariationAnalyzer,pdedumast/ShapeVariationAnalyzer | src/py/generatelib/add_heatkernel_vtk.py | src/py/generatelib/add_heatkernel_vtk.py | import argparse
import inputData
import glob
import os
import vtk
import scipy.io as sio
import numpy as np
from vtk.util import numpy_support
parser = argparse.ArgumentParser(description='Shape Variation Analyzer')
parser.add_argument('--dataPath', action='store', dest='dirwithSub', help='folder with subclasses', required=True)
if __name__ == '__main__':
args = parser.parse_args()
dataPath=args.dirwithSub
inputdata = inputData.inputData()
data_folders = inputdata.get_folder_classes_list(dataPath)
print(data_folders)
polydata = vtk.vtkPolyData()
for datafolders in data_folders:
i=0
vtklist = glob.glob(os.path.join(datafolders, "*.vtk"))
print(vtklist)
matfile = glob.glob(os.path.join(datafolders,"*.mat"))
matfile_str = ''.join(map(str,matfile))
print ('matfile',matfile_str)
for matlabfilename in matfile:
mat_contents = sio.loadmat(matlabfilename,squeeze_me=True)
shape_matlab = mat_contents['shape']
for vtkfilename in vtklist:
#if vtkfilename.endswith((".vtk")):
# print i
if(vtkfilename[:-4] == matlabfilename[:-4]):
#vtkfilename = matfile_str.replace('.mat','.vtk')
print('vtkfilename',vtkfilename)
reader = vtk.vtkPolyDataReader()
reader.SetFileName(vtkfilename)
reader.Update()
polydata = reader.GetOutput()
#for matfilename in matlist:
#if matfilename.endswith((".mat")):
print('################')
#shape1 = shape_matlab[i]
heat_kernel = np.array(shape_matlab['sihks'].tolist())
print (heat_kernel.shape)
for j in range(0,19):
print(j)
heat_kernel_2 = heat_kernel[:,j]
shape_heat_kernel = heat_kernel_2.shape
print(shape_heat_kernel)
heat_kernel_data = numpy_support.numpy_to_vtk(heat_kernel_2.ravel(),deep=True,array_type=vtk.VTK_FLOAT)
heat_kernel_data.SetNumberOfComponents(1);
heat_kernel_data.SetName('heat_kernel_signature_'+str(j));
polydata.GetPointData().AddArray(heat_kernel_data)
print("Writing", vtkfilename)
writer = vtk.vtkPolyDataWriter()
writer.SetFileName(vtkfilename)
writer.SetInputData(polydata)
writer.Write()
i+=1;
| import argparse
import inputData
import glob
import os
import vtk
import scipy.io as sio
import numpy as np
from vtk.util import numpy_support
parser = argparse.ArgumentParser(description='Shape Variation Analyzer')
parser.add_argument('--dataPath', action='store', dest='dirwithSub', help='folder with subclasses', required=True)
if __name__ == '__main__':
args = parser.parse_args()
dataPath=args.dirwithSub
inputdata = inputData.inputData()
data_folders = inputdata.get_folder_classes_list(dataPath)
print(data_folders)
polydata = vtk.vtkPolyData()
for datafolders in data_folders:
i=0
vtklist = glob.glob(os.path.join(datafolders, "*.vtk"))
print(vtklist)
matfile = glob.glob(os.path.join(datafolders,"*.mat"))
matfile_str = ''.join(map(str,matfile))
print ('matfile',matfile_str)
for matlabfilename in matfile:
mat_contents = sio.loadmat(matlabfilename,squeeze_me=True)
shape_matlab = mat_contents['shape']
for vtkfilename in vtklist:
#if vtkfilename.endswith((".vtk")):
# print i
if(vtkfilename[:-4] == matlabfilename[:-4]):
#vtkfilename = matfile_str.replace('.mat','.vtk')
print('vtkfilename',vtkfilename)
reader = vtk.vtkPolyDataReader()
reader.SetFileName(vtkfilename)
reader.Update()
polydata = reader.GetOutput()
#for matfilename in matlist:
#if matfilename.endswith((".mat")):
print('################')
#shape1 = shape_matlab[i]
heat_kernel = np.array(shape_matlab['sihks'].tolist())
print (heat_kernel.shape)
for j in range(0,19):
print(j)
heat_kernel_2 = heat_kernel[:,j]
shape_heat_kernel = heat_kernel_2.shape
print(shape_heat_kernel)
heat_kernel_data = numpy_support.numpy_to_vtk(heat_kernel_2.ravel(),deep=True,array_type=vtk.VTK_FLOAT)
heat_kernel_data.SetNumberOfComponents(1);
heat_kernel_data.SetName('heat_kernel_signature_'+str(j));
polydata.GetPointData().AddArray(heat_kernel_data)
print("Writing", vtkfilename)
writer = vtk.vtkPolyDataWriter()
writer.SetFileName(vtkfilename)
writer.SetInputData(polydata)
writer.Write()
i+=1;
| apache-2.0 | Python |
7c430d4679ce1cc51f7566bc6479fee8aa7427d6 | Use platform_group in Android device metric (#2515) | google/clusterfuzz,google/clusterfuzz,google/clusterfuzz,google/clusterfuzz,google/clusterfuzz,google/clusterfuzz,google/clusterfuzz,google/clusterfuzz | src/python/bot/startup/android_heartbeat.py | src/python/bot/startup/android_heartbeat.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Android Heartbeat script that monitors
whether Android device is still running or not."""
# Before any other imports, we must fix the path. Some libraries might expect
# to be able to import dependencies directly, but we must store these in
# subdirectories of common so that they are shared with App Engine.
from clusterfuzz._internal.base import modules
modules.fix_module_search_paths()
import time
from clusterfuzz._internal.base import dates
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.metrics import monitor
from clusterfuzz._internal.metrics import monitoring_metrics
from clusterfuzz._internal.platforms import android
from clusterfuzz._internal.system import environment
def main():
"""Run a cycle of heartbeat checks to ensure Android device is running."""
logs.configure('android_heartbeat')
dates.initialize_timezone_from_environment()
environment.set_bot_environment()
monitor.initialize()
if environment.is_android_cuttlefish():
android.adb.set_cuttlefish_device_serial()
device_serial = environment.get_value('ANDROID_SERIAL')
while True:
state = android.adb.get_device_state()
if state == android.adb.DEVICE_NOT_FOUND_STRING.format(
serial=device_serial):
android.adb.connect_to_cuttlefish_device()
state = android.adb.get_device_state()
logs.log('Android device %s state: %s' % (device_serial, state))
monitoring_metrics.ANDROID_UPTIME.increment_by(
int(state == 'device'), {
'serial': device_serial or '',
'platform': environment.get_platform_group() or '',
})
time.sleep(data_types.ANDROID_HEARTBEAT_WAIT_INTERVAL)
if data_handler.bot_run_timed_out():
break
if __name__ == '__main__':
main()
monitor.stop()
| # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Android Heartbeat script that monitors
whether Android device is still running or not."""
# Before any other imports, we must fix the path. Some libraries might expect
# to be able to import dependencies directly, but we must store these in
# subdirectories of common so that they are shared with App Engine.
from clusterfuzz._internal.base import modules
modules.fix_module_search_paths()
import time
from clusterfuzz._internal.base import dates
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.metrics import monitor
from clusterfuzz._internal.metrics import monitoring_metrics
from clusterfuzz._internal.platforms import android
from clusterfuzz._internal.system import environment
def main():
"""Run a cycle of heartbeat checks to ensure Android device is running."""
logs.configure('android_heartbeat')
dates.initialize_timezone_from_environment()
environment.set_bot_environment()
monitor.initialize()
if environment.is_android_cuttlefish():
android.adb.set_cuttlefish_device_serial()
device_serial = environment.get_value('ANDROID_SERIAL')
while True:
state = android.adb.get_device_state()
if state == android.adb.DEVICE_NOT_FOUND_STRING.format(
serial=device_serial):
android.adb.connect_to_cuttlefish_device()
state = android.adb.get_device_state()
logs.log('Android device %s state: %s' % (device_serial, state))
monitoring_metrics.ANDROID_UPTIME.increment_by(
int(state == 'device'), {
'serial': device_serial,
'platform': environment.platform(),
})
time.sleep(data_types.ANDROID_HEARTBEAT_WAIT_INTERVAL)
if data_handler.bot_run_timed_out():
break
if __name__ == '__main__':
main()
monitor.stop()
| apache-2.0 | Python |
dc20ec37ff2cd19b96d0023d8ada1e11350a7b2f | Remove user_id requirement (#8527) | mvaled/sentry,mvaled/sentry,mvaled/sentry,mvaled/sentry,mvaled/sentry,ifduyue/sentry,looker/sentry,beeftornado/sentry,ifduyue/sentry,beeftornado/sentry,ifduyue/sentry,mvaled/sentry,looker/sentry,looker/sentry,ifduyue/sentry,looker/sentry,beeftornado/sentry,ifduyue/sentry,looker/sentry | src/sentry/analytics/events/project_issue_searched.py | src/sentry/analytics/events/project_issue_searched.py | from __future__ import absolute_import, print_function
from sentry import analytics
class ProjectIssueSearchEvent(analytics.Event):
type = 'project_issue.searched'
attributes = (
analytics.Attribute('user_id', required=False),
analytics.Attribute('organization_id'),
analytics.Attribute('project_id'),
analytics.Attribute('query'),
)
analytics.register(ProjectIssueSearchEvent)
| from __future__ import absolute_import, print_function
from sentry import analytics
class ProjectIssueSearchEvent(analytics.Event):
type = 'project_issue.searched'
attributes = (
analytics.Attribute('user_id'),
analytics.Attribute('organization_id'),
analytics.Attribute('project_id'),
analytics.Attribute('query'),
)
analytics.register(ProjectIssueSearchEvent)
| bsd-3-clause | Python |
0e7d35b5d7ed62e1500741134156dffacf6fee84 | Fix doc error. | koenedaele/skosprovider | skosprovider/utils.py | skosprovider/utils.py | # -*- coding: utf-8 -*-
'''
This module contains utility functions for dealing with skos providers.
'''
from skosprovider.skos import (
Concept,
Collection
)
def dict_dumper(provider):
'''
Dump a provider to a format that can be passed to a
:class:`skosprovider.providers.DictionaryProvider`.
:param skosprovider.providers.VocabularyProvider provider: The provider
that wil be turned into a `dict`.
:rtype: A list of dicts.
.. versionadded:: 0.2.0
'''
ret = []
for stuff in provider.get_all():
c = provider.get_by_id(stuff['id'])
labels = [l.__dict__ for l in c.labels]
if isinstance(c, Concept):
notes = [n.__dict__ for n in c.notes]
ret.append({
'id': c.id,
'type': 'concept',
'labels': labels,
'notes': notes,
'narrower': c.narrower,
'broader': c.broader,
'related': c.related
})
elif isinstance(c, Collection):
ret.append({
'id': c.id,
'type': 'collection',
'labels': labels,
'members': c.members
})
return ret
| # -*- coding: utf-8 -*-
'''
This module contains utility functions for dealing with skos providers.
'''
from skosprovider.skos import (
Concept,
Collection
)
def dict_dumper(provider):
'''
Dump a provider to a format that can be passed to a
:class:`skosprovider.providers.FlatDictionaryProvider` or a
:class:`skosprovider.providers.TreeDictionaryProvider`.
:param skosprovider.providers.VocabularyProvider provider: The provider
that wil be turned into a `dict`.
:rtype: A list of dicts.
.. versionadded:: 0.2.0
'''
ret = []
for stuff in provider.get_all():
c = provider.get_by_id(stuff['id'])
labels = [l.__dict__ for l in c.labels]
if isinstance(c, Concept):
notes = [n.__dict__ for n in c.notes]
ret.append({
'id': c.id,
'type': 'concept',
'labels': labels,
'notes': notes,
'narrower': c.narrower,
'broader': c.broader,
'related': c.related
})
elif isinstance(c, Collection):
ret.append({
'id': c.id,
'type': 'collection',
'labels': labels,
'members': c.members
})
return ret
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.